Merge remote-tracking branch 'aosp/upstream-master' into master am: d66a6515d5 am: 1eb82cc7c1 am: 1340a67e99
am: 974cbd9cc5
Change-Id: Id4f7df50b98e7a37f85adeb26c9b3187fdcea5fb
diff --git a/Android.bp b/Android.bp
index 54371c6..ebf00ab 100644
--- a/Android.bp
+++ b/Android.bp
@@ -85,6 +85,7 @@
defaults: ["vixl-common"],
cppflags: [
"-DVIXL_CODE_BUFFER_MALLOC",
+ "-DVIXL_INCLUDE_TARGET_T32",
],
srcs: ["src/aarch32/*.cc"],
}
@@ -95,6 +96,7 @@
cppflags: [
"-DVIXL_INCLUDE_SIMULATOR_AARCH64",
"-DVIXL_CODE_BUFFER_MMAP",
+ "-DVIXL_INCLUDE_TARGET_A64",
],
srcs: ["src/aarch64/*.cc"],
}
diff --git a/benchmarks/aarch32/bench-branch-link-masm.cc b/benchmarks/aarch32/bench-branch-link-masm.cc
index f26027c..05f4afc 100644
--- a/benchmarks/aarch32/bench-branch-link-masm.cc
+++ b/benchmarks/aarch32/bench-branch-link-masm.cc
@@ -97,8 +97,12 @@
exit(1);
}
+#ifdef VIXL_INCLUDE_TARGET_A32
benchmark(iterations, A32);
+#endif
+#ifdef VIXL_INCLUDE_TARGET_T32
benchmark(iterations, T32);
+#endif
return 0;
}
diff --git a/benchmarks/aarch32/bench-branch-masm.cc b/benchmarks/aarch32/bench-branch-masm.cc
index ac48a39..aaeb65e 100644
--- a/benchmarks/aarch32/bench-branch-masm.cc
+++ b/benchmarks/aarch32/bench-branch-masm.cc
@@ -87,8 +87,12 @@
exit(1);
}
+#ifdef VIXL_INCLUDE_TARGET_A32
benchmark(iterations, A32);
+#endif
+#ifdef VIXL_INCLUDE_TARGET_T32
benchmark(iterations, T32);
+#endif
return 0;
}
diff --git a/benchmarks/aarch32/bench-dataop.cc b/benchmarks/aarch32/bench-dataop.cc
index 308f2d3..e5d31d1 100644
--- a/benchmarks/aarch32/bench-dataop.cc
+++ b/benchmarks/aarch32/bench-dataop.cc
@@ -82,8 +82,12 @@
exit(1);
}
+#ifdef VIXL_INCLUDE_TARGET_A32
benchmark(instructions, A32);
+#endif
+#ifdef VIXL_INCLUDE_TARGET_T32
benchmark(instructions, T32);
+#endif
return 0;
}
diff --git a/examples/aarch32/switch.cc b/examples/aarch32/switch.cc
deleted file mode 100644
index f4fef15..0000000
--- a/examples/aarch32/switch.cc
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2016, VIXL authors
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of ARM Limited nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "examples.h"
-
-#define __ masm->
-
-void GenerateGradeComment(MacroAssembler* masm) {
- // Given a grade 'A', 'B', 'C', 'D' or 'F', print a relevant
- // comment.
- // void GradeComment(char grade)
- Label invalid_input;
-
- __ Subs(r1, r0, 'A');
- __ B(lt, &invalid_input);
-
- Label break_switch;
- // Manage the cases from 'A' to 'F' included.
- // Switch/Case handles inputs from [0, n[
- // and here n is 'F' - 'A' + 1 which is 6
- // 'E' will be handled in default case.
- JumpTable8bitOffset grade_comment('F' - 'A' + 1);
- __ Switch(r1, &grade_comment);
-
- __ Case(&grade_comment, 'A' - 'A');
- __ Printf("Excellent!!\n");
- __ Break(&grade_comment);
-
- __ Case(&grade_comment, 'B' - 'A');
- __ Printf("Good job!\n");
- __ Break(&grade_comment);
-
- __ Case(&grade_comment, 'C' - 'A');
- __ Printf("You passed\n");
- __ Break(&grade_comment);
-
- __ Case(&grade_comment, 'D' - 'A');
- __ Printf("Could do better\n");
- __ Break(&grade_comment);
-
- __ Case(&grade_comment, 'F' - 'A');
- __ Printf("You failed!\n");
- __ Break(&grade_comment);
-
- __ Default(&grade_comment);
- __ Bind(&invalid_input);
- __ Printf("Invalid input: %c\n", r0);
- __ EndSwitch(&grade_comment);
-
- __ Bx(lr);
-}
-
-#ifndef TEST_EXAMPLES
-int main() {
- MacroAssembler masm(T32);
- // Generate the code for the example function.
- Label entry_point;
- masm.Bind(&entry_point);
- GenerateGradeComment(&masm);
- masm.FinalizeCode();
-#ifdef VIXL_INCLUDE_SIMULATOR_AARCH32
- // There is no simulator defined for VIXL AArch32.
- printf("This example cannot be simulated\n");
-#else
- byte* code = masm.GetBuffer()->GetStartAddress<byte*>();
- uint32_t code_size = masm.GetSizeOfCodeGenerated();
- ExecutableMemory memory(code, code_size);
- // Run the example function.
- void (*comment_function)(char grade) =
- memory.GetEntryPoint<void (*)(char)>(entry_point);
- (*comment_function)('A');
- (*comment_function)('B');
- (*comment_function)('F');
- (*comment_function)('E');
- (*comment_function)('a');
- (*comment_function)('G');
-#endif
- return 0;
-}
-#endif // TEST_EXAMPLES
diff --git a/examples/aarch64/literal.cc b/examples/aarch64/literal.cc
index 77607c7..f1c5b33 100644
--- a/examples/aarch64/literal.cc
+++ b/examples/aarch64/literal.cc
@@ -42,10 +42,9 @@
Label start;
masm.Bind(&start);
{
- CodeBufferCheckScope scope(&masm,
- kInstructionSize + sizeof(int64_t),
- CodeBufferCheckScope::kCheck,
- CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope scope(&masm,
+ kInstructionSize + sizeof(int64_t),
+ ExactAssemblyScope::kExactSize);
Label over_literal;
__ b(&over_literal);
__ place(&manually_placed_literal);
diff --git a/src/aarch32/constants-aarch32.h b/src/aarch32/constants-aarch32.h
index c53c24e..1b23502 100644
--- a/src/aarch32/constants-aarch32.h
+++ b/src/aarch32/constants-aarch32.h
@@ -32,6 +32,8 @@
#include <stdint.h>
}
+#include "globals-vixl.h"
+
namespace vixl {
namespace aarch32 {
diff --git a/src/aarch32/disasm-aarch32.cc b/src/aarch32/disasm-aarch32.cc
index fe5386c..85c6b4f 100644
--- a/src/aarch32/disasm-aarch32.cc
+++ b/src/aarch32/disasm-aarch32.cc
@@ -8186,10 +8186,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -8239,10 +8238,9 @@
revsh(CurrentCond(), Best, Register(rd), Register(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -8318,10 +8316,9 @@
UnimplementedT32_16("SEVL", instr);
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -9481,10 +9478,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -9575,10 +9571,9 @@
instr);
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -9587,10 +9582,9 @@
UnimplementedT32_32("DBG", instr);
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -9631,10 +9625,9 @@
UnimplementedT32_32("CPSID", instr);
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -9684,10 +9677,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -9781,10 +9773,9 @@
UnimplementedT32_32("DCPS3", instr);
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -9817,17 +9808,15 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -10149,10 +10138,9 @@
MemOperand(Register(rn), Offset));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -10575,10 +10563,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -10998,10 +10985,9 @@
MemOperand(Register(rn), sign, offset, PreIndex));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -11126,10 +11112,9 @@
PreIndex));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -11837,10 +11822,9 @@
PostIndex));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -12442,10 +12426,9 @@
Offset));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -13070,10 +13053,9 @@
PostIndex));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -13676,10 +13658,9 @@
PostIndex));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -14273,10 +14254,9 @@
Offset));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -14899,10 +14879,9 @@
PostIndex));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -16355,10 +16334,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -16968,10 +16946,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -17073,10 +17050,9 @@
PreIndex));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -17154,10 +17130,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -17259,10 +17234,9 @@
PreIndex));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -17554,10 +17528,9 @@
PreIndex));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -17604,10 +17577,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -17734,10 +17706,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -17844,10 +17815,9 @@
PreIndex));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -18046,10 +18016,9 @@
PreIndex));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -18082,10 +18051,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -19284,17 +19252,15 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -19418,10 +19384,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -19708,10 +19673,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -20039,10 +20003,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -20232,10 +20195,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -20478,10 +20440,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -20765,17 +20726,15 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -21060,17 +21019,15 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -21461,10 +21418,9 @@
Register(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -21741,10 +21697,9 @@
Register(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -21980,10 +21935,9 @@
Register(ra));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -22305,10 +22259,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -22469,10 +22422,9 @@
Register(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -22537,10 +22489,9 @@
Register(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -23041,10 +22992,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -23254,10 +23204,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -24593,10 +24542,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -24884,10 +24832,9 @@
vcvtm(dt, F32, SRegister(rd), SRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -25014,17 +24961,15 @@
vcvtm(dt, F64, SRegister(rd), DRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -25168,10 +25113,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -25238,10 +25182,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -26105,10 +26048,9 @@
UnimplementedT32_32("SHA256SU1", instr);
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -26167,10 +26109,9 @@
DRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -26252,10 +26193,9 @@
QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -26314,10 +26254,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -26412,10 +26351,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -26474,10 +26412,9 @@
DRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -26534,17 +26471,15 @@
QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -26933,10 +26868,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -27559,10 +27493,9 @@
QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -28062,10 +27995,9 @@
instr);
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -28133,10 +28065,9 @@
QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -28204,10 +28135,9 @@
QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -28264,10 +28194,9 @@
DRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -28334,10 +28263,9 @@
QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -28643,10 +28571,9 @@
QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -29971,10 +29898,9 @@
DRegisterLane(rm, lane));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -30385,10 +30311,9 @@
DRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -30434,10 +30359,9 @@
DRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -30470,10 +30394,9 @@
DRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -30530,10 +30453,9 @@
DRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -31142,10 +31064,9 @@
QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -31227,10 +31148,9 @@
QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -31287,10 +31207,9 @@
QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -31395,17 +31314,15 @@
QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -32311,10 +32228,9 @@
imm);
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -32455,10 +32371,9 @@
imm);
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -32599,10 +32514,9 @@
imm);
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -32743,10 +32657,9 @@
imm);
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -35413,10 +35326,9 @@
imm);
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -35460,20 +35372,18 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -36417,10 +36327,9 @@
imm);
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -36472,10 +36381,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -36519,10 +36427,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -37516,20 +37423,18 @@
imm);
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -38576,10 +38481,9 @@
imm);
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -38635,10 +38539,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -38690,10 +38593,9 @@
}
break;
}
- default: {
+ default:
UnallocatedT32(instr);
break;
- }
}
break;
}
@@ -38762,10 +38664,9 @@
UnimplementedA32("CPSID", instr);
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -38778,10 +38679,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -39472,10 +39372,9 @@
UnimplementedA32("SHA256SU1", instr);
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -39518,10 +39417,9 @@
vabd(al, F32, DRegister(rd), DRegister(rn), DRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -39591,10 +39489,9 @@
vabd(al, F32, QRegister(rd), QRegister(rn), QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -39649,10 +39546,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -39743,10 +39639,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -39789,10 +39684,9 @@
vpmin(al, F32, DRegister(rd), DRegister(rn), DRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -39841,17 +39735,15 @@
vmin(al, F32, QRegister(rd), QRegister(rn), QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -40692,10 +40584,9 @@
vfms(al, F32, DRegister(rd), DRegister(rn), DRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -40744,10 +40635,9 @@
vfms(al, F32, QRegister(rd), QRegister(rn), QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -40781,10 +40671,9 @@
vmul(al, F32, DRegister(rd), DRegister(rn), DRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -40854,10 +40743,9 @@
vmul(al, F32, QRegister(rd), QRegister(rn), QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -40882,10 +40770,9 @@
vacgt(al, F32, DRegister(rd), DRegister(rn), DRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -40934,10 +40821,9 @@
vacgt(al, F32, QRegister(rd), QRegister(rn), QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -40988,10 +40874,9 @@
vminnm(F32, DRegister(rd), DRegister(rn), DRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -41090,17 +40975,15 @@
vminnm(F32, QRegister(rd), QRegister(rn), QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -41427,10 +41310,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -41960,10 +41842,9 @@
vneg(al, dt, QRegister(rd), QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -42382,10 +42263,9 @@
UnimplementedA32("SHA256SU0", instr);
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -42444,10 +42324,9 @@
QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -42506,10 +42385,9 @@
QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -42558,10 +42436,9 @@
DRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -42620,10 +42497,9 @@
QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -42893,10 +42769,9 @@
QRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -44099,10 +43974,9 @@
DRegisterLane(rm, lane));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -44884,10 +44758,9 @@
imm);
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -45020,10 +44893,9 @@
imm);
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -45156,10 +45028,9 @@
imm);
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -45292,10 +45163,9 @@
imm);
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -47405,10 +47275,9 @@
imm);
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -47447,20 +47316,18 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -48119,10 +47986,9 @@
vmov(al, dt, DRegister(rd), imm);
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -48162,10 +48028,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -48205,10 +48070,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -49071,20 +48935,18 @@
imm);
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -49827,10 +49689,9 @@
vmov(al, dt, QRegister(rd), imm);
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -49874,10 +49735,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -49925,10 +49785,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -50522,10 +50381,9 @@
PostIndex));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -51098,10 +50956,9 @@
Offset));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -51708,10 +51565,9 @@
PostIndex));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -52398,10 +52254,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -52975,10 +52830,9 @@
PostIndex));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -53543,10 +53397,9 @@
Offset));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -54151,10 +54004,9 @@
PostIndex));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -54893,10 +54745,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -55440,17 +55291,15 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -55616,10 +55465,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -55666,10 +55514,9 @@
UnimplementedA32("SRSIB", instr);
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -56070,10 +55917,9 @@
vcvtm(dt, F32, SRegister(rd), SRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -56180,17 +56026,15 @@
vcvtm(dt, F64, SRegister(rd), DRegister(rm));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -56229,10 +56073,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -57031,10 +56874,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -57129,10 +56971,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -57748,10 +57589,9 @@
hlt(al, imm);
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -58169,10 +58009,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -58296,10 +58135,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -58378,10 +58216,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -58501,10 +58338,9 @@
hvc(al, imm);
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -58556,10 +58392,9 @@
UnimplementedA32("SMC", instr);
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -58750,10 +58585,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -58818,17 +58652,15 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -58998,10 +58830,9 @@
PreIndex));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -59065,10 +58896,9 @@
MemOperand(Register(rn), sign, offset, PreIndex));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -60486,10 +60316,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -60609,10 +60438,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -60963,10 +60791,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -61027,17 +60854,15 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -62093,10 +61918,9 @@
UnimplementedA32("SEVL", instr);
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -62109,10 +61933,9 @@
UnimplementedA32("DBG", instr);
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -63086,17 +62909,15 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -63241,10 +63062,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -63285,10 +63105,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -63439,10 +63258,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -63562,10 +63380,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -63787,10 +63604,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -63938,10 +63754,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -63982,10 +63797,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -64139,10 +63953,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -64262,10 +64075,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -64553,10 +64365,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -64666,10 +64477,9 @@
sbfx(condition, Register(rd), Register(rn), lsb, width);
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -64806,10 +64616,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -64877,10 +64686,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -64925,10 +64733,9 @@
Register(ra));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -64973,10 +64780,9 @@
Register(ra));
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -65017,10 +64823,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -65120,10 +64925,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -65928,10 +65732,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -66169,10 +65972,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -67613,10 +67415,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -67780,10 +67581,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
@@ -67858,10 +67658,9 @@
}
break;
}
- default: {
+ default:
UnallocatedA32(instr);
break;
- }
}
break;
}
diff --git a/src/aarch32/instructions-aarch32.h b/src/aarch32/instructions-aarch32.h
index 18292a8..7c2fc44 100644
--- a/src/aarch32/instructions-aarch32.h
+++ b/src/aarch32/instructions-aarch32.h
@@ -285,11 +285,13 @@
DataTypeValue value_;
public:
- DataType(DataTypeValue value) : value_(value) {} // NOLINT(runtime/explicit)
- DataType(uint32_t size) // NOLINT(runtime/explicit)
+ explicit DataType(uint32_t size)
: value_(static_cast<DataTypeValue>(kDataTypeUntyped | size)) {
VIXL_ASSERT((size == 8) || (size == 16) || (size == 32) || (size == 64));
}
+ // Users should be able to use "S8", "S16" and so forth to instantiate this
+ // class.
+ DataType(DataTypeValue value) : value_(value) {} // NOLINT(runtime/explicit)
DataTypeValue GetValue() const { return value_; }
DataTypeType GetType() const {
return static_cast<DataTypeType>(value_ & kDataTypeTypeMask);
@@ -1002,6 +1004,8 @@
explicit Condition(uint32_t condition) : condition_(condition) {
VIXL_ASSERT(condition <= kNone);
}
+ // Users should be able to use "eq", "ne" and so forth to instantiate this
+ // class.
Condition(ConditionType condition) // NOLINT(runtime/explicit)
: condition_(condition) {}
uint32_t GetCondition() const { return condition_ & kMask; }
diff --git a/src/aarch32/macro-assembler-aarch32.cc b/src/aarch32/macro-assembler-aarch32.cc
index 1e7a6b5..d7492f8 100644
--- a/src/aarch32/macro-assembler-aarch32.cc
+++ b/src/aarch32/macro-assembler-aarch32.cc
@@ -302,6 +302,7 @@
void VeneerPoolManager::EmitLabel(Label* label, Label::Offset emitted_target) {
+ VIXL_ASSERT(!IsBlocked());
// Define the veneer.
Label veneer;
masm_->Bind(&veneer);
@@ -419,7 +420,7 @@
generate_veneers = true;
}
}
- if (generate_veneers) {
+ if (!IsVeneerPoolBlocked() && generate_veneers) {
{
ExactAssemblyScopeWithoutPoolsCheck
guard(this,
@@ -433,7 +434,8 @@
// Check if the macro-assembler's internal literal pool should be emitted
// to avoid any overflow. If we already generated the veneers, we can
// emit the pool (the branch is already done).
- if ((target > literal_target) || (option == kNoBranchRequired)) {
+ if (!IsLiteralPoolBlocked() &&
+ ((target > literal_target) || (option == kNoBranchRequired))) {
EmitLiteralPool(option);
}
BindHelper(&after_pools);
@@ -458,6 +460,7 @@
void MacroAssembler::EmitLiteralPool(LiteralPool* const literal_pool,
EmitOption option) {
+ VIXL_ASSERT(!IsLiteralPoolBlocked());
if (literal_pool->GetSize() > 0) {
#ifdef VIXL_DEBUG
for (LiteralPool::RawLiteralListIterator literal_it =
@@ -494,148 +497,6 @@
}
-void MacroAssembler::Switch(Register reg, JumpTableBase* table) {
- // 32-bit table A32:
- // adr ip, table
- // add ip, r1, lsl 2
- // ldr ip, [ip]
- // jmp: add pc, pc, ip, lsl 2
- // table:
- // .int (case_0 - (jmp + 8)) >> 2
- // .int (case_1 - (jmp + 8)) >> 2
- // .int (case_2 - (jmp + 8)) >> 2
-
- // 16-bit table T32:
- // adr ip, table
- // jmp: tbh ip, r1
- // table:
- // .short (case_0 - (jmp + 4)) >> 1
- // .short (case_1 - (jmp + 4)) >> 1
- // .short (case_2 - (jmp + 4)) >> 1
- // case_0:
- // ...
- // b end_switch
- // case_1:
- // ...
- // b end_switch
- // ...
- // end_switch:
- Label jump_table;
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- int table_size = AlignUp(table->GetTableSizeInBytes(), 4);
-
- // Jump to default if reg is not in [0, table->GetLength()[
- Cmp(reg, table->GetLength());
- B(ge, table->GetDefaultLabel());
-
- Adr(scratch, &jump_table);
- if (IsUsingA32()) {
- Add(scratch, scratch, Operand(reg, LSL, table->GetOffsetShift()));
- switch (table->GetOffsetShift()) {
- case 0:
- Ldrb(scratch, MemOperand(scratch));
- break;
- case 1:
- Ldrh(scratch, MemOperand(scratch));
- break;
- case 2:
- Ldr(scratch, MemOperand(scratch));
- break;
- default:
- VIXL_ABORT_WITH_MSG("Unsupported jump table size.\n");
- }
- // Emit whatever needs to be emitted if we want to
- // correctly record the position of the branch instruction
- uint32_t branch_location = GetCursorOffset();
- table->SetBranchLocation(branch_location + GetArchitectureStatePCOffset());
- ExactAssemblyScope scope(this,
- table_size + kA32InstructionSizeInBytes,
- ExactAssemblyScope::kMaximumSize);
- add(pc, pc, Operand(scratch, LSL, 2));
- VIXL_ASSERT((GetCursorOffset() - branch_location) == 4);
- bind(&jump_table);
- GenerateSwitchTable(table, table_size);
- } else {
- // Thumb mode - We have tbb and tbh to do this for 8 or 16bit offsets.
- // But for 32bit offsets, we use the same coding as for A32
- if (table->GetOffsetShift() == 2) {
- // 32bit offsets
- Add(scratch, scratch, Operand(reg, LSL, 2));
- Ldr(scratch, MemOperand(scratch));
- // Cannot use add pc, pc, r lsl 1 as this is unpredictable in T32,
- // so let's do the shift before
- Lsl(scratch, scratch, 1);
- // Emit whatever needs to be emitted if we want to
- // correctly record the position of the branch instruction
- uint32_t branch_location = GetCursorOffset();
- table->SetBranchLocation(branch_location +
- GetArchitectureStatePCOffset());
- ExactAssemblyScope scope(this,
- table_size + kMaxInstructionSizeInBytes,
- ExactAssemblyScope::kMaximumSize);
- add(pc, pc, scratch);
- // add pc, pc, rm fits in 16bit T2 (except for rm = sp)
- VIXL_ASSERT((GetCursorOffset() - branch_location) == 2);
- bind(&jump_table);
- GenerateSwitchTable(table, table_size);
- } else {
- VIXL_ASSERT((table->GetOffsetShift() == 0) ||
- (table->GetOffsetShift() == 1));
- // Emit whatever needs to be emitted if we want to
- // correctly record the position of the branch instruction
- uint32_t branch_location = GetCursorOffset();
- table->SetBranchLocation(branch_location +
- GetArchitectureStatePCOffset());
- ExactAssemblyScope scope(this,
- table_size + kMaxInstructionSizeInBytes,
- ExactAssemblyScope::kMaximumSize);
- if (table->GetOffsetShift() == 0) {
- // 8bit offsets
- tbb(scratch, reg);
- } else {
- // 16bit offsets
- tbh(scratch, reg);
- }
- // tbb/tbh is a 32bit instruction
- VIXL_ASSERT((GetCursorOffset() - branch_location) == 4);
- bind(&jump_table);
- GenerateSwitchTable(table, table_size);
- }
- }
-}
-
-
-void MacroAssembler::GenerateSwitchTable(JumpTableBase* table, int table_size) {
- table->BindTable(GetCursorOffset());
- for (int i = 0; i < table_size / 4; i++) {
- GetBuffer()->Emit32(0);
- }
-}
-
-
-// switch/case/default : case
-// case_index is assumed to be < table->GetLength()
-// which is checked in JumpTable::Link and Table::SetPresenceBit
-void MacroAssembler::Case(JumpTableBase* table, int case_index) {
- table->Link(this, case_index, GetCursorOffset());
- table->SetPresenceBitForCase(case_index);
-}
-
-// switch/case/default : default
-void MacroAssembler::Default(JumpTableBase* table) {
- Bind(table->GetDefaultLabel());
-}
-
-// switch/case/default : break
-void MacroAssembler::Break(JumpTableBase* table) { B(table->GetEndLabel()); }
-
-// switch/case/default : finalize
-// Manage the default path, mosstly. All empty offsets in the jumptable
-// will point to default.
-// All values not in [0, table->GetLength()[ are already pointing here anyway.
-void MacroAssembler::EndSwitch(JumpTableBase* table) { table->Finalize(this); }
-
void MacroAssembler::HandleOutOfBoundsImmediate(Condition cond,
Register tmp,
uint32_t imm) {
diff --git a/src/aarch32/macro-assembler-aarch32.h b/src/aarch32/macro-assembler-aarch32.h
index 706b5a5..2bdf809 100644
--- a/src/aarch32/macro-assembler-aarch32.h
+++ b/src/aarch32/macro-assembler-aarch32.h
@@ -39,7 +39,6 @@
namespace vixl {
namespace aarch32 {
-class JumpTableBase;
class UseScratchRegisterScope;
enum FlagsUpdate { LeaveFlags = 0, SetFlags = 1, DontCare = 2 };
@@ -124,19 +123,8 @@
return this;
}
- virtual void BlockPools() VIXL_OVERRIDE {
- literal_pool_manager_.Block();
- veneer_pool_manager_.Block();
- }
- virtual void ReleasePools() VIXL_OVERRIDE {
- literal_pool_manager_.Release();
- veneer_pool_manager_.Release();
- }
- virtual void EnsureEmitPoolsFor(size_t size) VIXL_OVERRIDE {
- // TODO: Optimise this. It also checks that there is space in the buffer,
- // which we do not need to do here.
- VIXL_ASSERT(IsUint32(size));
- EnsureEmitFor(static_cast<uint32_t>(size));
+ virtual bool ArePoolsBlocked() const VIXL_OVERRIDE {
+ return IsLiteralPoolBlocked() && IsVeneerPoolBlocked();
}
private:
@@ -414,6 +402,37 @@
void PerformEnsureEmit(Label::Offset target, uint32_t extra_size);
protected:
+ virtual void BlockPools() VIXL_OVERRIDE {
+ BlockLiteralPool();
+ BlockVeneerPool();
+ }
+ virtual void ReleasePools() VIXL_OVERRIDE {
+ ReleaseLiteralPool();
+ ReleaseVeneerPool();
+ }
+ virtual void EnsureEmitPoolsFor(size_t size) VIXL_OVERRIDE {
+ // TODO: Optimise this. It also checks that there is space in the buffer,
+ // which we do not need to do here.
+ VIXL_ASSERT(IsUint32(size));
+ EnsureEmitFor(static_cast<uint32_t>(size));
+ }
+
+ // Tell whether any of the macro instruction can be used. When false the
+ // MacroAssembler will assert if a method which can emit a variable number
+ // of instructions is called.
+ virtual void SetAllowMacroInstructions(bool value) VIXL_OVERRIDE {
+ allow_macro_instructions_ = value;
+ }
+
+ void BlockLiteralPool() { literal_pool_manager_.Block(); }
+ void ReleaseLiteralPool() { literal_pool_manager_.Release(); }
+ bool IsLiteralPoolBlocked() const {
+ return literal_pool_manager_.IsBlocked();
+ }
+ void BlockVeneerPool() { veneer_pool_manager_.Block(); }
+ void ReleaseVeneerPool() { veneer_pool_manager_.Release(); }
+ bool IsVeneerPoolBlocked() const { return veneer_pool_manager_.IsBlocked(); }
+
void HandleOutOfBoundsImmediate(Condition cond, Register tmp, uint32_t imm);
void PadToMinimumBranchRange(Label* label);
@@ -435,7 +454,8 @@
ITScope it_scope(this, &c);
instr_callback.emit(this, c, literal);
}
- if (!literal->IsManuallyPlaced() && !literal->IsBound()) {
+ if (!literal->IsManuallyPlaced() && !literal->IsBound() &&
+ !IsLiteralPoolBlocked()) {
if (WasInsertedTooFar(literal)) {
// The instruction's data is too far: revert the emission
GetBuffer()->Rewind(cursor);
@@ -500,12 +520,6 @@
bool GenerateSimulatorCode() const { return generate_simulator_code_; }
- // Tell whether any of the macro instruction can be used. When false the
- // MacroAssembler will assert if a method which can emit a variable number
- // of instructions is called.
- virtual void SetAllowMacroInstructions(bool value) VIXL_OVERRIDE {
- allow_macro_instructions_ = value;
- }
virtual bool AllowMacroInstructions() const VIXL_OVERRIDE {
return allow_macro_instructions_;
}
@@ -720,7 +734,7 @@
// delete'd.
void EmitLiteralPool(LiteralPool* const literal_pool, EmitOption option);
void EmitLiteralPool(EmitOption option = kBranchRequired) {
- VIXL_ASSERT(!literal_pool_manager_.IsBlocked());
+ VIXL_ASSERT(!IsLiteralPoolBlocked());
EmitLiteralPool(literal_pool_manager_.GetLiteralPool(), option);
literal_pool_manager_.ResetCheckpoint();
ComputeCheckpoint();
@@ -892,13 +906,6 @@
void Vmov(Condition cond, SRegister rt, float v) { Vmov(cond, F32, rt, v); }
void Vmov(SRegister rt, float v) { Vmov(al, F32, rt, v); }
- void Switch(Register reg, JumpTableBase* table);
- void GenerateSwitchTable(JumpTableBase* table, int table_size);
- void Case(JumpTableBase* table, int case_index);
- void Break(JumpTableBase* table);
- void Default(JumpTableBase* table);
- void EndSwitch(JumpTableBase* table);
-
// Claim memory on the stack.
// Note that the Claim, Drop, and Peek helpers below ensure that offsets used
// are multiples of 32 bits to help maintain 32-bit SP alignment.
@@ -1232,16 +1239,6 @@
Adds(al, rd, rn, operand);
}
- void Adr(Condition cond, Register rd, Label* label) {
- VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
- VIXL_ASSERT(allow_macro_instructions_);
- VIXL_ASSERT(OutsideITBlock());
- MacroEmissionCheckScope guard(this);
- ITScope it_scope(this, &cond);
- adr(cond, rd, label);
- }
- void Adr(Register rd, Label* label) { Adr(al, rd, label); }
-
void And(Condition cond, Register rd, Register rn, const Operand& operand) {
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
@@ -2150,15 +2147,6 @@
}
void Ldr(Register rt, const MemOperand& operand) { Ldr(al, rt, operand); }
- void Ldr(Condition cond, Register rt, Label* label) {
- VIXL_ASSERT(!AliasesAvailableScratchRegister(rt));
- VIXL_ASSERT(allow_macro_instructions_);
- VIXL_ASSERT(OutsideITBlock());
- MacroEmissionCheckScope guard(this);
- ITScope it_scope(this, &cond);
- ldr(cond, rt, label);
- }
- void Ldr(Register rt, Label* label) { Ldr(al, rt, label); }
void Ldrb(Condition cond, Register rt, const MemOperand& operand) {
VIXL_ASSERT(!AliasesAvailableScratchRegister(rt));
@@ -2182,15 +2170,6 @@
}
void Ldrb(Register rt, const MemOperand& operand) { Ldrb(al, rt, operand); }
- void Ldrb(Condition cond, Register rt, Label* label) {
- VIXL_ASSERT(!AliasesAvailableScratchRegister(rt));
- VIXL_ASSERT(allow_macro_instructions_);
- VIXL_ASSERT(OutsideITBlock());
- MacroEmissionCheckScope guard(this);
- ITScope it_scope(this, &cond);
- ldrb(cond, rt, label);
- }
- void Ldrb(Register rt, Label* label) { Ldrb(al, rt, label); }
void Ldrd(Condition cond,
Register rt,
@@ -2209,18 +2188,6 @@
Ldrd(al, rt, rt2, operand);
}
- void Ldrd(Condition cond, Register rt, Register rt2, Label* label) {
- VIXL_ASSERT(!AliasesAvailableScratchRegister(rt));
- VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2));
- VIXL_ASSERT(allow_macro_instructions_);
- VIXL_ASSERT(OutsideITBlock());
- MacroEmissionCheckScope guard(this);
- ITScope it_scope(this, &cond);
- ldrd(cond, rt, rt2, label);
- }
- void Ldrd(Register rt, Register rt2, Label* label) {
- Ldrd(al, rt, rt2, label);
- }
void Ldrex(Condition cond, Register rt, const MemOperand& operand) {
VIXL_ASSERT(!AliasesAvailableScratchRegister(rt));
@@ -2298,15 +2265,6 @@
}
void Ldrh(Register rt, const MemOperand& operand) { Ldrh(al, rt, operand); }
- void Ldrh(Condition cond, Register rt, Label* label) {
- VIXL_ASSERT(!AliasesAvailableScratchRegister(rt));
- VIXL_ASSERT(allow_macro_instructions_);
- VIXL_ASSERT(OutsideITBlock());
- MacroEmissionCheckScope guard(this);
- ITScope it_scope(this, &cond);
- ldrh(cond, rt, label);
- }
- void Ldrh(Register rt, Label* label) { Ldrh(al, rt, label); }
void Ldrsb(Condition cond, Register rt, const MemOperand& operand) {
VIXL_ASSERT(!AliasesAvailableScratchRegister(rt));
@@ -2325,15 +2283,6 @@
}
void Ldrsb(Register rt, const MemOperand& operand) { Ldrsb(al, rt, operand); }
- void Ldrsb(Condition cond, Register rt, Label* label) {
- VIXL_ASSERT(!AliasesAvailableScratchRegister(rt));
- VIXL_ASSERT(allow_macro_instructions_);
- VIXL_ASSERT(OutsideITBlock());
- MacroEmissionCheckScope guard(this);
- ITScope it_scope(this, &cond);
- ldrsb(cond, rt, label);
- }
- void Ldrsb(Register rt, Label* label) { Ldrsb(al, rt, label); }
void Ldrsh(Condition cond, Register rt, const MemOperand& operand) {
VIXL_ASSERT(!AliasesAvailableScratchRegister(rt));
@@ -2352,15 +2301,6 @@
}
void Ldrsh(Register rt, const MemOperand& operand) { Ldrsh(al, rt, operand); }
- void Ldrsh(Condition cond, Register rt, Label* label) {
- VIXL_ASSERT(!AliasesAvailableScratchRegister(rt));
- VIXL_ASSERT(allow_macro_instructions_);
- VIXL_ASSERT(OutsideITBlock());
- MacroEmissionCheckScope guard(this);
- ITScope it_scope(this, &cond);
- ldrsh(cond, rt, label);
- }
- void Ldrsh(Register rt, Label* label) { Ldrsh(al, rt, label); }
void Lsl(Condition cond, Register rd, Register rm, const Operand& operand) {
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
@@ -7483,21 +7423,6 @@
Vldmia(al, kDataTypeValueNone, rn, write_back, sreglist);
}
- void Vldr(Condition cond, DataType dt, DRegister rd, Label* label) {
- VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
- VIXL_ASSERT(allow_macro_instructions_);
- VIXL_ASSERT(OutsideITBlock());
- MacroEmissionCheckScope guard(this);
- ITScope it_scope(this, &cond);
- vldr(cond, dt, rd, label);
- }
- void Vldr(DataType dt, DRegister rd, Label* label) {
- Vldr(al, dt, rd, label);
- }
- void Vldr(Condition cond, DRegister rd, Label* label) {
- Vldr(cond, Untyped64, rd, label);
- }
- void Vldr(DRegister rd, Label* label) { Vldr(al, Untyped64, rd, label); }
void Vldr(Condition cond,
DataType dt,
@@ -7521,21 +7446,6 @@
Vldr(al, Untyped64, rd, operand);
}
- void Vldr(Condition cond, DataType dt, SRegister rd, Label* label) {
- VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
- VIXL_ASSERT(allow_macro_instructions_);
- VIXL_ASSERT(OutsideITBlock());
- MacroEmissionCheckScope guard(this);
- ITScope it_scope(this, &cond);
- vldr(cond, dt, rd, label);
- }
- void Vldr(DataType dt, SRegister rd, Label* label) {
- Vldr(al, dt, rd, label);
- }
- void Vldr(Condition cond, SRegister rd, Label* label) {
- Vldr(cond, Untyped32, rd, label);
- }
- void Vldr(SRegister rd, Label* label) { Vldr(al, Untyped32, rd, label); }
void Vldr(Condition cond,
DataType dt,
@@ -11084,111 +10994,6 @@
}
};
-class JumpTableBase {
- protected:
- JumpTableBase(int len, int offset_size)
- : table_location_(Label::kMaxOffset),
- branch_location_(Label::kMaxOffset),
- length_(len),
- offset_shift_(WhichPowerOf2(offset_size)),
- presence_(length_) {
- VIXL_ASSERT((length_ >= 0) && (offset_size <= 4));
- }
- virtual ~JumpTableBase() {}
-
- public:
- int GetTableSizeInBytes() const { return length_ * (1 << offset_shift_); }
- int GetOffsetShift() const { return offset_shift_; }
- int GetLength() const { return length_; }
- Label* GetDefaultLabel() { return &default_; }
- Label* GetEndLabel() { return &end_; }
- void SetBranchLocation(uint32_t branch_location) {
- branch_location_ = branch_location;
- }
- uint32_t GetBranchLocation() const { return branch_location_; }
- void BindTable(uint32_t location) { table_location_ = location; }
- virtual void Link(MacroAssembler* masm,
- int case_index,
- uint32_t location) = 0;
-
- uint32_t GetLocationForCase(int i) {
- VIXL_ASSERT((i >= 0) && (i < length_));
- return table_location_ + (i * (1 << offset_shift_));
- }
- void SetPresenceBitForCase(int i) {
- VIXL_ASSERT((i >= 0) && (i < length_));
- presence_.Set(i);
- }
-
- void Finalize(MacroAssembler* masm) {
- if (!default_.IsBound()) {
- masm->Bind(&default_);
- }
- masm->Bind(&end_);
-
- presence_.ForEachBitNotSet(LinkIt(this, masm, default_.GetLocation()));
- }
-
- private:
- uint32_t table_location_;
- uint32_t branch_location_;
- const int length_;
- const int offset_shift_;
- BitField presence_;
- Label default_;
- Label end_;
- struct LinkIt {
- JumpTableBase* table_;
- MacroAssembler* const masm_;
- const uint32_t location_;
- LinkIt(JumpTableBase* table, MacroAssembler* const masm, uint32_t location)
- : table_(table), masm_(masm), location_(location) {}
- bool execute(int id) const {
- VIXL_ASSERT(id < table_->GetLength());
- table_->Link(masm_, static_cast<int>(id), location_);
- return true;
- }
- };
-};
-
-// JumpTable<T>(len): Helper to describe a jump table
-// len here describes the number of possible case. Values in [0, n[ can have a
-// jump offset. Any other value will assert.
-template <typename T>
-class JumpTable : public JumpTableBase {
- protected:
- explicit JumpTable(int length) : JumpTableBase(length, sizeof(T)) {}
-
- public:
- virtual void Link(MacroAssembler* masm,
- int case_index,
- uint32_t location) VIXL_OVERRIDE {
- uint32_t position_in_table = GetLocationForCase(case_index);
- uint32_t from = GetBranchLocation();
- int offset = location - from;
- T* case_offset = masm->GetBuffer()->GetOffsetAddress<T*>(position_in_table);
- if (masm->IsUsingT32()) {
- *case_offset = offset >> 1;
- } else {
- *case_offset = offset >> 2;
- }
- }
-};
-
-class JumpTable8bitOffset : public JumpTable<uint8_t> {
- public:
- explicit JumpTable8bitOffset(int length) : JumpTable<uint8_t>(length) {}
-};
-
-class JumpTable16bitOffset : public JumpTable<uint16_t> {
- public:
- explicit JumpTable16bitOffset(int length) : JumpTable<uint16_t>(length) {}
-};
-
-class JumpTable32bitOffset : public JumpTable<uint32_t> {
- public:
- explicit JumpTable32bitOffset(int length) : JumpTable<uint32_t>(length) {}
-};
} // namespace aarch32
} // namespace vixl
diff --git a/src/aarch64/assembler-aarch64.h b/src/aarch64/assembler-aarch64.h
index ab07d7a..4817eef 100644
--- a/src/aarch64/assembler-aarch64.h
+++ b/src/aarch64/assembler-aarch64.h
@@ -400,7 +400,7 @@
// Assembler.
-class Assembler : public internal::AssemblerBase {
+class Assembler : public vixl::internal::AssemblerBase {
public:
explicit Assembler(
PositionIndependentCodeOption pic = PositionIndependentCode)
@@ -3081,8 +3081,8 @@
(GetPic() == PositionDependentCode);
}
- static const Register& AppropriateZeroRegFor(const CPURegister& reg) {
- return reg.Is64Bits() ? xzr : wzr;
+ static Register AppropriateZeroRegFor(const CPURegister& reg) {
+ return reg.Is64Bits() ? Register(xzr) : Register(wzr);
}
protected:
diff --git a/src/aarch64/macro-assembler-aarch64.h b/src/aarch64/macro-assembler-aarch64.h
index 6c626c7..eec1ae8 100644
--- a/src/aarch64/macro-assembler-aarch64.h
+++ b/src/aarch64/macro-assembler-aarch64.h
@@ -576,7 +576,7 @@
PositionIndependentCodeOption pic = PositionIndependentCode);
~MacroAssembler();
- virtual internal::AssemblerBase* AsAssemblerBase() VIXL_OVERRIDE {
+ virtual vixl::internal::AssemblerBase* AsAssemblerBase() VIXL_OVERRIDE {
return this;
}
@@ -1304,7 +1304,7 @@
SingleEmissionCheckScope guard(this);
fminnm(vd, vn, vm);
}
- void Fmov(VRegister vd, VRegister vn) {
+ void Fmov(const VRegister& vd, const VRegister& vn) {
VIXL_ASSERT(allow_macro_instructions_);
SingleEmissionCheckScope guard(this);
// Only emit an instruction if vd and vn are different, and they are both D
@@ -1315,12 +1315,18 @@
fmov(vd, vn);
}
}
- void Fmov(VRegister vd, Register rn) {
+ void Fmov(const VRegister& vd, const Register& rn) {
VIXL_ASSERT(allow_macro_instructions_);
VIXL_ASSERT(!rn.IsZero());
SingleEmissionCheckScope guard(this);
fmov(vd, rn);
}
+ void Fmov(const VRegister& vd, const XRegister& xn) {
+ Fmov(vd, Register(xn));
+ }
+ void Fmov(const VRegister& vd, const WRegister& wn) {
+ Fmov(vd, Register(wn));
+ }
void Fmov(const VRegister& vd, int index, const Register& rn) {
VIXL_ASSERT(allow_macro_instructions_);
SingleEmissionCheckScope guard(this);
@@ -2833,37 +2839,20 @@
// one instruction. Refer to the implementation for details.
void BumpSystemStackPointer(const Operand& space);
- virtual void SetAllowMacroInstructions(bool value) VIXL_OVERRIDE {
- allow_macro_instructions_ = value;
- }
-
virtual bool AllowMacroInstructions() const VIXL_OVERRIDE {
return allow_macro_instructions_;
}
+ virtual bool ArePoolsBlocked() const VIXL_OVERRIDE {
+ return IsLiteralPoolBlocked() && IsVeneerPoolBlocked();
+ }
+
void SetGenerateSimulatorCode(bool value) {
generate_simulator_code_ = value;
}
bool GenerateSimulatorCode() const { return generate_simulator_code_; }
- void BlockLiteralPool() { literal_pool_.Block(); }
- void ReleaseLiteralPool() { literal_pool_.Release(); }
- bool IsLiteralPoolBlocked() const { return literal_pool_.IsBlocked(); }
- void BlockVeneerPool() { veneer_pool_.Block(); }
- void ReleaseVeneerPool() { veneer_pool_.Release(); }
- bool IsVeneerPoolBlocked() const { return veneer_pool_.IsBlocked(); }
-
- virtual void BlockPools() VIXL_OVERRIDE {
- BlockLiteralPool();
- BlockVeneerPool();
- }
-
- virtual void ReleasePools() VIXL_OVERRIDE {
- ReleaseLiteralPool();
- ReleaseVeneerPool();
- }
-
size_t GetLiteralPoolSize() const { return literal_pool_.GetSize(); }
VIXL_DEPRECATED("GetLiteralPoolSize", size_t LiteralPoolSize() const) {
return GetLiteralPoolSize();
@@ -3037,6 +3026,34 @@
#endif // #ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
protected:
+ void BlockLiteralPool() { literal_pool_.Block(); }
+ void ReleaseLiteralPool() { literal_pool_.Release(); }
+ bool IsLiteralPoolBlocked() const { return literal_pool_.IsBlocked(); }
+ void BlockVeneerPool() { veneer_pool_.Block(); }
+ void ReleaseVeneerPool() { veneer_pool_.Release(); }
+ bool IsVeneerPoolBlocked() const { return veneer_pool_.IsBlocked(); }
+
+ virtual void BlockPools() VIXL_OVERRIDE {
+ BlockLiteralPool();
+ BlockVeneerPool();
+ }
+
+ virtual void ReleasePools() VIXL_OVERRIDE {
+ ReleaseLiteralPool();
+ ReleaseVeneerPool();
+ }
+
+ // The scopes below need to be able to block and release a particular pool.
+ // TODO: Consider removing those scopes or move them to
+ // code-generation-scopes-vixl.h.
+ friend class BlockPoolsScope;
+ friend class BlockLiteralPoolScope;
+ friend class BlockVeneerPoolScope;
+
+ virtual void SetAllowMacroInstructions(bool value) VIXL_OVERRIDE {
+ allow_macro_instructions_ = value;
+ }
+
// Helper used to query information about code generation and to generate
// code for `csel`.
// Here and for the related helpers below:
diff --git a/src/aarch64/operands-aarch64.h b/src/aarch64/operands-aarch64.h
index 90c6f59..dff7379 100644
--- a/src/aarch64/operands-aarch64.h
+++ b/src/aarch64/operands-aarch64.h
@@ -266,6 +266,38 @@
};
+namespace internal {
+
+template <int size_in_bits>
+class FixedSizeRegister : public Register {
+ public:
+ FixedSizeRegister() : Register() {}
+ explicit FixedSizeRegister(unsigned code) : Register(code, size_in_bits) {
+ VIXL_ASSERT(IsValidRegister());
+ }
+ explicit FixedSizeRegister(const Register& other)
+ : Register(other.GetCode(), size_in_bits) {
+ VIXL_ASSERT(other.GetSizeInBits() == size_in_bits);
+ VIXL_ASSERT(IsValidRegister());
+ }
+ explicit FixedSizeRegister(const CPURegister& other)
+ : Register(other.GetCode(), other.GetSizeInBits()) {
+ VIXL_ASSERT(other.GetType() == kRegister);
+ VIXL_ASSERT(other.GetSizeInBits() == size_in_bits);
+ VIXL_ASSERT(IsValidRegister());
+ }
+
+ bool IsValid() const {
+ return Register::IsValid() && (GetSizeInBits() == size_in_bits);
+ }
+};
+
+} // namespace internal
+
+typedef internal::FixedSizeRegister<kXRegSize> XRegister;
+typedef internal::FixedSizeRegister<kWRegSize> WRegister;
+
+
class VRegister : public CPURegister {
public:
VRegister() : CPURegister(), lanes_(1) {}
@@ -409,13 +441,13 @@
const CPURegister NoCPUReg;
-#define DEFINE_REGISTERS(N) \
- const Register w##N(N, kWRegSize); \
- const Register x##N(N, kXRegSize);
+#define DEFINE_REGISTERS(N) \
+ const WRegister w##N(N); \
+ const XRegister x##N(N);
AARCH64_REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS
-const Register wsp(kSPRegInternalCode, kWRegSize);
-const Register sp(kSPRegInternalCode, kXRegSize);
+const WRegister wsp(kSPRegInternalCode);
+const XRegister sp(kSPRegInternalCode);
#define DEFINE_VREGISTERS(N) \
@@ -429,12 +461,12 @@
#undef DEFINE_VREGISTERS
-// Registers aliases.
-const Register ip0 = x16;
-const Register ip1 = x17;
-const Register lr = x30;
-const Register xzr = x31;
-const Register wzr = w31;
+// Register aliases.
+const XRegister ip0 = x16;
+const XRegister ip1 = x17;
+const XRegister lr = x30;
+const XRegister xzr = x31;
+const WRegister wzr = w31;
// AreAliased returns true if any of the named registers overlap. Arguments
diff --git a/src/assembler-base-vixl.h b/src/assembler-base-vixl.h
index 5b838d8..ee54dcb 100644
--- a/src/assembler-base-vixl.h
+++ b/src/assembler-base-vixl.h
@@ -30,6 +30,9 @@
#include "code-buffer-vixl.h"
namespace vixl {
+
+class CodeBufferCheckScope;
+
namespace internal {
class AssemblerBase {
@@ -61,9 +64,13 @@
CodeBuffer* GetBuffer() { return &buffer_; }
const CodeBuffer& GetBuffer() const { return buffer_; }
bool AllowAssembler() const { return allow_assembler_; }
- void SetAllowAssembler(bool allow) { allow_assembler_ = allow; }
protected:
+ void SetAllowAssembler(bool allow) { allow_assembler_ = allow; }
+
+ // CodeBufferCheckScope must be able to temporarily allow the assembler.
+ friend class vixl::CodeBufferCheckScope;
+
// Buffer where the code is emitted.
CodeBuffer buffer_;
diff --git a/src/code-generation-scopes-vixl.h b/src/code-generation-scopes-vixl.h
index 7e47d5e..b7ea2d9 100644
--- a/src/code-generation-scopes-vixl.h
+++ b/src/code-generation-scopes-vixl.h
@@ -163,12 +163,21 @@
virtual ~EmissionCheckScope() { Close(); }
- enum PoolPolicy { kIgnorePools, kCheckPools };
+ enum PoolPolicy {
+ // Do not forbid pool emission inside the scope. Pools will not be emitted
+ // on `Open` either.
+ kIgnorePools,
+ // Force pools to be generated on `Open` if necessary and block their
+ // emission inside the scope.
+ kBlockPools,
+ // Deprecated, but kept for backward compatibility.
+ kCheckPools = kBlockPools
+ };
void Open(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy = kMaximumSize) {
- Open(masm, size, size_policy, kCheckPools);
+ Open(masm, size, size_policy, kBlockPools);
}
void Close() {
@@ -183,7 +192,7 @@
// - Check the code generation limit was not exceeded.
// - Release the pools.
CodeBufferCheckScope::Close();
- if (pool_policy_ == kCheckPools) {
+ if (pool_policy_ == kBlockPools) {
masm_->ReleasePools();
}
VIXL_ASSERT(!initialised_);
@@ -202,7 +211,7 @@
}
masm_ = masm;
pool_policy_ = pool_policy;
- if (pool_policy_ == kCheckPools) {
+ if (pool_policy_ == kBlockPools) {
// To avoid duplicating the work to check that enough space is available
// in the buffer, do not use the more generic `EnsureEmitFor()`. It is
// done below when opening `CodeBufferCheckScope`.
@@ -241,15 +250,8 @@
// constructed.
ExactAssemblyScope(MacroAssemblerInterface* masm,
size_t size,
- SizePolicy assert_policy = kExactSize)
- : EmissionCheckScope(masm, size, assert_policy) {
- VIXL_ASSERT(assert_policy != kNoAssert);
-#ifdef VIXL_DEBUG
- previous_allow_macro_assembler_ = masm->AllowMacroInstructions();
- masm->SetAllowMacroInstructions(false);
-#else
- USE(previous_allow_macro_assembler_);
-#endif
+ SizePolicy size_policy = kExactSize) {
+ Open(masm, size, size_policy);
}
// This constructor does not implicitly initialise the scope. Instead, the
@@ -257,10 +259,28 @@
// scope.
ExactAssemblyScope() {}
- virtual ~ExactAssemblyScope() {
+ virtual ~ExactAssemblyScope() { Close(); }
+
+ void Open(MacroAssemblerInterface* masm,
+ size_t size,
+ SizePolicy size_policy = kExactSize) {
+ Open(masm, size, size_policy, kBlockPools);
+ }
+
+ void Close() {
+ if (!initialised_) {
+ return;
+ }
+ if (masm_ == NULL) {
+ // Nothing to do.
+ return;
+ }
#ifdef VIXL_DEBUG
masm_->SetAllowMacroInstructions(previous_allow_macro_assembler_);
+#else
+ USE(previous_allow_macro_assembler_);
#endif
+ EmissionCheckScope::Close();
}
protected:
@@ -270,9 +290,22 @@
ExactAssemblyScope(MacroAssemblerInterface* masm,
size_t size,
SizePolicy assert_policy,
- PoolPolicy pool_policy)
- : EmissionCheckScope(masm, size, assert_policy, pool_policy) {
- VIXL_ASSERT(assert_policy != kNoAssert);
+ PoolPolicy pool_policy) {
+ Open(masm, size, assert_policy, pool_policy);
+ }
+
+ void Open(MacroAssemblerInterface* masm,
+ size_t size,
+ SizePolicy size_policy,
+ PoolPolicy pool_policy) {
+ VIXL_ASSERT(size_policy != kNoAssert);
+ if (masm == NULL) {
+ // Nothing to do.
+ return;
+ }
+ // Rely on EmissionCheckScope::Open to initialise `masm_` and
+ // `pool_policy_`.
+ EmissionCheckScope::Open(masm, size, size_policy, pool_policy);
#ifdef VIXL_DEBUG
previous_allow_macro_assembler_ = masm->AllowMacroInstructions();
masm->SetAllowMacroInstructions(false);
diff --git a/src/macro-assembler-interface.h b/src/macro-assembler-interface.h
index e1b1cca..0d4d078 100644
--- a/src/macro-assembler-interface.h
+++ b/src/macro-assembler-interface.h
@@ -38,11 +38,19 @@
virtual ~MacroAssemblerInterface() {}
virtual bool AllowMacroInstructions() const = 0;
+ virtual bool ArePoolsBlocked() const = 0;
+
+ protected:
virtual void SetAllowMacroInstructions(bool allow) = 0;
virtual void BlockPools() = 0;
virtual void ReleasePools() = 0;
virtual void EnsureEmitPoolsFor(size_t size) = 0;
+
+ // The following scopes need access to the above method in order to implement
+ // pool blocking and temporarily disable the macro-assembler.
+ friend class ExactAssemblyScope;
+ friend class EmissionCheckScope;
};
} // namespace vixl
diff --git a/test/aarch32/test-assembler-aarch32.cc b/test/aarch32/test-assembler-aarch32.cc
index f344ab5..3eef924 100644
--- a/test/aarch32/test-assembler-aarch32.cc
+++ b/test/aarch32/test-assembler-aarch32.cc
@@ -41,139 +41,133 @@
#define STRINGIFY(x) #x
#ifdef VIXL_INCLUDE_TARGET_A32_ONLY
-#define TEST_T32(Name) \
-void Test##Name##Impl(InstructionSet isa __attribute__((unused)))
+#define TEST_T32(Name) \
+ void Test##Name##Impl(InstructionSet isa __attribute__((unused)))
#else
// Tests declared with this macro will only target T32.
-#define TEST_T32(Name) \
-void Test##Name##Impl(InstructionSet isa); \
-void Test##Name() { \
- Test##Name##Impl(T32); \
-} \
-Test test_##Name(STRINGIFY(AARCH32_T32_##Name), &Test##Name); \
-void Test##Name##Impl(InstructionSet isa __attribute__((unused)))
+#define TEST_T32(Name) \
+ void Test##Name##Impl(InstructionSet isa); \
+ void Test##Name() { Test##Name##Impl(T32); } \
+ Test test_##Name(STRINGIFY(AARCH32_T32_##Name), &Test##Name); \
+ void Test##Name##Impl(InstructionSet isa __attribute__((unused)))
#endif
#ifdef VIXL_INCLUDE_TARGET_T32_ONLY
-#define TEST_A32(Name) \
-void Test##Name##Impl(InstructionSet isa __attribute__((unused)))
+#define TEST_A32(Name) \
+ void Test##Name##Impl(InstructionSet isa __attribute__((unused)))
#else
// Test declared with this macro will only target A32.
-#define TEST_A32(Name) \
-void Test##Name##Impl(InstructionSet isa); \
-void Test##Name() { \
- Test##Name##Impl(A32); \
-} \
-Test test_##Name(STRINGIFY(AARCH32_A32_##Name), &Test##Name); \
-void Test##Name##Impl(InstructionSet isa __attribute__((unused)))
+#define TEST_A32(Name) \
+ void Test##Name##Impl(InstructionSet isa); \
+ void Test##Name() { Test##Name##Impl(A32); } \
+ Test test_##Name(STRINGIFY(AARCH32_A32_##Name), &Test##Name); \
+ void Test##Name##Impl(InstructionSet isa __attribute__((unused)))
#endif
// Tests declared with this macro will be run twice: once targeting A32 and
// once targeting T32.
#if defined(VIXL_INCLUDE_TARGET_A32_ONLY)
-#define TEST(Name) TEST_A32(Name)
+#define TEST(Name) TEST_A32(Name)
#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY)
-#define TEST(Name) TEST_T32(Name)
+#define TEST(Name) TEST_T32(Name)
#else
-#define TEST(Name) \
-void Test##Name##Impl(InstructionSet isa); \
-void Test##Name() { \
- Test##Name##Impl(A32); \
- printf(" > A32 done\n"); \
- Test##Name##Impl(T32); \
- printf(" > T32 done\n"); \
-} \
-Test test_##Name(STRINGIFY(AARCH32_ASM_##Name), &Test##Name); \
-void Test##Name##Impl(InstructionSet isa __attribute__((unused)))
+#define TEST(Name) \
+ void Test##Name##Impl(InstructionSet isa); \
+ void Test##Name() { \
+ Test##Name##Impl(A32); \
+ printf(" > A32 done\n"); \
+ Test##Name##Impl(T32); \
+ printf(" > T32 done\n"); \
+ } \
+ Test test_##Name(STRINGIFY(AARCH32_ASM_##Name), &Test##Name); \
+ void Test##Name##Impl(InstructionSet isa __attribute__((unused)))
#endif
// Tests declared with this macro are not expected to use any provided test
// helpers such as SETUP, RUN, etc.
-#define TEST_NOASM(Name) \
-void Test##Name(); \
-Test test_##Name(STRINGIFY(AARCH32_##Name), &Test##Name); \
-void Test##Name()
+#define TEST_NOASM(Name) \
+ void Test##Name(); \
+ Test test_##Name(STRINGIFY(AARCH32_##Name), &Test##Name); \
+ void Test##Name()
#define __ masm.
#define BUF_SIZE (4096)
-#define ASSERT_LITERAL_POOL_SIZE(size) \
- do { VIXL_CHECK(__ GetLiteralPoolSize() == size); } while (false)
+#define ASSERT_LITERAL_POOL_SIZE(size) \
+ do { \
+ VIXL_CHECK(__ GetLiteralPoolSize() == size); \
+ } while (false)
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH32
// No simulator yet.
-#define SETUP() \
- MacroAssembler masm(BUF_SIZE, isa); \
+#define SETUP() MacroAssembler masm(BUF_SIZE, isa);
-#define START() \
- masm.GetBuffer()->Reset();
+#define START() masm.GetBuffer()->Reset();
-#define END() \
- __ Hlt(0); \
+#define END() \
+ __ Hlt(0); \
__ FinalizeCode();
-#define RUN() \
- DISASSEMBLE();
+#define RUN() DISASSEMBLE();
#define TEARDOWN()
#else // ifdef VIXL_INCLUDE_SIMULATOR_AARCH32.
-#define SETUP() \
- RegisterDump core; \
- MacroAssembler masm(BUF_SIZE, isa); \
- UseScratchRegisterScope harness_scratch(&masm); \
+#define SETUP() \
+ RegisterDump core; \
+ MacroAssembler masm(BUF_SIZE, isa); \
+ UseScratchRegisterScope harness_scratch(&masm); \
harness_scratch.ExcludeAll();
-#define START() \
- masm.GetBuffer()->Reset(); \
- __ Push(r4); \
- __ Push(r5); \
- __ Push(r6); \
- __ Push(r7); \
- __ Push(r8); \
- __ Push(r9); \
- __ Push(r10); \
- __ Push(r11); \
- __ Push(ip); \
- __ Push(lr); \
- __ Mov(r0, 0); \
- __ Msr(APSR_nzcvq, r0); \
+#define START() \
+ masm.GetBuffer()->Reset(); \
+ __ Push(r4); \
+ __ Push(r5); \
+ __ Push(r6); \
+ __ Push(r7); \
+ __ Push(r8); \
+ __ Push(r9); \
+ __ Push(r10); \
+ __ Push(r11); \
+ __ Push(ip); \
+ __ Push(lr); \
+ __ Mov(r0, 0); \
+ __ Msr(APSR_nzcvq, r0); \
harness_scratch.Include(ip);
-#define END() \
- harness_scratch.Exclude(ip); \
- core.Dump(&masm); \
- __ Pop(lr); \
- __ Pop(ip); \
- __ Pop(r11); \
- __ Pop(r10); \
- __ Pop(r9); \
- __ Pop(r8); \
- __ Pop(r7); \
- __ Pop(r6); \
- __ Pop(r5); \
- __ Pop(r4); \
- __ Bx(lr); \
+#define END() \
+ harness_scratch.Exclude(ip); \
+ core.Dump(&masm); \
+ __ Pop(lr); \
+ __ Pop(ip); \
+ __ Pop(r11); \
+ __ Pop(r10); \
+ __ Pop(r9); \
+ __ Pop(r8); \
+ __ Pop(r7); \
+ __ Pop(r6); \
+ __ Pop(r5); \
+ __ Pop(r4); \
+ __ Bx(lr); \
__ FinalizeCode();
// Execute the generated code from the MacroAssembler's automatic code buffer.
// Note the offset for ExecuteMemory since the PCS requires that
// the address be odd in the case of branching to T32 code.
-#define RUN() \
- DISASSEMBLE(); \
- { \
- int pcs_offset = masm.IsUsingT32() ? 1 : 0; \
- masm.GetBuffer()->SetExecutable(); \
- ExecuteMemory(masm.GetBuffer()->GetStartAddress<byte*>(), \
- masm.GetSizeOfCodeGenerated(), \
- pcs_offset); \
- masm.GetBuffer()->SetWritable(); \
+#define RUN() \
+ DISASSEMBLE(); \
+ { \
+ int pcs_offset = masm.IsUsingT32() ? 1 : 0; \
+ masm.GetBuffer()->SetExecutable(); \
+ ExecuteMemory(masm.GetBuffer()->GetStartAddress<byte*>(), \
+ masm.GetSizeOfCodeGenerated(), \
+ pcs_offset); \
+ masm.GetBuffer()->SetWritable(); \
}
-#define TEARDOWN() \
- harness_scratch.Close();
+#define TEARDOWN() harness_scratch.Close();
#endif // ifdef VIXL_INCLUDE_SIMULATOR_AARCH32
@@ -194,27 +188,27 @@
#else
-#define ASSERT_EQUAL_32(expected, result) \
+#define ASSERT_EQUAL_32(expected, result) \
VIXL_CHECK(Equal32(expected, &core, result))
-#define ASSERT_EQUAL_64(expected, result) \
+#define ASSERT_EQUAL_64(expected, result) \
VIXL_CHECK(Equal64(expected, &core, result))
-#define ASSERT_EQUAL_128(expected_h, expected_l, result) \
+#define ASSERT_EQUAL_128(expected_h, expected_l, result) \
VIXL_CHECK(Equal128(expected_h, expected_l, &core, result))
-#define ASSERT_EQUAL_FP32(expected, result) \
+#define ASSERT_EQUAL_FP32(expected, result) \
VIXL_CHECK(EqualFP32(expected, &core, result))
-#define ASSERT_EQUAL_FP64(expected, result) \
+#define ASSERT_EQUAL_FP64(expected, result) \
VIXL_CHECK(EqualFP64(expected, &core, result))
-#define ASSERT_EQUAL_NZCV(expected) \
+#define ASSERT_EQUAL_NZCV(expected) \
VIXL_CHECK(EqualNzcv(expected, core.flags_nzcv()))
#endif
-#define DISASSEMBLE() \
+#define DISASSEMBLE() \
if (Test::disassemble()) { \
PrintDisassembler dis(std::cout, 0); \
if (masm.IsUsingT32()) { \
@@ -673,41 +667,67 @@
}
-// TODO: fix this test in T32.
-TEST_A32(adr) {
+TEST(adr_in_range) {
SETUP();
Label label_1, label_2, label_3, label_4;
START();
- __ Mov(r0, 0x0);
- __ Adr(r1, &label_3); // Set to zero to indicate success.
+ {
+ size_t size_of_generated_code;
+ if (masm.IsUsingA32()) {
+ size_of_generated_code = 18 * kA32InstructionSizeInBytes;
+ } else {
+ size_of_generated_code = 18 * k32BitT32InstructionSizeInBytes +
+ 3 * k16BitT32InstructionSizeInBytes;
+ }
+ ExactAssemblyScope scope(&masm,
+ size_of_generated_code,
+ ExactAssemblyScope::kExactSize);
- __ Adr(r2, &label_1); // Multiple forward references to the same label.
- __ Adr(r3, &label_1);
- __ Adr(r4, &label_1);
+ __ mov(r0, 0x0); // Set to zero to indicate success.
+ __ adr(r1, &label_3);
- __ Bind(&label_2);
- __ Eor(r5, r2, r3); // Ensure that r2,r3 and r4 are identical.
- __ Eor(r6, r2, r4);
- __ Mov(r0, r5);
- __ Mov(r0, r6);
- __ Bx(r2); // label_1, label_3
+ __ adr(r2, &label_1); // Multiple forward references to the same label.
+ __ adr(r3, &label_1);
+ __ adr(r4, &label_1);
- __ Bind(&label_3);
- __ Adr(r2, &label_3); // Self-reference (offset 0).
- __ Eor(r1, r1, r2);
- __ Adr(r2, &label_4); // Simple forward reference.
- __ Bx(r2); // label_4
+ __ bind(&label_2);
+ __ eor(r5, r2, r3); // Ensure that r2, r3 and r4 are identical.
+ __ eor(r6, r2, r4);
+ __ orr(r0, r5, r6);
+ if (masm.IsUsingT32()) {
+ // The jump target needs to have its least significant bit set to indicate
+ // that we are jumping into thumb mode.
+ __ orr(r2, r2, 1);
+ }
+ __ bx(r2); // label_1, label_3
- __ Bind(&label_1);
- __ Adr(r2, &label_3); // Multiple reverse references to the same label.
- __ Adr(r3, &label_3);
- __ Adr(r4, &label_3);
- __ Adr(r5, &label_2); // Simple reverse reference.
- __ Bx(r5); // label_2
+ __ bind(&label_3);
+ __ adr(r2, &label_3); // Self-reference (offset 0).
+ __ eor(r1, r1, r2);
+ __ adr(r2, &label_4); // Simple forward reference.
+ if (masm.IsUsingT32()) {
+ // The jump target needs to have its least significant bit set to indicate
+ // that we are jumping into thumb mode.
+ __ orr(r2, r2, 1);
+ }
+ __ bx(r2); // label_4
- __ Bind(&label_4);
+ __ bind(&label_1);
+ __ adr(r2, &label_3); // Multiple reverse references to the same label.
+ __ adr(r3, &label_3);
+ __ adr(r4, &label_3);
+ __ adr(r5, &label_2); // Simple reverse reference.
+ if (masm.IsUsingT32()) {
+ // The jump target needs to have its least significant bit set to indicate
+ // that we are jumping into thumb mode.
+ __ orr(r5, r5, 1);
+ }
+ __ bx(r5); // label_2
+
+ __ bind(&label_4);
+ }
END();
RUN();
@@ -1219,9 +1239,12 @@
void EmitReusedLoadLiteralStressTest(InstructionSet isa, bool conditional) {
- // This test stresses loading a literal that is already in the literal pool, for
- // various positionings on the existing load from that literal. We try to exercise
- // cases where the two loads result in similar checkpoints for the literal pool.
+ // This test stresses loading a literal that is already in the literal pool,
+ // for
+ // various positionings on the existing load from that literal. We try to
+ // exercise
+ // cases where the two loads result in similar checkpoints for the literal
+ // pool.
SETUP();
const int ldrd_range = masm.IsUsingA32() ? 255 : 1020;
@@ -1253,9 +1276,12 @@
Literal<uint64_t> l1(0xcafebeefdeadbaba);
__ Ldr(r0, &l1);
- // Generate nops, in order to bring the checkpoints of the Ldr and Ldrd closer.
+ // Generate nops, in order to bring the checkpoints of the Ldr and Ldrd
+ // closer.
{
- ExactAssemblyScope scope(&masm, n * nop_size, ExactAssemblyScope::kExactSize);
+ ExactAssemblyScope scope(&masm,
+ n * nop_size,
+ ExactAssemblyScope::kExactSize);
for (int i = 0; i < n; ++i) {
__ nop();
}
@@ -1310,7 +1336,7 @@
Literal<uint64_t> l0(0xcafebeefdeadbaba);
__ Ldrd(r0, r1, &l0);
for (int i = 0; i < 10000; ++i) {
- __ Add (r2, r2, i);
+ __ Add(r2, r2, i);
__ Ldrd(r4, r5, &l0);
}
@@ -1332,8 +1358,6 @@
}
-
-
// Make sure calling a macro-assembler instruction will generate literal pools
// if needed.
TEST_T32(literal_pool_generated_by_macro_instruction) {
@@ -1434,20 +1458,20 @@
const int ldrd_range = masm->IsUsingA32() ? 255 : 1020;
// We want to emit code up to the maximum literal load range and ensure the
// pool has not been emitted. Compute the limit (end).
- ptrdiff_t end =
- AlignDown(
- // Align down the PC to 4 bytes as the instruction does when it's
- // executed.
- // The PC will be the cursor offset plus the architecture state PC
- // offset.
- AlignDown(masm->GetBuffer()->GetCursorOffset() +
- masm->GetArchitectureStatePCOffset(), 4) +
+ ptrdiff_t end = AlignDown(
+ // Align down the PC to 4 bytes as the instruction does when it's
+ // executed.
+ // The PC will be the cursor offset plus the architecture state PC
+ // offset.
+ AlignDown(masm->GetBuffer()->GetCursorOffset() +
+ masm->GetArchitectureStatePCOffset(),
+ 4) +
// Maximum range allowed to access the constant.
ldrd_range -
// The literal pool has a two instruction margin.
2 * kMaxInstructionSizeInBytes,
- // AlignDown to 4 byte as the literals will be 4 byte aligned.
- 4);
+ // AlignDown to 4 byte as the literals will be 4 byte aligned.
+ 4);
// Create one literal pool entry.
__ Ldrd(r0, r1, 0x1234567890abcdef);
@@ -1577,7 +1601,8 @@
// sizes close to the maximum supported offset will produce code that executes
// correctly. As the Ldrd might or might not be rewinded, we do not assert on
// the size of the literal pool in this test.
-void EmitLdrdLiteralStressTest(InstructionSet isa, bool unaligned,
+void EmitLdrdLiteralStressTest(InstructionSet isa,
+ bool unaligned,
LiteralStressTestMode test_mode) {
SETUP();
@@ -1733,7 +1758,8 @@
const int ldrd_padding = ldrd_range - 2 * kA32InstructionSizeInBytes;
const int ldr_range = 4095;
// We need to take into account the ldrd padding and the ldrd instruction.
- const int ldr_padding = ldr_range - ldrd_padding - 2 * kA32InstructionSizeInBytes;
+ const int ldr_padding =
+ ldr_range - ldrd_padding - 2 * kA32InstructionSizeInBytes;
__ Ldr(r1, 0x12121212);
ASSERT_LITERAL_POOL_SIZE(4);
@@ -1884,13 +1910,12 @@
};
-const LdrLiteralRangeTest kLdrLiteralRangeTestData[] = {
- {&MacroAssembler::Ldr, r1, 4095, 4095, 0x12345678, 0x12345678 },
- {&MacroAssembler::Ldrh, r2, 255, 4095, 0xabcdefff, 0x0000efff },
- {&MacroAssembler::Ldrsh, r3, 255, 4095, 0x00008765, 0xffff8765 },
- {&MacroAssembler::Ldrb, r4, 4095, 4095, 0x12345678, 0x00000078 },
- {&MacroAssembler::Ldrsb, r5, 255, 4095, 0x00000087, 0xffffff87 }
-};
+const LdrLiteralRangeTest kLdrLiteralRangeTestData[] =
+ {{&MacroAssembler::Ldr, r1, 4095, 4095, 0x12345678, 0x12345678},
+ {&MacroAssembler::Ldrh, r2, 255, 4095, 0xabcdefff, 0x0000efff},
+ {&MacroAssembler::Ldrsh, r3, 255, 4095, 0x00008765, 0xffff8765},
+ {&MacroAssembler::Ldrb, r4, 4095, 4095, 0x12345678, 0x00000078},
+ {&MacroAssembler::Ldrsb, r5, 255, 4095, 0x00000087, 0xffffff87}};
void GenerateLdrLiteralTriggerPoolEmission(InstructionSet isa,
@@ -1930,7 +1955,9 @@
int end = masm.GetCursorOffset() + space;
{
// Generate nops precisely to fill the buffer.
- ExactAssemblyScope accurate_scope(&masm, space); // This should not trigger emission of the pool.
+ ExactAssemblyScope accurate_scope(&masm, space); // This should not
+ // trigger emission of
+ // the pool.
VIXL_CHECK(!masm.LiteralPoolIsEmpty());
while (masm.GetCursorOffset() < end) {
__ nop();
@@ -1991,9 +2018,8 @@
// Generate enough instruction so that we go out of range for the load
// literal we just emitted.
- ptrdiff_t end =
- masm.GetBuffer()->GetCursorOffset() +
- ((masm.IsUsingA32()) ? test.a32_range : test.t32_range);
+ ptrdiff_t end = masm.GetBuffer()->GetCursorOffset() +
+ ((masm.IsUsingA32()) ? test.a32_range : test.t32_range);
while (masm.GetBuffer()->GetCursorOffset() < end) {
__ Mov(r0, 0);
}
@@ -2013,9 +2039,7 @@
}
-TEST(ldr_literal_range) {
- GenerateLdrLiteralRangeTest(isa, false);
-}
+TEST(ldr_literal_range) { GenerateLdrLiteralRangeTest(isa, false); }
TEST_T32(ldr_literal_range_unaligned) {
@@ -2102,7 +2126,7 @@
Literal<uint64_t> l0(0xcafebeefdeadbaba, RawLiteral::kManuallyPlaced);
Literal<int32_t> l1(0x12345678, RawLiteral::kManuallyPlaced);
- Literal<uint16_t>l2(4567, RawLiteral::kManuallyPlaced);
+ Literal<uint16_t> l2(4567, RawLiteral::kManuallyPlaced);
Literal<int16_t> l3(-4567, RawLiteral::kManuallyPlaced);
Literal<uint8_t> l4(123, RawLiteral::kManuallyPlaced);
Literal<int8_t> l5(-123, RawLiteral::kManuallyPlaced);
@@ -2196,7 +2220,7 @@
VIXL_CHECK(before.IsBound());
VIXL_CHECK(!after.IsBound());
- // Load the entries several times to test that literals can be shared.
+ // Load the entries several times to test that literals can be shared.
for (int i = 0; i < 20; i++) {
(masm.*test.instruction)(r0, &before);
(masm.*test.instruction)(r1, &after);
@@ -2704,7 +2728,6 @@
}
-
// This test check that we can update a Literal after usage.
TEST(literal_update) {
SETUP();
@@ -2714,16 +2737,16 @@
Literal<uint32_t>* a32 =
new Literal<uint32_t>(0xabcdef01, RawLiteral::kDeletedOnPoolDestruction);
Literal<uint64_t>* a64 =
- new Literal<uint64_t>(
- UINT64_C(0xabcdef01abcdef01), RawLiteral::kDeletedOnPoolDestruction);
+ new Literal<uint64_t>(UINT64_C(0xabcdef01abcdef01),
+ RawLiteral::kDeletedOnPoolDestruction);
__ Ldr(r0, a32);
__ Ldrd(r2, r3, a64);
__ EmitLiteralPool();
Literal<uint32_t>* b32 =
new Literal<uint32_t>(0x10fedcba, RawLiteral::kDeletedOnPoolDestruction);
Literal<uint64_t>* b64 =
- new Literal<uint64_t>(
- UINT64_C(0x10fedcba10fedcba), RawLiteral::kDeletedOnPoolDestruction);
+ new Literal<uint64_t>(UINT64_C(0x10fedcba10fedcba),
+ RawLiteral::kDeletedOnPoolDestruction);
__ Ldr(r1, b32);
__ Ldrd(r4, r5, b64);
// Update literals' values. "a32" and "a64" are already emitted. "b32" and
@@ -2745,101 +2768,6 @@
}
-void SwitchCase(JumpTableBase* switch_, uint32_t case_index,
- InstructionSet isa, bool bind_default = true) {
- SETUP();
-
- START();
-
- __ Mov(r0, case_index);
- __ Mov(r1, case_index);
- __ Switch(r1, switch_);
-
- __ Case(switch_, 0);
- __ Mov(r0, 1);
- __ Break(switch_);
-
- __ Case(switch_, 1);
- __ Mov(r0, 2);
- __ Break(switch_);
-
- __ Case(switch_, 2);
- __ Mov(r0, 4);
- __ Break(switch_);
-
- __ Case(switch_, 3);
- __ Mov(r0, 8);
- __ Break(switch_);
-
- if (bind_default) {
- __ Default(switch_);
- __ Mov(r0, -1);
- }
-
- __ EndSwitch(switch_);
-
-
- END();
-
- RUN();
-
- if (case_index < 4) {
- ASSERT_EQUAL_32(1 << case_index, r0);
- } else if (bind_default) {
- ASSERT_EQUAL_32(-1, r0);
- } else {
- ASSERT_EQUAL_32(case_index, r0);
- }
-}
-
-
-TEST(switch_case_8) {
- for (int i = 0; i < 5; i++) {
- JumpTable8bitOffset switch_(5);
- SwitchCase(&switch_, i, isa);
- }
-}
-
-
-TEST(switch_case_16) {
- for (int i = 0; i < 5; i++) {
- JumpTable16bitOffset switch_(5);
- SwitchCase(&switch_, i, isa);
- }
-}
-
-
-TEST(switch_case_32) {
- for (int i = 0; i < 5; i++) {
- JumpTable32bitOffset switch_(5);
- SwitchCase(&switch_, i, isa);
- }
-}
-
-
-TEST(switch_case_8_omit_default) {
- for (int i = 0; i < 5; i++) {
- JumpTable8bitOffset switch_(5);
- SwitchCase(&switch_, i, isa, false);
- }
-}
-
-
-TEST(switch_case_16_omit_default) {
- for (int i = 0; i < 5; i++) {
- JumpTable16bitOffset switch_(5);
- SwitchCase(&switch_, i, isa, false);
- }
-}
-
-
-TEST(switch_case_32_omit_default) {
- for (int i = 0; i < 5; i++) {
- JumpTable32bitOffset switch_(5);
- SwitchCase(&switch_, i, isa, false);
- }
-}
-
TEST(claim_peek_poke) {
SETUP();
@@ -2941,7 +2869,7 @@
__ Msr(APSR_nzcvqg, r0);
__ Mov(r0, sp);
__ Printf("sp=%x\n", r0);
-// __ Printf("Hello world!\n");
+ // __ Printf("Hello world!\n");
__ Mov(r0, 0x1234);
__ Mov(r1, 0x5678);
StringLiteral literal("extra string");
@@ -3079,7 +3007,7 @@
}
-template<typename T>
+template <typename T>
void CheckInstructionSetA32(const T& assm) {
VIXL_CHECK(assm.IsUsingA32());
VIXL_CHECK(!assm.IsUsingT32());
@@ -3087,7 +3015,7 @@
}
-template<typename T>
+template <typename T>
void CheckInstructionSetT32(const T& assm) {
VIXL_CHECK(assm.IsUsingT32());
VIXL_CHECK(!assm.IsUsingA32());
@@ -3143,9 +3071,9 @@
TEST_NOASM(set_isa_empty) {
- // It is possible to change the instruction set if no instructions have yet
- // been generated. This test only makes sense when both A32 and T32 are
- // supported.
+// It is possible to change the instruction set if no instructions have yet
+// been generated. This test only makes sense when both A32 and T32 are
+// supported.
#ifdef VIXL_INCLUDE_TARGET_AARCH32
Assembler assm;
CheckInstructionSetA32(assm);
@@ -3173,8 +3101,8 @@
TEST_NOASM(set_isa_noop) {
- // It is possible to call a no-op UseA32/T32 or UseInstructionSet even if
- // one or more instructions have been generated.
+// It is possible to call a no-op UseA32/T32 or UseInstructionSet even if
+// one or more instructions have been generated.
#ifdef VIXL_INCLUDE_TARGET_A32
{
Assembler assm(A32);
@@ -3315,8 +3243,8 @@
{
UseScratchRegisterScope temps(&masm);
// 'ip' is a scratch register by default.
- VIXL_CHECK(
- masm.GetScratchRegisterList()->GetList() == (1u << ip.GetCode()));
+ VIXL_CHECK(masm.GetScratchRegisterList()->GetList() ==
+ (1u << ip.GetCode()));
VIXL_CHECK(temps.IsAvailable(ip));
// Integer registers have no complicated aliasing so
@@ -3546,7 +3474,6 @@
// ratio in the sequence is fixed at 4:1 by the ratio of cases.
for (int case_count = 6; case_count < 37; case_count++) {
for (int iter = 0; iter < iterations; iter++) {
-
// Reset local state.
allbound = false;
l = new Label[label_count];
@@ -3560,7 +3487,7 @@
uint32_t label_index = static_cast<uint32_t>(mrand48()) % label_count;
switch (inst_case) {
- case 0: // Bind.
+ case 0: // Bind.
if (!l[label_index].IsBound()) {
__ Bind(&l[label_index]);
@@ -3570,15 +3497,27 @@
__ Add(r1, r1, 1);
}
break;
- case 1: // Compare and branch if zero (untaken as r0 == 1).
- case 2:
+ case 1: // Compare and branch if zero (untaken as r0 == 1).
__ Cbz(r0, &l[label_index]);
break;
- case 3: // Conditional branch (untaken as Z set) preferred near.
- case 4:
- __ BPreferNear(ne, &l[label_index]);
+ case 2: { // Compare and branch if not zero.
+ Label past_branch;
+ __ B(eq, &past_branch, kNear);
+ __ Cbnz(r0, &l[label_index]);
+ __ Bind(&past_branch);
break;
- default: // Nop.
+ }
+ case 3: { // Unconditional branch preferred near.
+ Label past_branch;
+ __ B(eq, &past_branch, kNear);
+ __ B(&l[label_index], kNear);
+ __ Bind(&past_branch);
+ break;
+ }
+ case 4: // Conditional branch (untaken as Z set) preferred near.
+ __ B(ne, &l[label_index], kNear);
+ break;
+ default: // Nop.
__ Nop();
break;
}
@@ -3617,7 +3556,9 @@
{
// Fill the buffer with nops.
- ExactAssemblyScope scope(&masm, kBaseBufferSize, ExactAssemblyScope::kExactSize);
+ ExactAssemblyScope scope(&masm,
+ kBaseBufferSize,
+ ExactAssemblyScope::kExactSize);
for (int i = 0; i < kBaseBufferSize; i += k16BitT32InstructionSizeInBytes) {
__ nop();
}
@@ -3696,24 +3637,25 @@
// Add enough nops to exceed the range of all loads.
int space = 5000;
{
- ExactAssemblyScope scope(&masm,
- space,
- CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope scope(&masm, space, CodeBufferCheckScope::kExactSize);
VIXL_ASSERT(masm.IsUsingT32());
for (int i = 0; i < space; i += k16BitT32InstructionSizeInBytes) {
__ nop();
}
}
-#define ENSURE_ALIGNED() do { \
- if (!IsMultiple<k32BitT32InstructionSizeInBytes>(masm.GetCursorOffset())) { \
- ExactAssemblyScope scope(&masm, k16BitT32InstructionSizeInBytes, \
- ExactAssemblyScope::kExactSize); \
- __ nop(); \
- } \
- VIXL_ASSERT(IsMultiple<k32BitT32InstructionSizeInBytes>( \
- masm.GetCursorOffset())); \
-} while(0)
+#define ENSURE_ALIGNED() \
+ do { \
+ if (!IsMultiple<k32BitT32InstructionSizeInBytes>( \
+ masm.GetCursorOffset())) { \
+ ExactAssemblyScope scope(&masm, \
+ k16BitT32InstructionSizeInBytes, \
+ ExactAssemblyScope::kExactSize); \
+ __ nop(); \
+ } \
+ VIXL_ASSERT( \
+ IsMultiple<k32BitT32InstructionSizeInBytes>(masm.GetCursorOffset())); \
+ } while (0)
// The literal has already been emitted, and is out of range of all of these
// instructions. The delegates must generate fix-up code.
@@ -3770,24 +3712,24 @@
// to only a two-byte boundary.
int space = 5002;
{
- ExactAssemblyScope scope(&masm,
- space,
- CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope scope(&masm, space, CodeBufferCheckScope::kExactSize);
VIXL_ASSERT(masm.IsUsingT32());
for (int i = 0; i < space; i += k16BitT32InstructionSizeInBytes) {
__ nop();
}
}
-#define ENSURE_NOT_ALIGNED() do { \
- if (IsMultiple<k32BitT32InstructionSizeInBytes>(masm.GetCursorOffset())) { \
- ExactAssemblyScope scope(&masm, k16BitT32InstructionSizeInBytes, \
- ExactAssemblyScope::kExactSize); \
- __ nop(); \
- } \
- VIXL_ASSERT(!IsMultiple<k32BitT32InstructionSizeInBytes>( \
- masm.GetCursorOffset())); \
-} while(0)
+#define ENSURE_NOT_ALIGNED() \
+ do { \
+ if (IsMultiple<k32BitT32InstructionSizeInBytes>(masm.GetCursorOffset())) { \
+ ExactAssemblyScope scope(&masm, \
+ k16BitT32InstructionSizeInBytes, \
+ ExactAssemblyScope::kExactSize); \
+ __ nop(); \
+ } \
+ VIXL_ASSERT( \
+ !IsMultiple<k32BitT32InstructionSizeInBytes>(masm.GetCursorOffset())); \
+ } while (0)
// The literal has already been emitted, and is out of range of all of these
// instructions. The delegates must generate fix-up code.
@@ -3865,24 +3807,25 @@
// be generated to read the PC.
int space = 4000;
{
- ExactAssemblyScope scope(&masm,
- space,
- CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope scope(&masm, space, CodeBufferCheckScope::kExactSize);
VIXL_ASSERT(masm.IsUsingT32());
for (int i = 0; i < space; i += k16BitT32InstructionSizeInBytes) {
__ nop();
}
}
-#define ENSURE_ALIGNED() do { \
- if (!IsMultiple<k32BitT32InstructionSizeInBytes>(masm.GetCursorOffset())) { \
- ExactAssemblyScope scope(&masm, k16BitT32InstructionSizeInBytes, \
- ExactAssemblyScope::kExactSize); \
- __ nop(); \
- } \
- VIXL_ASSERT(IsMultiple<k32BitT32InstructionSizeInBytes>( \
- masm.GetCursorOffset())); \
-} while(0)
+#define ENSURE_ALIGNED() \
+ do { \
+ if (!IsMultiple<k32BitT32InstructionSizeInBytes>( \
+ masm.GetCursorOffset())) { \
+ ExactAssemblyScope scope(&masm, \
+ k16BitT32InstructionSizeInBytes, \
+ ExactAssemblyScope::kExactSize); \
+ __ nop(); \
+ } \
+ VIXL_ASSERT( \
+ IsMultiple<k32BitT32InstructionSizeInBytes>(masm.GetCursorOffset())); \
+ } while (0)
// The literal has already been emitted, and is out of range of all of these
// instructions. The delegates must generate fix-up code.
@@ -3939,24 +3882,24 @@
// be generated to read the PC.
int space = 4000;
{
- ExactAssemblyScope scope(&masm,
- space,
- CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope scope(&masm, space, CodeBufferCheckScope::kExactSize);
VIXL_ASSERT(masm.IsUsingT32());
for (int i = 0; i < space; i += k16BitT32InstructionSizeInBytes) {
__ nop();
}
}
-#define ENSURE_NOT_ALIGNED() do { \
- if (IsMultiple<k32BitT32InstructionSizeInBytes>(masm.GetCursorOffset())) { \
- ExactAssemblyScope scope(&masm, k16BitT32InstructionSizeInBytes, \
- ExactAssemblyScope::kExactSize); \
- __ nop(); \
- } \
- VIXL_ASSERT(!IsMultiple<k32BitT32InstructionSizeInBytes>( \
- masm.GetCursorOffset())); \
-} while(0)
+#define ENSURE_NOT_ALIGNED() \
+ do { \
+ if (IsMultiple<k32BitT32InstructionSizeInBytes>(masm.GetCursorOffset())) { \
+ ExactAssemblyScope scope(&masm, \
+ k16BitT32InstructionSizeInBytes, \
+ ExactAssemblyScope::kExactSize); \
+ __ nop(); \
+ } \
+ VIXL_ASSERT( \
+ !IsMultiple<k32BitT32InstructionSizeInBytes>(masm.GetCursorOffset())); \
+ } while (0)
// The literal has already been emitted, and is out of range of all of these
// instructions. The delegates must generate fix-up code.
@@ -4009,17 +3952,18 @@
// Refer to the literal so that it is emitted early.
__ Ldr(r0, literal);
-#define PAD_WITH_NOPS(space) do { \
- { \
- ExactAssemblyScope scope(&masm, \
- space, \
- CodeBufferCheckScope::kExactSize); \
- VIXL_ASSERT(masm.IsUsingT32()); \
- for (int i = 0; i < space; i += k16BitT32InstructionSizeInBytes) { \
- __ nop(); \
- } \
- } \
-} while(0)
+#define PAD_WITH_NOPS(space) \
+ do { \
+ { \
+ ExactAssemblyScope scope(&masm, \
+ space, \
+ CodeBufferCheckScope::kExactSize); \
+ VIXL_ASSERT(masm.IsUsingT32()); \
+ for (int i = 0; i < space; i += k16BitT32InstructionSizeInBytes) { \
+ __ nop(); \
+ } \
+ } \
+ } while (0)
// Add enough nops to exceed the range of all loads.
PAD_WITH_NOPS(5000);
@@ -4750,7 +4694,7 @@
// However, as the literal emission would put veneers out of range.
VIXL_ASSERT(masm.GetMarginBeforeVeneerEmission() <
kTypicalMacroInstructionMaxSize +
- static_cast<int32_t>(masm.GetLiteralPoolSize()));
+ static_cast<int32_t>(masm.GetLiteralPoolSize()));
// This extra Nop will generate the literal pool and before that the veneer
// pool.
@@ -4839,7 +4783,9 @@
uint32_t value = 0x1234567;
Literal<uint32_t>* literal =
- new Literal<uint32_t>(value, RawLiteral::kPlacedWhenUsed, RawLiteral::kDeletedOnPoolDestruction);
+ new Literal<uint32_t>(value,
+ RawLiteral::kPlacedWhenUsed,
+ RawLiteral::kDeletedOnPoolDestruction);
__ Ldr(r11, literal);
@@ -4849,12 +4795,10 @@
const int NUM_RANGE = 58;
const int NUM1 = NUM_NOPS - NUM_RANGE;
- const int NUM2 = NUM_RANGE ;
+ const int NUM2 = NUM_RANGE;
{
- ExactAssemblyScope aas(&masm,
- 2 * NUM1,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(&masm, 2 * NUM1, CodeBufferCheckScope::kMaximumSize);
for (int i = 0; i < NUM1; i++) {
__ nop();
}
@@ -4863,18 +4807,14 @@
__ Cbz(r1, &end);
{
- ExactAssemblyScope aas(&masm,
- 2 * NUM2,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(&masm, 2 * NUM2, CodeBufferCheckScope::kMaximumSize);
for (int i = 0; i < NUM2; i++) {
__ nop();
}
}
{
- ExactAssemblyScope aas(&masm,
- 4,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(&masm, 4, CodeBufferCheckScope::kMaximumSize);
__ add(r1, r1, 3);
}
__ Bind(&end);
@@ -4904,7 +4844,6 @@
int first_test = 2000;
// Test on both sizes of the Adr range which is 4095.
for (int test = 0; test < kTestCount; test++) {
-
const int string_size = 1000; // A lot more than the cbz range.
std::string test_string(string_size, 'x');
StringLiteral big_literal(test_string.c_str());
@@ -4924,9 +4863,7 @@
__ Cbz(r1, &labels[test]);
{
- ExactAssemblyScope aas(&masm,
- 4,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(&masm, 4, CodeBufferCheckScope::kMaximumSize);
__ add(r1, r1, 3);
}
__ Bind(&labels[test]);
@@ -4964,9 +4901,7 @@
// order to reach the maximum range of ldrd and cbz at the same time.
{
int nop_size = kLdrdRange - kCbzCbnzRange - 5 * kSizeForCbz;
- ExactAssemblyScope scope(&masm,
- nop_size,
- CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope scope(&masm, nop_size, CodeBufferCheckScope::kExactSize);
for (int i = 0; i < nop_size; i += k16BitT32InstructionSizeInBytes) {
__ nop();
}
@@ -4988,9 +4923,7 @@
// This scope will generate both veneers (they are both out of range).
{
int nop_size = kCbzCbnzRange;
- ExactAssemblyScope scope(&masm,
- nop_size,
- CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope scope(&masm, nop_size, CodeBufferCheckScope::kExactSize);
for (int i = 0; i < nop_size; i += k16BitT32InstructionSizeInBytes) {
__ nop();
}
@@ -5089,8 +5022,8 @@
// for).
const int32_t kTypicalMacroInstructionMaxSize =
8 * kMaxInstructionSizeInBytes;
- int32_t margin = masm.GetMarginBeforeLiteralEmission()
- - kTypicalMacroInstructionMaxSize;
+ int32_t margin =
+ masm.GetMarginBeforeLiteralEmission() - kTypicalMacroInstructionMaxSize;
int32_t end = masm.GetCursorOffset() + margin;
{
@@ -5099,8 +5032,8 @@
__ nop();
}
}
- VIXL_CHECK(masm.GetMarginBeforeLiteralEmission()
- == kTypicalMacroInstructionMaxSize);
+ VIXL_CHECK(masm.GetMarginBeforeLiteralEmission() ==
+ kTypicalMacroInstructionMaxSize);
// We cannot use an IT block for this instruction, hence ITScope will
// generate a branch over it.
@@ -5125,16 +5058,18 @@
START();
- const uint32_t src[4] = { 0x12345678, 0x09abcdef, 0xc001c0de, 0xdeadbeef };
- uint32_t dst1[4] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000 };
- uint32_t dst2[4] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000 };
+ const uint32_t src[4] = {0x12345678, 0x09abcdef, 0xc001c0de, 0xdeadbeef};
+ uint32_t dst1[4] = {0x00000000, 0x00000000, 0x00000000, 0x00000000};
+ uint32_t dst2[4] = {0x00000000, 0x00000000, 0x00000000, 0x00000000};
__ Mov(r0, reinterpret_cast<uintptr_t>(src));
- __ Ldm(r0, NO_WRITE_BACK, RegisterList(r1, r2, r3, r4));;
+ __ Ldm(r0, NO_WRITE_BACK, RegisterList(r1, r2, r3, r4));
+ ;
__ Ldm(r0, NO_WRITE_BACK, RegisterList(r5, r6, r9, r11));
__ Mov(r0, reinterpret_cast<uintptr_t>(dst1));
- __ Stm(r0, NO_WRITE_BACK, RegisterList(r1, r2, r3, r4));;
+ __ Stm(r0, NO_WRITE_BACK, RegisterList(r1, r2, r3, r4));
+ ;
__ Mov(r0, reinterpret_cast<uintptr_t>(dst2));
__ Stm(r0, NO_WRITE_BACK, RegisterList(r5, r6, r9, r11));
@@ -5172,13 +5107,21 @@
START();
- const uint32_t src[4] = { 0x12345678, 0x09abcdef, 0xc001c0de, 0xdeadbeef };
- uint32_t dst[8] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000 };
+ const uint32_t src[4] = {0x12345678, 0x09abcdef, 0xc001c0de, 0xdeadbeef};
+ uint32_t dst[8] = {0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000};
__ Mov(r0, reinterpret_cast<uintptr_t>(src));
- __ Ldm(r0, WRITE_BACK, RegisterList(r2, r3));;
- __ Ldm(r0, WRITE_BACK, RegisterList(r4, r5));;
+ __ Ldm(r0, WRITE_BACK, RegisterList(r2, r3));
+ ;
+ __ Ldm(r0, WRITE_BACK, RegisterList(r4, r5));
+ ;
__ Mov(r1, reinterpret_cast<uintptr_t>(dst));
__ Stm(r1, WRITE_BACK, RegisterList(r2, r3, r4, r5));
@@ -5214,11 +5157,11 @@
START();
- const uint32_t src1[4] = { 0x33333333, 0x44444444, 0x11111111, 0x22222222 };
- const uint32_t src2[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
+ const uint32_t src1[4] = {0x33333333, 0x44444444, 0x11111111, 0x22222222};
+ const uint32_t src2[4] = {0x11111111, 0x22222222, 0x33333333, 0x44444444};
- uint32_t dst1[4] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000 };
- uint32_t dst2[4] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000 };
+ uint32_t dst1[4] = {0x00000000, 0x00000000, 0x00000000, 0x00000000};
+ uint32_t dst2[4] = {0x00000000, 0x00000000, 0x00000000, 0x00000000};
__ Mov(r11, reinterpret_cast<uintptr_t>(src1 + 3));
__ Ldmda(r11, WRITE_BACK, RegisterList(r0, r1));
@@ -5242,7 +5185,7 @@
RUN();
ASSERT_EQUAL_32(reinterpret_cast<uintptr_t>(src1 + 1), r11);
- ASSERT_EQUAL_32(reinterpret_cast<uintptr_t>(src2 + 1), r10);
+ ASSERT_EQUAL_32(reinterpret_cast<uintptr_t>(src2 + 1), r10);
ASSERT_EQUAL_32(reinterpret_cast<uintptr_t>(dst1 + 1), r9);
ASSERT_EQUAL_32(reinterpret_cast<uintptr_t>(dst2 + 1), r8);
@@ -5275,12 +5218,11 @@
START();
- const uint32_t src[6] = { 0x55555555, 0x66666666,
- 0x33333333, 0x44444444,
- 0x11111111, 0x22222222 };
+ const uint32_t src[6] =
+ {0x55555555, 0x66666666, 0x33333333, 0x44444444, 0x11111111, 0x22222222};
- uint32_t dst[6] = { 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000 };
+ uint32_t dst[6] =
+ {0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000};
__ Mov(r11, reinterpret_cast<uintptr_t>(src + 6));
__ Ldmdb(r11, WRITE_BACK, RegisterList(r1, r2));
@@ -5336,19 +5278,32 @@
__ Bind(&func1);
__ Mov(r0, 0x11111111);
__ Push(lr);
- __ Adr(r11, &func2);
- if (masm.IsUsingT32()) {
- // The jump target needs to have its least significant bit set to indicate
- // that we are jumping into thumb mode.
- __ Orr(r11, r11, 1);
- }
- __ Blx(r11);
- __ Pop(lr);
- __ Bx(lr);
+ {
+ size_t size_of_generated_code;
+ if (masm.IsUsingA32()) {
+ size_of_generated_code = 7 * kA32InstructionSizeInBytes;
+ } else {
+ size_of_generated_code = 5 * k32BitT32InstructionSizeInBytes +
+ 3 * k16BitT32InstructionSizeInBytes;
+ }
+ ExactAssemblyScope scope(&masm,
+ size_of_generated_code,
+ ExactAssemblyScope::kExactSize);
+ __ adr(r11, &func2);
+ if (masm.IsUsingT32()) {
+ // The jump target needs to have its least significant bit set to indicate
+ // that we are jumping into thumb mode.
+ __ orr(r11, r11, 1);
+ }
+ __ blx(r11);
+ __ pop(lr);
+ __ bx(lr);
- __ Bind(&func2);
- __ Mov(r1, 0x22222222);
- __ Bx(lr);
+ __ bind(&func2);
+ __ movw(r1, 0x2222);
+ __ movt(r1, 0x2222);
+ __ bx(lr);
+ }
__ Bind(&test_start);
__ Mov(r0, 0xdeadc0de);
@@ -5678,51 +5633,55 @@
}
-#define CHECK_SIZE_MATCH(ASM1, ASM2) \
- { \
- MacroAssembler masm1(BUF_SIZE); \
- masm1.UseInstructionSet(isa); \
- VIXL_ASSERT(masm1.GetCursorOffset() == 0); \
- masm1.ASM1; \
- masm1.FinalizeCode(); \
- int size1 = masm1.GetCursorOffset(); \
- \
- MacroAssembler masm2(BUF_SIZE); \
- masm2.UseInstructionSet(isa); \
- VIXL_ASSERT(masm2.GetCursorOffset() == 0); \
- masm2.ASM2; \
- masm2.FinalizeCode(); \
- int size2 = masm2.GetCursorOffset(); \
- \
- bool disassemble = Test::disassemble(); \
- if (size1 != size2) { \
- printf("Sizes did not match:\n"); \
- disassemble = true; \
- } \
- if (disassemble) { \
- PrintDisassembler dis(std::cout, 0); \
- printf("// " #ASM1 "\n"); \
- if (masm1.IsUsingT32()) { \
- dis.DisassembleT32Buffer( \
- masm1.GetBuffer()->GetStartAddress<uint16_t*>(), size1); \
- } else { \
- dis.DisassembleA32Buffer( \
- masm1.GetBuffer()->GetStartAddress<uint32_t*>(), size1); \
- } \
- printf("\n"); \
- \
- dis.SetCodeAddress(0); \
- printf("// " #ASM2 "\n"); \
- if (masm2.IsUsingT32()) { \
- dis.DisassembleT32Buffer( \
- masm2.GetBuffer()->GetStartAddress<uint16_t*>(), size2); \
- } else { \
- dis.DisassembleA32Buffer( \
- masm2.GetBuffer()->GetStartAddress<uint32_t*>(), size2); \
- } \
- printf("\n"); \
- } \
- VIXL_CHECK(size1 == size2); \
+#define CHECK_SIZE_MATCH(ASM1, ASM2) \
+ { \
+ MacroAssembler masm1(BUF_SIZE); \
+ masm1.UseInstructionSet(isa); \
+ VIXL_ASSERT(masm1.GetCursorOffset() == 0); \
+ masm1.ASM1; \
+ masm1.FinalizeCode(); \
+ int size1 = masm1.GetCursorOffset(); \
+ \
+ MacroAssembler masm2(BUF_SIZE); \
+ masm2.UseInstructionSet(isa); \
+ VIXL_ASSERT(masm2.GetCursorOffset() == 0); \
+ masm2.ASM2; \
+ masm2.FinalizeCode(); \
+ int size2 = masm2.GetCursorOffset(); \
+ \
+ bool disassemble = Test::disassemble(); \
+ if (size1 != size2) { \
+ printf("Sizes did not match:\n"); \
+ disassemble = true; \
+ } \
+ if (disassemble) { \
+ PrintDisassembler dis(std::cout, 0); \
+ printf("// " #ASM1 "\n"); \
+ if (masm1.IsUsingT32()) { \
+ dis.DisassembleT32Buffer(masm1.GetBuffer() \
+ ->GetStartAddress<uint16_t*>(), \
+ size1); \
+ } else { \
+ dis.DisassembleA32Buffer(masm1.GetBuffer() \
+ ->GetStartAddress<uint32_t*>(), \
+ size1); \
+ } \
+ printf("\n"); \
+ \
+ dis.SetCodeAddress(0); \
+ printf("// " #ASM2 "\n"); \
+ if (masm2.IsUsingT32()) { \
+ dis.DisassembleT32Buffer(masm2.GetBuffer() \
+ ->GetStartAddress<uint16_t*>(), \
+ size2); \
+ } else { \
+ dis.DisassembleA32Buffer(masm2.GetBuffer() \
+ ->GetStartAddress<uint32_t*>(), \
+ size2); \
+ } \
+ printf("\n"); \
+ } \
+ VIXL_CHECK(size1 == size2); \
}
@@ -5739,8 +5698,7 @@
// CHECK_SIZE_MATCH(Adc(DontCare, eq, r7, r6, r7),
// Adc(DontCare, eq, r7, r7, r6));
- CHECK_SIZE_MATCH(Add(DontCare, r1, r2, r7),
- Add(DontCare, r1, r7, r2));
+ CHECK_SIZE_MATCH(Add(DontCare, r1, r2, r7), Add(DontCare, r1, r7, r2));
CHECK_SIZE_MATCH(Add(DontCare, lt, r1, r2, r7),
Add(DontCare, lt, r1, r7, r2));
@@ -5788,17 +5746,14 @@
// Orr(DontCare, eq, r7, r6, r7));
- CHECK_SIZE_MATCH(Adc(r7, r6, r7),
- Adc(r7, r7, r6));
+ CHECK_SIZE_MATCH(Adc(r7, r6, r7), Adc(r7, r7, r6));
// CHECK_SIZE_MATCH(Adc(eq, r7, r6, r7),
// Adc(eq, r7, r7, r6));
- CHECK_SIZE_MATCH(Add(r1, r2, r7),
- Add(r1, r7, r2));
+ CHECK_SIZE_MATCH(Add(r1, r2, r7), Add(r1, r7, r2));
- CHECK_SIZE_MATCH(Add(lt, r1, r2, r7),
- Add(lt, r1, r7, r2));
+ CHECK_SIZE_MATCH(Add(lt, r1, r2, r7), Add(lt, r1, r7, r2));
// CHECK_SIZE_MATCH(Add(r4, r4, r10),
// Add(r4, r10, r4));
@@ -5818,26 +5773,22 @@
// CHECK_SIZE_MATCH(Add(eq, sp, sp, r10),
// Add(eq, sp, r10, sp));
- CHECK_SIZE_MATCH(And(r7, r7, r6),
- And(r7, r6, r7));
+ CHECK_SIZE_MATCH(And(r7, r7, r6), And(r7, r6, r7));
// CHECK_SIZE_MATCH(And(eq, r7, r7, r6),
// And(eq, r7, r6, r7));
- CHECK_SIZE_MATCH(Eor(r7, r7, r6),
- Eor(r7, r6, r7));
+ CHECK_SIZE_MATCH(Eor(r7, r7, r6), Eor(r7, r6, r7));
// CHECK_SIZE_MATCH(Eor(eq, r7, r7, r6),
// Eor(eq, r7, r6, r7));
- CHECK_SIZE_MATCH(Mul(r0, r1, r0),
- Mul(r0, r0, r1));
+ CHECK_SIZE_MATCH(Mul(r0, r1, r0), Mul(r0, r0, r1));
// CHECK_SIZE_MATCH(Mul(eq, r0, r1, r0),
// Mul(eq, r0, r0, r1));
- CHECK_SIZE_MATCH(Orr(r7, r7, r6),
- Orr(r7, r6, r7));
+ CHECK_SIZE_MATCH(Orr(r7, r7, r6), Orr(r7, r6, r7));
// CHECK_SIZE_MATCH(Orr(eq, r7, r7, r6),
// Orr(eq, r7, r6, r7));
@@ -5849,29 +5800,21 @@
// CHECK_SIZE_MATCH(Adcs(eq, r7, r6, r7),
// Adcs(eq, r7, r7, r6));
- CHECK_SIZE_MATCH(Adds(r1, r2, r7),
- Adds(r1, r7, r2));
+ CHECK_SIZE_MATCH(Adds(r1, r2, r7), Adds(r1, r7, r2));
- CHECK_SIZE_MATCH(Adds(lt, r1, r2, r7),
- Adds(lt, r1, r7, r2));
+ CHECK_SIZE_MATCH(Adds(lt, r1, r2, r7), Adds(lt, r1, r7, r2));
- CHECK_SIZE_MATCH(Adds(r4, r4, r10),
- Adds(r4, r10, r4));
+ CHECK_SIZE_MATCH(Adds(r4, r4, r10), Adds(r4, r10, r4));
- CHECK_SIZE_MATCH(Adds(eq, r4, r4, r10),
- Adds(eq, r4, r10, r4));
+ CHECK_SIZE_MATCH(Adds(eq, r4, r4, r10), Adds(eq, r4, r10, r4));
- CHECK_SIZE_MATCH(Adds(r7, sp, r7),
- Adds(r7, r7, sp));
+ CHECK_SIZE_MATCH(Adds(r7, sp, r7), Adds(r7, r7, sp));
- CHECK_SIZE_MATCH(Adds(eq, r7, sp, r7),
- Adds(eq, r7, r7, sp));
+ CHECK_SIZE_MATCH(Adds(eq, r7, sp, r7), Adds(eq, r7, r7, sp));
- CHECK_SIZE_MATCH(Adds(sp, sp, r10),
- Adds(sp, r10, sp));
+ CHECK_SIZE_MATCH(Adds(sp, sp, r10), Adds(sp, r10, sp));
- CHECK_SIZE_MATCH(Adds(eq, sp, sp, r10),
- Adds(eq, sp, r10, sp));
+ CHECK_SIZE_MATCH(Adds(eq, sp, sp, r10), Adds(eq, sp, r10, sp));
// CHECK_SIZE_MATCH(Ands(r7, r7, r6),
// Ands(r7, r6, r7));
diff --git a/test/aarch32/test-assembler-cond-rd-operand-const-a32-can-use-pc.cc b/test/aarch32/test-assembler-cond-rd-operand-const-a32-can-use-pc.cc
index 5c19b43..fb5fb0d 100644
--- a/test/aarch32/test-assembler-cond-rd-operand-const-a32-can-use-pc.cc
+++ b/test/aarch32/test-assembler-cond-rd-operand-const-a32-can-use-pc.cc
@@ -94,703 +94,1471 @@
};
// Each element of this array produce one instruction encoding.
-const TestData kTests[] = {
- {{ls, r10, 0x00ab0000}, false, al, "ls r10 0x00ab0000", "ls_r10_"
- "0x00ab0000"},
- {{ls, r13, 0xf000000f}, false, al, "ls r13 0xf000000f", "ls_r13_"
- "0xf000000f"},
- {{pl, r4, 0x00003fc0}, false, al, "pl r4 0x00003fc0", "pl_r4_0x00003fc0"},
- {{ne, r15, 0x00ab0000}, false, al, "ne r15 0x00ab0000", "ne_r15_"
- "0x00ab0000"},
- {{lt, r14, 0x002ac000}, false, al, "lt r14 0x002ac000", "lt_r14_"
- "0x002ac000"},
- {{eq, r15, 0x000000ab}, false, al, "eq r15 0x000000ab", "eq_r15_"
- "0x000000ab"},
- {{al, r6, 0x002ac000}, false, al, "al r6 0x002ac000", "al_r6_0x002ac000"},
- {{pl, r2, 0x0002ac00}, false, al, "pl r2 0x0002ac00", "pl_r2_0x0002ac00"},
- {{mi, r8, 0x00000000}, false, al, "mi r8 0x00000000", "mi_r8_0x00000000"},
- {{pl, r13, 0x02ac0000}, false, al, "pl r13 0x02ac0000", "pl_r13_"
- "0x02ac0000"},
- {{ge, r9, 0xac000002}, false, al, "ge r9 0xac000002", "ge_r9_0xac000002"},
- {{ne, r3, 0x000003fc}, false, al, "ne r3 0x000003fc", "ne_r3_0x000003fc"},
- {{pl, r10, 0xf000000f}, false, al, "pl r10 0xf000000f", "pl_r10_"
- "0xf000000f"},
- {{cc, r0, 0x00003fc0}, false, al, "cc r0 0x00003fc0", "cc_r0_0x00003fc0"},
- {{gt, r6, 0x002ac000}, false, al, "gt r6 0x002ac000", "gt_r6_0x002ac000"},
- {{cs, r1, 0x00ff0000}, false, al, "cs r1 0x00ff0000", "cs_r1_0x00ff0000"},
- {{lt, r8, 0x000002ac}, false, al, "lt r8 0x000002ac", "lt_r8_0x000002ac"},
- {{vc, r4, 0x2ac00000}, false, al, "vc r4 0x2ac00000", "vc_r4_0x2ac00000"},
- {{al, r11, 0x00003fc0}, false, al, "al r11 0x00003fc0", "al_r11_"
- "0x00003fc0"},
- {{lt, r9, 0x000000ab}, false, al, "lt r9 0x000000ab", "lt_r9_0x000000ab"},
- {{le, r15, 0x0000ff00}, false, al, "le r15 0x0000ff00", "le_r15_"
- "0x0000ff00"},
- {{vc, r9, 0xff000000}, false, al, "vc r9 0xff000000", "vc_r9_0xff000000"},
- {{ge, r0, 0x0ff00000}, false, al, "ge r0 0x0ff00000", "ge_r0_0x0ff00000"},
- {{vc, r1, 0x00ff0000}, false, al, "vc r1 0x00ff0000", "vc_r1_0x00ff0000"},
- {{vs, r12, 0x00000ff0}, false, al, "vs r12 0x00000ff0", "vs_r12_"
- "0x00000ff0"},
- {{pl, r13, 0x003fc000}, false, al, "pl r13 0x003fc000", "pl_r13_"
- "0x003fc000"},
- {{eq, r4, 0xff000000}, false, al, "eq r4 0xff000000", "eq_r4_0xff000000"},
- {{eq, r12, 0xac000002}, false, al, "eq r12 0xac000002", "eq_r12_"
- "0xac000002"},
- {{al, r11, 0x000000ab}, false, al, "al r11 0x000000ab", "al_r11_"
- "0x000000ab"},
- {{ge, r10, 0x00ff0000}, false, al, "ge r10 0x00ff0000", "ge_r10_"
- "0x00ff0000"},
- {{vs, r7, 0x0002ac00}, false, al, "vs r7 0x0002ac00", "vs_r7_0x0002ac00"},
- {{le, r7, 0x002ac000}, false, al, "le r7 0x002ac000", "le_r7_0x002ac000"},
- {{vc, r9, 0x000003fc}, false, al, "vc r9 0x000003fc", "vc_r9_0x000003fc"},
- {{vs, r14, 0x002ac000}, false, al, "vs r14 0x002ac000", "vs_r14_"
- "0x002ac000"},
- {{vs, r1, 0x000003fc}, false, al, "vs r1 0x000003fc", "vs_r1_0x000003fc"},
- {{le, r13, 0x3fc00000}, false, al, "le r13 0x3fc00000", "le_r13_"
- "0x3fc00000"},
- {{ne, r9, 0x0002ac00}, false, al, "ne r9 0x0002ac00", "ne_r9_0x0002ac00"},
- {{al, r0, 0x00ab0000}, false, al, "al r0 0x00ab0000", "al_r0_0x00ab0000"},
- {{hi, r5, 0xff000000}, false, al, "hi r5 0xff000000", "hi_r5_0xff000000"},
- {{hi, r14, 0x000000ab}, false, al, "hi r14 0x000000ab", "hi_r14_"
- "0x000000ab"},
- {{hi, r14, 0x000003fc}, false, al, "hi r14 0x000003fc", "hi_r14_"
- "0x000003fc"},
- {{mi, r14, 0x03fc0000}, false, al, "mi r14 0x03fc0000", "mi_r14_"
- "0x03fc0000"},
- {{mi, r8, 0x000000ff}, false, al, "mi r8 0x000000ff", "mi_r8_0x000000ff"},
- {{gt, r6, 0x000ab000}, false, al, "gt r6 0x000ab000", "gt_r6_0x000ab000"},
- {{mi, r12, 0x0ff00000}, false, al, "mi r12 0x0ff00000", "mi_r12_"
- "0x0ff00000"},
- {{vs, r3, 0xff000000}, false, al, "vs r3 0xff000000", "vs_r3_0xff000000"},
- {{vc, r7, 0x00ab0000}, false, al, "vc r7 0x00ab0000", "vc_r7_0x00ab0000"},
- {{hi, r6, 0x03fc0000}, false, al, "hi r6 0x03fc0000", "hi_r6_0x03fc0000"},
- {{ls, r5, 0x00ab0000}, false, al, "ls r5 0x00ab0000", "ls_r5_0x00ab0000"},
- {{ls, r4, 0x000002ac}, false, al, "ls r4 0x000002ac", "ls_r4_0x000002ac"},
- {{le, r13, 0x03fc0000}, false, al, "le r13 0x03fc0000", "le_r13_"
- "0x03fc0000"},
- {{le, r15, 0x002ac000}, false, al, "le r15 0x002ac000", "le_r15_"
- "0x002ac000"},
- {{pl, r6, 0xff000000}, false, al, "pl r6 0xff000000", "pl_r6_0xff000000"},
- {{gt, r7, 0x00000ab0}, false, al, "gt r7 0x00000ab0", "gt_r7_0x00000ab0"},
- {{hi, r3, 0x2ac00000}, false, al, "hi r3 0x2ac00000", "hi_r3_0x2ac00000"},
- {{gt, r15, 0x00000000}, false, al, "gt r15 0x00000000", "gt_r15_"
- "0x00000000"},
- {{gt, r1, 0x002ac000}, false, al, "gt r1 0x002ac000", "gt_r1_0x002ac000"},
- {{hi, r2, 0x0000ff00}, false, al, "hi r2 0x0000ff00", "hi_r2_0x0000ff00"},
- {{pl, r10, 0x03fc0000}, false, al, "pl r10 0x03fc0000", "pl_r10_"
- "0x03fc0000"},
- {{gt, r10, 0xb000000a}, false, al, "gt r10 0xb000000a", "gt_r10_"
- "0xb000000a"},
- {{ge, r10, 0x002ac000}, false, al, "ge r10 0x002ac000", "ge_r10_"
- "0x002ac000"},
- {{al, r4, 0x0ab00000}, false, al, "al r4 0x0ab00000", "al_r4_0x0ab00000"},
- {{ne, r12, 0x00000000}, false, al, "ne r12 0x00000000", "ne_r12_"
- "0x00000000"},
- {{cs, r5, 0x00003fc0}, false, al, "cs r5 0x00003fc0", "cs_r5_0x00003fc0"},
- {{cc, r6, 0x000000ff}, false, al, "cc r6 0x000000ff", "cc_r6_0x000000ff"},
- {{hi, r14, 0x03fc0000}, false, al, "hi r14 0x03fc0000", "hi_r14_"
- "0x03fc0000"},
- {{hi, r3, 0xab000000}, false, al, "hi r3 0xab000000", "hi_r3_0xab000000"},
- {{lt, r2, 0x000ff000}, false, al, "lt r2 0x000ff000", "lt_r2_0x000ff000"},
- {{gt, r3, 0x00ff0000}, false, al, "gt r3 0x00ff0000", "gt_r3_0x00ff0000"},
- {{le, r14, 0x0ab00000}, false, al, "le r14 0x0ab00000", "le_r14_"
- "0x0ab00000"},
- {{cc, r13, 0x03fc0000}, false, al, "cc r13 0x03fc0000", "cc_r13_"
- "0x03fc0000"},
- {{gt, r6, 0xb000000a}, false, al, "gt r6 0xb000000a", "gt_r6_0xb000000a"},
- {{pl, r3, 0x2ac00000}, false, al, "pl r3 0x2ac00000", "pl_r3_0x2ac00000"},
- {{mi, r15, 0x000000ff}, false, al, "mi r15 0x000000ff", "mi_r15_"
- "0x000000ff"},
- {{gt, r4, 0x00000ff0}, false, al, "gt r4 0x00000ff0", "gt_r4_0x00000ff0"},
- {{ne, r4, 0x000003fc}, false, al, "ne r4 0x000003fc", "ne_r4_0x000003fc"},
- {{ne, r5, 0x00ff0000}, false, al, "ne r5 0x00ff0000", "ne_r5_0x00ff0000"},
- {{ge, r7, 0x000003fc}, false, al, "ge r7 0x000003fc", "ge_r7_0x000003fc"},
- {{vs, r5, 0x00000000}, false, al, "vs r5 0x00000000", "vs_r5_0x00000000"},
- {{vs, r6, 0x2ac00000}, false, al, "vs r6 0x2ac00000", "vs_r6_0x2ac00000"},
- {{mi, r1, 0x3fc00000}, false, al, "mi r1 0x3fc00000", "mi_r1_0x3fc00000"},
- {{gt, r8, 0xc000002a}, false, al, "gt r8 0xc000002a", "gt_r8_0xc000002a"},
- {{pl, r10, 0x000000ab}, false, al, "pl r10 0x000000ab", "pl_r10_"
- "0x000000ab"},
- {{mi, r6, 0xab000000}, false, al, "mi r6 0xab000000", "mi_r6_0xab000000"},
- {{ls, r12, 0x3fc00000}, false, al, "ls r12 0x3fc00000", "ls_r12_"
- "0x3fc00000"},
- {{vs, r13, 0x03fc0000}, false, al, "vs r13 0x03fc0000", "vs_r13_"
- "0x03fc0000"},
- {{eq, r8, 0x00000ab0}, false, al, "eq r8 0x00000ab0", "eq_r8_0x00000ab0"},
- {{gt, r6, 0x000000ff}, false, al, "gt r6 0x000000ff", "gt_r6_0x000000ff"},
- {{vs, r5, 0x000ab000}, false, al, "vs r5 0x000ab000", "vs_r5_0x000ab000"},
- {{mi, r1, 0x2ac00000}, false, al, "mi r1 0x2ac00000", "mi_r1_0x2ac00000"},
- {{vs, r6, 0x00003fc0}, false, al, "vs r6 0x00003fc0", "vs_r6_0x00003fc0"},
- {{gt, r6, 0x000002ac}, false, al, "gt r6 0x000002ac", "gt_r6_0x000002ac"},
- {{eq, r2, 0x00000000}, false, al, "eq r2 0x00000000", "eq_r2_0x00000000"},
- {{eq, r10, 0xc000003f}, false, al, "eq r10 0xc000003f", "eq_r10_"
- "0xc000003f"},
- {{mi, r7, 0x00ab0000}, false, al, "mi r7 0x00ab0000", "mi_r7_0x00ab0000"},
- {{cc, r7, 0x2ac00000}, false, al, "cc r7 0x2ac00000", "cc_r7_0x2ac00000"},
- {{pl, r4, 0x00ab0000}, false, al, "pl r4 0x00ab0000", "pl_r4_0x00ab0000"},
- {{ne, r15, 0x00000ff0}, false, al, "ne r15 0x00000ff0", "ne_r15_"
- "0x00000ff0"},
- {{al, r6, 0x02ac0000}, false, al, "al r6 0x02ac0000", "al_r6_0x02ac0000"},
- {{pl, r6, 0x000002ac}, false, al, "pl r6 0x000002ac", "pl_r6_0x000002ac"},
- {{ne, r14, 0x00ff0000}, false, al, "ne r14 0x00ff0000", "ne_r14_"
- "0x00ff0000"},
- {{ne, r5, 0x0003fc00}, false, al, "ne r5 0x0003fc00", "ne_r5_0x0003fc00"},
- {{pl, r6, 0x00000ab0}, false, al, "pl r6 0x00000ab0", "pl_r6_0x00000ab0"},
- {{eq, r10, 0x00002ac0}, false, al, "eq r10 0x00002ac0", "eq_r10_"
- "0x00002ac0"},
- {{mi, r4, 0x00000ab0}, false, al, "mi r4 0x00000ab0", "mi_r4_0x00000ab0"},
- {{vc, r5, 0x000ab000}, false, al, "vc r5 0x000ab000", "vc_r5_0x000ab000"},
- {{ge, r0, 0x02ac0000}, false, al, "ge r0 0x02ac0000", "ge_r0_0x02ac0000"},
- {{pl, r5, 0x000002ac}, false, al, "pl r5 0x000002ac", "pl_r5_0x000002ac"},
- {{ge, r13, 0x00000ab0}, false, al, "ge r13 0x00000ab0", "ge_r13_"
- "0x00000ab0"},
- {{eq, r2, 0x03fc0000}, false, al, "eq r2 0x03fc0000", "eq_r2_0x03fc0000"},
- {{lt, r11, 0x00000ab0}, false, al, "lt r11 0x00000ab0", "lt_r11_"
- "0x00000ab0"},
- {{ge, r6, 0x00000000}, false, al, "ge r6 0x00000000", "ge_r6_0x00000000"},
- {{gt, r2, 0xac000002}, false, al, "gt r2 0xac000002", "gt_r2_0xac000002"},
- {{le, r15, 0x000000ab}, false, al, "le r15 0x000000ab", "le_r15_"
- "0x000000ab"},
- {{cc, r4, 0x00000ff0}, false, al, "cc r4 0x00000ff0", "cc_r4_0x00000ff0"},
- {{pl, r10, 0x02ac0000}, false, al, "pl r10 0x02ac0000", "pl_r10_"
- "0x02ac0000"},
- {{gt, r9, 0x00000000}, false, al, "gt r9 0x00000000", "gt_r9_0x00000000"},
- {{vs, r8, 0x000000ff}, false, al, "vs r8 0x000000ff", "vs_r8_0x000000ff"},
- {{gt, r14, 0x0002ac00}, false, al, "gt r14 0x0002ac00", "gt_r14_"
- "0x0002ac00"},
- {{vs, r14, 0x00002ac0}, false, al, "vs r14 0x00002ac0", "vs_r14_"
- "0x00002ac0"},
- {{ge, r12, 0x00000000}, false, al, "ge r12 0x00000000", "ge_r12_"
- "0x00000000"},
- {{vc, r8, 0xf000000f}, false, al, "vc r8 0xf000000f", "vc_r8_0xf000000f"},
- {{cs, r6, 0x00003fc0}, false, al, "cs r6 0x00003fc0", "cs_r6_0x00003fc0"},
- {{le, r4, 0x000003fc}, false, al, "le r4 0x000003fc", "le_r4_0x000003fc"},
- {{cs, r5, 0x000ff000}, false, al, "cs r5 0x000ff000", "cs_r5_0x000ff000"},
- {{eq, r2, 0x0000ff00}, false, al, "eq r2 0x0000ff00", "eq_r2_0x0000ff00"},
- {{pl, r10, 0x0ab00000}, false, al, "pl r10 0x0ab00000", "pl_r10_"
- "0x0ab00000"},
- {{le, r11, 0xac000002}, false, al, "le r11 0xac000002", "le_r11_"
- "0xac000002"},
- {{vs, r15, 0x00003fc0}, false, al, "vs r15 0x00003fc0", "vs_r15_"
- "0x00003fc0"},
- {{lt, r2, 0x0002ac00}, false, al, "lt r2 0x0002ac00", "lt_r2_0x0002ac00"},
- {{eq, r1, 0x00ab0000}, false, al, "eq r1 0x00ab0000", "eq_r1_0x00ab0000"},
- {{cc, r7, 0x03fc0000}, false, al, "cc r7 0x03fc0000", "cc_r7_0x03fc0000"},
- {{mi, r6, 0x00000ab0}, false, al, "mi r6 0x00000ab0", "mi_r6_0x00000ab0"},
- {{eq, r4, 0x00000ab0}, false, al, "eq r4 0x00000ab0", "eq_r4_0x00000ab0"},
- {{ls, r3, 0x0003fc00}, false, al, "ls r3 0x0003fc00", "ls_r3_0x0003fc00"},
- {{mi, r6, 0x000ab000}, false, al, "mi r6 0x000ab000", "mi_r6_0x000ab000"},
- {{ne, r12, 0x003fc000}, false, al, "ne r12 0x003fc000", "ne_r12_"
- "0x003fc000"},
- {{eq, r11, 0x00ff0000}, false, al, "eq r11 0x00ff0000", "eq_r11_"
- "0x00ff0000"},
- {{cs, r13, 0x00ab0000}, false, al, "cs r13 0x00ab0000", "cs_r13_"
- "0x00ab0000"},
- {{eq, r5, 0x000003fc}, false, al, "eq r5 0x000003fc", "eq_r5_0x000003fc"},
- {{vs, r6, 0x0003fc00}, false, al, "vs r6 0x0003fc00", "vs_r6_0x0003fc00"},
- {{pl, r8, 0x0ff00000}, false, al, "pl r8 0x0ff00000", "pl_r8_0x0ff00000"},
- {{pl, r11, 0x0000ab00}, false, al, "pl r11 0x0000ab00", "pl_r11_"
- "0x0000ab00"},
- {{le, r2, 0xac000002}, false, al, "le r2 0xac000002", "le_r2_0xac000002"},
- {{vc, r10, 0x000ff000}, false, al, "vc r10 0x000ff000", "vc_r10_"
- "0x000ff000"},
- {{le, r4, 0x00000ff0}, false, al, "le r4 0x00000ff0", "le_r4_0x00000ff0"},
- {{gt, r12, 0x00000ff0}, false, al, "gt r12 0x00000ff0", "gt_r12_"
- "0x00000ff0"},
- {{le, r5, 0x0002ac00}, false, al, "le r5 0x0002ac00", "le_r5_0x0002ac00"},
- {{le, r0, 0xac000002}, false, al, "le r0 0xac000002", "le_r0_0xac000002"},
- {{vs, r11, 0x0ff00000}, false, al, "vs r11 0x0ff00000", "vs_r11_"
- "0x0ff00000"},
- {{ls, r0, 0x000ab000}, false, al, "ls r0 0x000ab000", "ls_r0_0x000ab000"},
- {{ls, r2, 0xf000000f}, false, al, "ls r2 0xf000000f", "ls_r2_0xf000000f"},
- {{cs, r3, 0x0ff00000}, false, al, "cs r3 0x0ff00000", "cs_r3_0x0ff00000"},
- {{hi, r8, 0x0ff00000}, false, al, "hi r8 0x0ff00000", "hi_r8_0x0ff00000"},
- {{gt, r3, 0x00002ac0}, false, al, "gt r3 0x00002ac0", "gt_r3_0x00002ac0"},
- {{al, r15, 0xab000000}, false, al, "al r15 0xab000000", "al_r15_"
- "0xab000000"},
- {{eq, r13, 0x000000ab}, false, al, "eq r13 0x000000ab", "eq_r13_"
- "0x000000ab"},
- {{al, r2, 0xc000002a}, false, al, "al r2 0xc000002a", "al_r2_0xc000002a"},
- {{eq, r13, 0x03fc0000}, false, al, "eq r13 0x03fc0000", "eq_r13_"
- "0x03fc0000"},
- {{eq, r3, 0x00000ff0}, false, al, "eq r3 0x00000ff0", "eq_r3_0x00000ff0"},
- {{hi, r12, 0x00002ac0}, false, al, "hi r12 0x00002ac0", "hi_r12_"
- "0x00002ac0"},
- {{mi, r2, 0x0ff00000}, false, al, "mi r2 0x0ff00000", "mi_r2_0x0ff00000"},
- {{ne, r9, 0x003fc000}, false, al, "ne r9 0x003fc000", "ne_r9_0x003fc000"},
- {{eq, r14, 0x03fc0000}, false, al, "eq r14 0x03fc0000", "eq_r14_"
- "0x03fc0000"},
- {{cc, r0, 0x002ac000}, false, al, "cc r0 0x002ac000", "cc_r0_0x002ac000"},
- {{vc, r14, 0x00000ab0}, false, al, "vc r14 0x00000ab0", "vc_r14_"
- "0x00000ab0"},
- {{mi, r15, 0xf000000f}, false, al, "mi r15 0xf000000f", "mi_r15_"
- "0xf000000f"},
- {{ge, r9, 0x000003fc}, false, al, "ge r9 0x000003fc", "ge_r9_0x000003fc"},
- {{vs, r13, 0xac000002}, false, al, "vs r13 0xac000002", "vs_r13_"
- "0xac000002"},
- {{vs, r1, 0x3fc00000}, false, al, "vs r1 0x3fc00000", "vs_r1_0x3fc00000"},
- {{eq, r12, 0x00003fc0}, false, al, "eq r12 0x00003fc0", "eq_r12_"
- "0x00003fc0"},
- {{mi, r6, 0xff000000}, false, al, "mi r6 0xff000000", "mi_r6_0xff000000"},
- {{ne, r5, 0x000003fc}, false, al, "ne r5 0x000003fc", "ne_r5_0x000003fc"},
- {{lt, r8, 0x0ff00000}, false, al, "lt r8 0x0ff00000", "lt_r8_0x0ff00000"},
- {{hi, r7, 0x3fc00000}, false, al, "hi r7 0x3fc00000", "hi_r7_0x3fc00000"},
- {{ge, r10, 0xac000002}, false, al, "ge r10 0xac000002", "ge_r10_"
- "0xac000002"},
- {{vs, r2, 0x0000ff00}, false, al, "vs r2 0x0000ff00", "vs_r2_0x0000ff00"},
- {{al, r6, 0x000000ab}, false, al, "al r6 0x000000ab", "al_r6_0x000000ab"},
- {{ge, r7, 0x00ff0000}, false, al, "ge r7 0x00ff0000", "ge_r7_0x00ff0000"},
- {{ne, r0, 0x000ff000}, false, al, "ne r0 0x000ff000", "ne_r0_0x000ff000"},
- {{mi, r6, 0x000000ab}, false, al, "mi r6 0x000000ab", "mi_r6_0x000000ab"},
- {{hi, r1, 0xf000000f}, false, al, "hi r1 0xf000000f", "hi_r1_0xf000000f"},
- {{mi, r6, 0x2ac00000}, false, al, "mi r6 0x2ac00000", "mi_r6_0x2ac00000"},
- {{vc, r11, 0x000000ff}, false, al, "vc r11 0x000000ff", "vc_r11_"
- "0x000000ff"},
- {{ls, r14, 0x02ac0000}, false, al, "ls r14 0x02ac0000", "ls_r14_"
- "0x02ac0000"},
- {{ge, r5, 0x003fc000}, false, al, "ge r5 0x003fc000", "ge_r5_0x003fc000"},
- {{ls, r12, 0x0000ab00}, false, al, "ls r12 0x0000ab00", "ls_r12_"
- "0x0000ab00"},
- {{cc, r15, 0x00000ab0}, false, al, "cc r15 0x00000ab0", "cc_r15_"
- "0x00000ab0"},
- {{vc, r12, 0x0000ab00}, false, al, "vc r12 0x0000ab00", "vc_r12_"
- "0x0000ab00"},
- {{vs, r2, 0xc000002a}, false, al, "vs r2 0xc000002a", "vs_r2_0xc000002a"},
- {{lt, r7, 0x0ab00000}, false, al, "lt r7 0x0ab00000", "lt_r7_0x0ab00000"},
- {{ls, r6, 0x00000ff0}, false, al, "ls r6 0x00000ff0", "ls_r6_0x00000ff0"},
- {{vc, r10, 0x000000ff}, false, al, "vc r10 0x000000ff", "vc_r10_"
- "0x000000ff"},
- {{ls, r4, 0x0000ab00}, false, al, "ls r4 0x0000ab00", "ls_r4_0x0000ab00"},
- {{mi, r10, 0x003fc000}, false, al, "mi r10 0x003fc000", "mi_r10_"
- "0x003fc000"},
- {{ls, r1, 0x000002ac}, false, al, "ls r1 0x000002ac", "ls_r1_0x000002ac"},
- {{ge, r7, 0xb000000a}, false, al, "ge r7 0xb000000a", "ge_r7_0xb000000a"},
- {{gt, r4, 0xf000000f}, false, al, "gt r4 0xf000000f", "gt_r4_0xf000000f"},
- {{vc, r8, 0x002ac000}, false, al, "vc r8 0x002ac000", "vc_r8_0x002ac000"},
- {{eq, r5, 0x0ab00000}, false, al, "eq r5 0x0ab00000", "eq_r5_0x0ab00000"},
- {{gt, r2, 0xf000000f}, false, al, "gt r2 0xf000000f", "gt_r2_0xf000000f"},
- {{gt, r6, 0xff000000}, false, al, "gt r6 0xff000000", "gt_r6_0xff000000"},
- {{ls, r8, 0x0ab00000}, false, al, "ls r8 0x0ab00000", "ls_r8_0x0ab00000"},
- {{vc, r0, 0xb000000a}, false, al, "vc r0 0xb000000a", "vc_r0_0xb000000a"},
- {{lt, r6, 0x03fc0000}, false, al, "lt r6 0x03fc0000", "lt_r6_0x03fc0000"},
- {{ge, r10, 0x0000ab00}, false, al, "ge r10 0x0000ab00", "ge_r10_"
- "0x0000ab00"},
- {{hi, r4, 0x000ab000}, false, al, "hi r4 0x000ab000", "hi_r4_0x000ab000"},
- {{hi, r11, 0x3fc00000}, false, al, "hi r11 0x3fc00000", "hi_r11_"
- "0x3fc00000"},
- {{vs, r12, 0xc000003f}, false, al, "vs r12 0xc000003f", "vs_r12_"
- "0xc000003f"},
- {{gt, r12, 0xb000000a}, false, al, "gt r12 0xb000000a", "gt_r12_"
- "0xb000000a"},
- {{eq, r11, 0x2ac00000}, false, al, "eq r11 0x2ac00000", "eq_r11_"
- "0x2ac00000"},
- {{hi, r0, 0xc000003f}, false, al, "hi r0 0xc000003f", "hi_r0_0xc000003f"},
- {{cs, r12, 0xac000002}, false, al, "cs r12 0xac000002", "cs_r12_"
- "0xac000002"},
- {{hi, r9, 0x3fc00000}, false, al, "hi r9 0x3fc00000", "hi_r9_0x3fc00000"},
- {{vs, r2, 0x00002ac0}, false, al, "vs r2 0x00002ac0", "vs_r2_0x00002ac0"},
- {{al, r12, 0xb000000a}, false, al, "al r12 0xb000000a", "al_r12_"
- "0xb000000a"},
- {{gt, r12, 0x3fc00000}, false, al, "gt r12 0x3fc00000", "gt_r12_"
- "0x3fc00000"},
- {{gt, r6, 0xf000000f}, false, al, "gt r6 0xf000000f", "gt_r6_0xf000000f"},
- {{vc, r14, 0x000000ff}, false, al, "vc r14 0x000000ff", "vc_r14_"
- "0x000000ff"},
- {{pl, r7, 0x0002ac00}, false, al, "pl r7 0x0002ac00", "pl_r7_0x0002ac00"},
- {{ge, r1, 0x03fc0000}, false, al, "ge r1 0x03fc0000", "ge_r1_0x03fc0000"},
- {{hi, r10, 0x0002ac00}, false, al, "hi r10 0x0002ac00", "hi_r10_"
- "0x0002ac00"},
- {{gt, r4, 0x002ac000}, false, al, "gt r4 0x002ac000", "gt_r4_0x002ac000"},
- {{vc, r5, 0x000000ff}, false, al, "vc r5 0x000000ff", "vc_r5_0x000000ff"},
- {{pl, r15, 0x0000ab00}, false, al, "pl r15 0x0000ab00", "pl_r15_"
- "0x0000ab00"},
- {{cc, r4, 0x00ab0000}, false, al, "cc r4 0x00ab0000", "cc_r4_0x00ab0000"},
- {{pl, r11, 0xff000000}, false, al, "pl r11 0xff000000", "pl_r11_"
- "0xff000000"},
- {{pl, r2, 0xf000000f}, false, al, "pl r2 0xf000000f", "pl_r2_0xf000000f"},
- {{cc, r8, 0xb000000a}, false, al, "cc r8 0xb000000a", "cc_r8_0xb000000a"},
- {{al, r13, 0x000000ff}, false, al, "al r13 0x000000ff", "al_r13_"
- "0x000000ff"},
- {{mi, r6, 0x000003fc}, false, al, "mi r6 0x000003fc", "mi_r6_0x000003fc"},
- {{vs, r13, 0x02ac0000}, false, al, "vs r13 0x02ac0000", "vs_r13_"
- "0x02ac0000"},
- {{mi, r4, 0x00ff0000}, false, al, "mi r4 0x00ff0000", "mi_r4_0x00ff0000"},
- {{cs, r3, 0x000003fc}, false, al, "cs r3 0x000003fc", "cs_r3_0x000003fc"},
- {{pl, r13, 0xab000000}, false, al, "pl r13 0xab000000", "pl_r13_"
- "0xab000000"},
- {{ls, r9, 0x002ac000}, false, al, "ls r9 0x002ac000", "ls_r9_0x002ac000"},
- {{eq, r1, 0xc000002a}, false, al, "eq r1 0xc000002a", "eq_r1_0xc000002a"},
- {{lt, r12, 0x00000000}, false, al, "lt r12 0x00000000", "lt_r12_"
- "0x00000000"},
- {{ge, r14, 0xff000000}, false, al, "ge r14 0xff000000", "ge_r14_"
- "0xff000000"},
- {{lt, r9, 0x002ac000}, false, al, "lt r9 0x002ac000", "lt_r9_0x002ac000"},
- {{lt, r10, 0x00000ff0}, false, al, "lt r10 0x00000ff0", "lt_r10_"
- "0x00000ff0"},
- {{vs, r5, 0x000000ff}, false, al, "vs r5 0x000000ff", "vs_r5_0x000000ff"},
- {{cc, r12, 0x03fc0000}, false, al, "cc r12 0x03fc0000", "cc_r12_"
- "0x03fc0000"},
- {{ne, r4, 0x00000000}, false, al, "ne r4 0x00000000", "ne_r4_0x00000000"},
- {{mi, r13, 0xff000000}, false, al, "mi r13 0xff000000", "mi_r13_"
- "0xff000000"},
- {{ne, r7, 0x00000ff0}, false, al, "ne r7 0x00000ff0", "ne_r7_0x00000ff0"},
- {{vs, r2, 0xc000003f}, false, al, "vs r2 0xc000003f", "vs_r2_0xc000003f"},
- {{al, r5, 0x00ff0000}, false, al, "al r5 0x00ff0000", "al_r5_0x00ff0000"},
- {{hi, r15, 0x00000ff0}, false, al, "hi r15 0x00000ff0", "hi_r15_"
- "0x00000ff0"},
- {{ls, r8, 0x00003fc0}, false, al, "ls r8 0x00003fc0", "ls_r8_0x00003fc0"},
- {{vs, r0, 0xff000000}, false, al, "vs r0 0xff000000", "vs_r0_0xff000000"},
- {{vs, r6, 0x000000ab}, false, al, "vs r6 0x000000ab", "vs_r6_0x000000ab"},
- {{cs, r9, 0x00ab0000}, false, al, "cs r9 0x00ab0000", "cs_r9_0x00ab0000"},
- {{hi, r1, 0x0002ac00}, false, al, "hi r1 0x0002ac00", "hi_r1_0x0002ac00"},
- {{hi, r15, 0x2ac00000}, false, al, "hi r15 0x2ac00000", "hi_r15_"
- "0x2ac00000"},
- {{hi, r6, 0x0002ac00}, false, al, "hi r6 0x0002ac00", "hi_r6_0x0002ac00"},
- {{ge, r4, 0xc000003f}, false, al, "ge r4 0xc000003f", "ge_r4_0xc000003f"},
- {{ls, r10, 0x0000ff00}, false, al, "ls r10 0x0000ff00", "ls_r10_"
- "0x0000ff00"},
- {{ne, r11, 0x000003fc}, false, al, "ne r11 0x000003fc", "ne_r11_"
- "0x000003fc"},
- {{ls, r3, 0x0002ac00}, false, al, "ls r3 0x0002ac00", "ls_r3_0x0002ac00"},
- {{al, r12, 0x000003fc}, false, al, "al r12 0x000003fc", "al_r12_"
- "0x000003fc"},
- {{le, r7, 0xf000000f}, false, al, "le r7 0xf000000f", "le_r7_0xf000000f"},
- {{al, r11, 0x00000ab0}, false, al, "al r11 0x00000ab0", "al_r11_"
- "0x00000ab0"},
- {{cs, r13, 0x02ac0000}, false, al, "cs r13 0x02ac0000", "cs_r13_"
- "0x02ac0000"},
- {{hi, r1, 0x00000ff0}, false, al, "hi r1 0x00000ff0", "hi_r1_0x00000ff0"},
- {{le, r11, 0x3fc00000}, false, al, "le r11 0x3fc00000", "le_r11_"
- "0x3fc00000"},
- {{hi, r9, 0x000003fc}, false, al, "hi r9 0x000003fc", "hi_r9_0x000003fc"},
- {{mi, r13, 0x000002ac}, false, al, "mi r13 0x000002ac", "mi_r13_"
- "0x000002ac"},
- {{lt, r12, 0x000003fc}, false, al, "lt r12 0x000003fc", "lt_r12_"
- "0x000003fc"},
- {{lt, r14, 0x00000ab0}, false, al, "lt r14 0x00000ab0", "lt_r14_"
- "0x00000ab0"},
- {{gt, r1, 0x3fc00000}, false, al, "gt r1 0x3fc00000", "gt_r1_0x3fc00000"},
- {{cc, r14, 0xb000000a}, false, al, "cc r14 0xb000000a", "cc_r14_"
- "0xb000000a"},
- {{ge, r0, 0x000002ac}, false, al, "ge r0 0x000002ac", "ge_r0_0x000002ac"},
- {{eq, r12, 0x000003fc}, false, al, "eq r12 0x000003fc", "eq_r12_"
- "0x000003fc"},
- {{vc, r13, 0x0ab00000}, false, al, "vc r13 0x0ab00000", "vc_r13_"
- "0x0ab00000"},
- {{pl, r10, 0x0003fc00}, false, al, "pl r10 0x0003fc00", "pl_r10_"
- "0x0003fc00"},
- {{le, r7, 0x0000ff00}, false, al, "le r7 0x0000ff00", "le_r7_0x0000ff00"},
- {{eq, r5, 0x0003fc00}, false, al, "eq r5 0x0003fc00", "eq_r5_0x0003fc00"},
- {{pl, r1, 0xfc000003}, false, al, "pl r1 0xfc000003", "pl_r1_0xfc000003"},
- {{gt, r15, 0x000ff000}, false, al, "gt r15 0x000ff000", "gt_r15_"
- "0x000ff000"},
- {{mi, r2, 0xb000000a}, false, al, "mi r2 0xb000000a", "mi_r2_0xb000000a"},
- {{cs, r8, 0x0000ff00}, false, al, "cs r8 0x0000ff00", "cs_r8_0x0000ff00"},
- {{vs, r8, 0x00002ac0}, false, al, "vs r8 0x00002ac0", "vs_r8_0x00002ac0"},
- {{cs, r5, 0x00ab0000}, false, al, "cs r5 0x00ab0000", "cs_r5_0x00ab0000"},
- {{pl, r3, 0x00ab0000}, false, al, "pl r3 0x00ab0000", "pl_r3_0x00ab0000"},
- {{hi, r5, 0x02ac0000}, false, al, "hi r5 0x02ac0000", "hi_r5_0x02ac0000"},
- {{cc, r9, 0x000002ac}, false, al, "cc r9 0x000002ac", "cc_r9_0x000002ac"},
- {{ls, r13, 0x0000ab00}, false, al, "ls r13 0x0000ab00", "ls_r13_"
- "0x0000ab00"},
- {{pl, r11, 0x00000ab0}, false, al, "pl r11 0x00000ab0", "pl_r11_"
- "0x00000ab0"},
- {{ge, r14, 0x3fc00000}, false, al, "ge r14 0x3fc00000", "ge_r14_"
- "0x3fc00000"},
- {{al, r14, 0x0000ab00}, false, al, "al r14 0x0000ab00", "al_r14_"
- "0x0000ab00"},
- {{lt, r6, 0xac000002}, false, al, "lt r6 0xac000002", "lt_r6_0xac000002"},
- {{vc, r3, 0x000ff000}, false, al, "vc r3 0x000ff000", "vc_r3_0x000ff000"},
- {{ne, r8, 0xfc000003}, false, al, "ne r8 0xfc000003", "ne_r8_0xfc000003"},
- {{cs, r6, 0x000ab000}, false, al, "cs r6 0x000ab000", "cs_r6_0x000ab000"},
- {{hi, r15, 0x0002ac00}, false, al, "hi r15 0x0002ac00", "hi_r15_"
- "0x0002ac00"},
- {{pl, r6, 0x00000ff0}, false, al, "pl r6 0x00000ff0", "pl_r6_0x00000ff0"},
- {{hi, r15, 0x03fc0000}, false, al, "hi r15 0x03fc0000", "hi_r15_"
- "0x03fc0000"},
- {{cc, r6, 0x0003fc00}, false, al, "cc r6 0x0003fc00", "cc_r6_0x0003fc00"},
- {{eq, r12, 0x000002ac}, false, al, "eq r12 0x000002ac", "eq_r12_"
- "0x000002ac"},
- {{ls, r11, 0x02ac0000}, false, al, "ls r11 0x02ac0000", "ls_r11_"
- "0x02ac0000"},
- {{ge, r13, 0x00ff0000}, false, al, "ge r13 0x00ff0000", "ge_r13_"
- "0x00ff0000"},
- {{lt, r4, 0x0003fc00}, false, al, "lt r4 0x0003fc00", "lt_r4_0x0003fc00"},
- {{mi, r0, 0x0000ab00}, false, al, "mi r0 0x0000ab00", "mi_r0_0x0000ab00"},
- {{lt, r4, 0x000000ab}, false, al, "lt r4 0x000000ab", "lt_r4_0x000000ab"},
- {{ls, r2, 0xc000003f}, false, al, "ls r2 0xc000003f", "ls_r2_0xc000003f"},
- {{pl, r1, 0x000000ab}, false, al, "pl r1 0x000000ab", "pl_r1_0x000000ab"},
- {{ne, r10, 0x0000ff00}, false, al, "ne r10 0x0000ff00", "ne_r10_"
- "0x0000ff00"},
- {{vc, r15, 0x00000ab0}, false, al, "vc r15 0x00000ab0", "vc_r15_"
- "0x00000ab0"},
- {{eq, r6, 0x02ac0000}, false, al, "eq r6 0x02ac0000", "eq_r6_0x02ac0000"},
- {{cc, r11, 0x00000000}, false, al, "cc r11 0x00000000", "cc_r11_"
- "0x00000000"},
- {{mi, r7, 0x002ac000}, false, al, "mi r7 0x002ac000", "mi_r7_0x002ac000"},
- {{hi, r14, 0xab000000}, false, al, "hi r14 0xab000000", "hi_r14_"
- "0xab000000"},
- {{vc, r6, 0x0000ff00}, false, al, "vc r6 0x0000ff00", "vc_r6_0x0000ff00"},
- {{al, r5, 0x000002ac}, false, al, "al r5 0x000002ac", "al_r5_0x000002ac"},
- {{cc, r12, 0x0002ac00}, false, al, "cc r12 0x0002ac00", "cc_r12_"
- "0x0002ac00"},
- {{cc, r10, 0x000000ab}, false, al, "cc r10 0x000000ab", "cc_r10_"
- "0x000000ab"},
- {{gt, r5, 0x000002ac}, false, al, "gt r5 0x000002ac", "gt_r5_0x000002ac"},
- {{vc, r3, 0x00000000}, false, al, "vc r3 0x00000000", "vc_r3_0x00000000"},
- {{gt, r12, 0xac000002}, false, al, "gt r12 0xac000002", "gt_r12_"
- "0xac000002"},
- {{al, r10, 0x00ab0000}, false, al, "al r10 0x00ab0000", "al_r10_"
- "0x00ab0000"},
- {{mi, r5, 0x000ff000}, false, al, "mi r5 0x000ff000", "mi_r5_0x000ff000"},
- {{pl, r1, 0x00000ff0}, false, al, "pl r1 0x00000ff0", "pl_r1_0x00000ff0"},
- {{lt, r7, 0xf000000f}, false, al, "lt r7 0xf000000f", "lt_r7_0xf000000f"},
- {{ge, r14, 0x002ac000}, false, al, "ge r14 0x002ac000", "ge_r14_"
- "0x002ac000"},
- {{cc, r0, 0xac000002}, false, al, "cc r0 0xac000002", "cc_r0_0xac000002"},
- {{cs, r2, 0x00000ab0}, false, al, "cs r2 0x00000ab0", "cs_r2_0x00000ab0"},
- {{vs, r0, 0x00002ac0}, false, al, "vs r0 0x00002ac0", "vs_r0_0x00002ac0"},
- {{le, r10, 0x000ab000}, false, al, "le r10 0x000ab000", "le_r10_"
- "0x000ab000"},
- {{ge, r9, 0x0003fc00}, false, al, "ge r9 0x0003fc00", "ge_r9_0x0003fc00"},
- {{lt, r1, 0x00003fc0}, false, al, "lt r1 0x00003fc0", "lt_r1_0x00003fc0"},
- {{ge, r5, 0x000000ff}, false, al, "ge r5 0x000000ff", "ge_r5_0x000000ff"},
- {{le, r11, 0x2ac00000}, false, al, "le r11 0x2ac00000", "le_r11_"
- "0x2ac00000"},
- {{le, r9, 0x002ac000}, false, al, "le r9 0x002ac000", "le_r9_0x002ac000"},
- {{hi, r12, 0xf000000f}, false, al, "hi r12 0xf000000f", "hi_r12_"
- "0xf000000f"},
- {{lt, r3, 0x02ac0000}, false, al, "lt r3 0x02ac0000", "lt_r3_0x02ac0000"},
- {{al, r13, 0x2ac00000}, false, al, "al r13 0x2ac00000", "al_r13_"
- "0x2ac00000"},
- {{vs, r12, 0x00000ab0}, false, al, "vs r12 0x00000ab0", "vs_r12_"
- "0x00000ab0"},
- {{gt, r3, 0x3fc00000}, false, al, "gt r3 0x3fc00000", "gt_r3_0x3fc00000"},
- {{gt, r0, 0x2ac00000}, false, al, "gt r0 0x2ac00000", "gt_r0_0x2ac00000"},
- {{eq, r15, 0x000002ac}, false, al, "eq r15 0x000002ac", "eq_r15_"
- "0x000002ac"},
- {{gt, r1, 0x000ab000}, false, al, "gt r1 0x000ab000", "gt_r1_0x000ab000"},
- {{gt, r2, 0x2ac00000}, false, al, "gt r2 0x2ac00000", "gt_r2_0x2ac00000"},
- {{mi, r15, 0x00ab0000}, false, al, "mi r15 0x00ab0000", "mi_r15_"
- "0x00ab0000"},
- {{mi, r1, 0x000ab000}, false, al, "mi r1 0x000ab000", "mi_r1_0x000ab000"},
- {{ge, r12, 0x0ab00000}, false, al, "ge r12 0x0ab00000", "ge_r12_"
- "0x0ab00000"},
- {{gt, r5, 0x000000ab}, false, al, "gt r5 0x000000ab", "gt_r5_0x000000ab"},
- {{gt, r4, 0x00000000}, false, al, "gt r4 0x00000000", "gt_r4_0x00000000"},
- {{al, r13, 0xc000003f}, false, al, "al r13 0xc000003f", "al_r13_"
- "0xc000003f"},
- {{ls, r7, 0xff000000}, false, al, "ls r7 0xff000000", "ls_r7_0xff000000"},
- {{vs, r0, 0x00000ff0}, false, al, "vs r0 0x00000ff0", "vs_r0_0x00000ff0"},
- {{hi, r9, 0x02ac0000}, false, al, "hi r9 0x02ac0000", "hi_r9_0x02ac0000"},
- {{cs, r1, 0xc000002a}, false, al, "cs r1 0xc000002a", "cs_r1_0xc000002a"},
- {{hi, r8, 0xf000000f}, false, al, "hi r8 0xf000000f", "hi_r8_0xf000000f"},
- {{gt, r1, 0xb000000a}, false, al, "gt r1 0xb000000a", "gt_r1_0xb000000a"},
- {{gt, r2, 0x0002ac00}, false, al, "gt r2 0x0002ac00", "gt_r2_0x0002ac00"},
- {{vs, r5, 0x000000ab}, false, al, "vs r5 0x000000ab", "vs_r5_0x000000ab"},
- {{cc, r12, 0x000ff000}, false, al, "cc r12 0x000ff000", "cc_r12_"
- "0x000ff000"},
- {{ge, r0, 0x00003fc0}, false, al, "ge r0 0x00003fc0", "ge_r0_0x00003fc0"},
- {{ls, r12, 0x00ab0000}, false, al, "ls r12 0x00ab0000", "ls_r12_"
- "0x00ab0000"},
- {{vs, r4, 0x000003fc}, false, al, "vs r4 0x000003fc", "vs_r4_0x000003fc"},
- {{ls, r4, 0x00003fc0}, false, al, "ls r4 0x00003fc0", "ls_r4_0x00003fc0"},
- {{eq, r9, 0xb000000a}, false, al, "eq r9 0xb000000a", "eq_r9_0xb000000a"},
- {{cs, r9, 0x2ac00000}, false, al, "cs r9 0x2ac00000", "cs_r9_0x2ac00000"},
- {{vs, r12, 0x0000ff00}, false, al, "vs r12 0x0000ff00", "vs_r12_"
- "0x0000ff00"},
- {{vc, r1, 0x0000ff00}, false, al, "vc r1 0x0000ff00", "vc_r1_0x0000ff00"},
- {{hi, r12, 0xff000000}, false, al, "hi r12 0xff000000", "hi_r12_"
- "0xff000000"},
- {{cs, r12, 0x0002ac00}, false, al, "cs r12 0x0002ac00", "cs_r12_"
- "0x0002ac00"},
- {{mi, r11, 0x03fc0000}, false, al, "mi r11 0x03fc0000", "mi_r11_"
- "0x03fc0000"},
- {{eq, r2, 0x000ff000}, false, al, "eq r2 0x000ff000", "eq_r2_0x000ff000"},
- {{al, r6, 0x00000ff0}, false, al, "al r6 0x00000ff0", "al_r6_0x00000ff0"},
- {{cs, r7, 0x000003fc}, false, al, "cs r7 0x000003fc", "cs_r7_0x000003fc"},
- {{pl, r11, 0xb000000a}, false, al, "pl r11 0xb000000a", "pl_r11_"
- "0xb000000a"},
- {{ne, r15, 0x000ff000}, false, al, "ne r15 0x000ff000", "ne_r15_"
- "0x000ff000"},
- {{mi, r14, 0x00ab0000}, false, al, "mi r14 0x00ab0000", "mi_r14_"
- "0x00ab0000"},
- {{hi, r4, 0x0000ff00}, false, al, "hi r4 0x0000ff00", "hi_r4_0x0000ff00"},
- {{ge, r1, 0x000002ac}, false, al, "ge r1 0x000002ac", "ge_r1_0x000002ac"},
- {{gt, r7, 0xb000000a}, false, al, "gt r7 0xb000000a", "gt_r7_0xb000000a"},
- {{gt, r2, 0x00000000}, false, al, "gt r2 0x00000000", "gt_r2_0x00000000"},
- {{cc, r2, 0xb000000a}, false, al, "cc r2 0xb000000a", "cc_r2_0xb000000a"},
- {{vs, r14, 0x000ab000}, false, al, "vs r14 0x000ab000", "vs_r14_"
- "0x000ab000"},
- {{lt, r5, 0x000002ac}, false, al, "lt r5 0x000002ac", "lt_r5_0x000002ac"},
- {{cc, r13, 0x0000ff00}, false, al, "cc r13 0x0000ff00", "cc_r13_"
- "0x0000ff00"},
- {{hi, r15, 0x000002ac}, false, al, "hi r15 0x000002ac", "hi_r15_"
- "0x000002ac"},
- {{ge, r1, 0x00ff0000}, false, al, "ge r1 0x00ff0000", "ge_r1_0x00ff0000"},
- {{lt, r15, 0x00002ac0}, false, al, "lt r15 0x00002ac0", "lt_r15_"
- "0x00002ac0"},
- {{lt, r8, 0x000ff000}, false, al, "lt r8 0x000ff000", "lt_r8_0x000ff000"},
- {{hi, r10, 0xc000002a}, false, al, "hi r10 0xc000002a", "hi_r10_"
- "0xc000002a"},
- {{eq, r12, 0x000ab000}, false, al, "eq r12 0x000ab000", "eq_r12_"
- "0x000ab000"},
- {{vs, r11, 0x00002ac0}, false, al, "vs r11 0x00002ac0", "vs_r11_"
- "0x00002ac0"},
- {{hi, r10, 0x000003fc}, false, al, "hi r10 0x000003fc", "hi_r10_"
- "0x000003fc"},
- {{cc, r8, 0x000003fc}, false, al, "cc r8 0x000003fc", "cc_r8_0x000003fc"},
- {{vc, r11, 0x00000ab0}, false, al, "vc r11 0x00000ab0", "vc_r11_"
- "0x00000ab0"},
- {{le, r3, 0xac000002}, false, al, "le r3 0xac000002", "le_r3_0xac000002"},
- {{cc, r11, 0xc000002a}, false, al, "cc r11 0xc000002a", "cc_r11_"
- "0xc000002a"},
- {{lt, r6, 0xab000000}, false, al, "lt r6 0xab000000", "lt_r6_0xab000000"},
- {{hi, r1, 0x00003fc0}, false, al, "hi r1 0x00003fc0", "hi_r1_0x00003fc0"},
- {{vc, r3, 0x00002ac0}, false, al, "vc r3 0x00002ac0", "vc_r3_0x00002ac0"},
- {{vc, r6, 0x00000ab0}, false, al, "vc r6 0x00000ab0", "vc_r6_0x00000ab0"},
- {{ls, r6, 0x03fc0000}, false, al, "ls r6 0x03fc0000", "ls_r6_0x03fc0000"},
- {{hi, r11, 0x0ab00000}, false, al, "hi r11 0x0ab00000", "hi_r11_"
- "0x0ab00000"},
- {{lt, r12, 0x0002ac00}, false, al, "lt r12 0x0002ac00", "lt_r12_"
- "0x0002ac00"},
- {{al, r8, 0xab000000}, false, al, "al r8 0xab000000", "al_r8_0xab000000"},
- {{vs, r2, 0x00000ab0}, false, al, "vs r2 0x00000ab0", "vs_r2_0x00000ab0"},
- {{hi, r14, 0x02ac0000}, false, al, "hi r14 0x02ac0000", "hi_r14_"
- "0x02ac0000"},
- {{cs, r3, 0x00000ff0}, false, al, "cs r3 0x00000ff0", "cs_r3_0x00000ff0"},
- {{cc, r9, 0xb000000a}, false, al, "cc r9 0xb000000a", "cc_r9_0xb000000a"},
- {{vc, r9, 0x00000ff0}, false, al, "vc r9 0x00000ff0", "vc_r9_0x00000ff0"},
- {{ne, r9, 0xab000000}, false, al, "ne r9 0xab000000", "ne_r9_0xab000000"},
- {{cc, r10, 0xb000000a}, false, al, "cc r10 0xb000000a", "cc_r10_"
- "0xb000000a"},
- {{ls, r11, 0xb000000a}, false, al, "ls r11 0xb000000a", "ls_r11_"
- "0xb000000a"},
- {{lt, r11, 0x00ff0000}, false, al, "lt r11 0x00ff0000", "lt_r11_"
- "0x00ff0000"},
- {{lt, r3, 0x000003fc}, false, al, "lt r3 0x000003fc", "lt_r3_0x000003fc"},
- {{gt, r14, 0x00002ac0}, false, al, "gt r14 0x00002ac0", "gt_r14_"
- "0x00002ac0"},
- {{ls, r8, 0xc000003f}, false, al, "ls r8 0xc000003f", "ls_r8_0xc000003f"},
- {{al, r11, 0x000ab000}, false, al, "al r11 0x000ab000", "al_r11_"
- "0x000ab000"},
- {{lt, r7, 0x000ab000}, false, al, "lt r7 0x000ab000", "lt_r7_0x000ab000"},
- {{vs, r14, 0xff000000}, false, al, "vs r14 0xff000000", "vs_r14_"
- "0xff000000"},
- {{vc, r2, 0xab000000}, false, al, "vc r2 0xab000000", "vc_r2_0xab000000"},
- {{ne, r3, 0x00000ff0}, false, al, "ne r3 0x00000ff0", "ne_r3_0x00000ff0"},
- {{ne, r15, 0x02ac0000}, false, al, "ne r15 0x02ac0000", "ne_r15_"
- "0x02ac0000"},
- {{gt, r3, 0x000ff000}, false, al, "gt r3 0x000ff000", "gt_r3_0x000ff000"},
- {{pl, r1, 0x2ac00000}, false, al, "pl r1 0x2ac00000", "pl_r1_0x2ac00000"},
- {{mi, r1, 0x00002ac0}, false, al, "mi r1 0x00002ac0", "mi_r1_0x00002ac0"},
- {{vc, r6, 0xac000002}, false, al, "vc r6 0xac000002", "vc_r6_0xac000002"},
- {{vs, r2, 0x0ff00000}, false, al, "vs r2 0x0ff00000", "vs_r2_0x0ff00000"},
- {{ge, r2, 0x000003fc}, false, al, "ge r2 0x000003fc", "ge_r2_0x000003fc"},
- {{cs, r15, 0x0000ff00}, false, al, "cs r15 0x0000ff00", "cs_r15_"
- "0x0000ff00"},
- {{lt, r3, 0x000002ac}, false, al, "lt r3 0x000002ac", "lt_r3_0x000002ac"},
- {{cs, r6, 0xff000000}, false, al, "cs r6 0xff000000", "cs_r6_0xff000000"},
- {{ge, r14, 0x000000ff}, false, al, "ge r14 0x000000ff", "ge_r14_"
- "0x000000ff"},
- {{gt, r7, 0x03fc0000}, false, al, "gt r7 0x03fc0000", "gt_r7_0x03fc0000"},
- {{ne, r8, 0x000ff000}, false, al, "ne r8 0x000ff000", "ne_r8_0x000ff000"},
- {{gt, r14, 0xc000002a}, false, al, "gt r14 0xc000002a", "gt_r14_"
- "0xc000002a"},
- {{hi, r12, 0x0000ff00}, false, al, "hi r12 0x0000ff00", "hi_r12_"
- "0x0000ff00"},
- {{le, r15, 0x00003fc0}, false, al, "le r15 0x00003fc0", "le_r15_"
- "0x00003fc0"},
- {{eq, r13, 0x000ab000}, false, al, "eq r13 0x000ab000", "eq_r13_"
- "0x000ab000"},
- {{vc, r7, 0x000ab000}, false, al, "vc r7 0x000ab000", "vc_r7_0x000ab000"},
- {{gt, r7, 0xf000000f}, false, al, "gt r7 0xf000000f", "gt_r7_0xf000000f"},
- {{cc, r6, 0xac000002}, false, al, "cc r6 0xac000002", "cc_r6_0xac000002"},
- {{cs, r14, 0x000000ff}, false, al, "cs r14 0x000000ff", "cs_r14_"
- "0x000000ff"},
- {{ne, r2, 0x0003fc00}, false, al, "ne r2 0x0003fc00", "ne_r2_0x0003fc00"},
- {{vs, r1, 0x002ac000}, false, al, "vs r1 0x002ac000", "vs_r1_0x002ac000"},
- {{eq, r8, 0x002ac000}, false, al, "eq r8 0x002ac000", "eq_r8_0x002ac000"},
- {{lt, r8, 0x0000ff00}, false, al, "lt r8 0x0000ff00", "lt_r8_0x0000ff00"},
- {{vs, r9, 0xc000003f}, false, al, "vs r9 0xc000003f", "vs_r9_0xc000003f"},
- {{mi, r11, 0xff000000}, false, al, "mi r11 0xff000000", "mi_r11_"
- "0xff000000"},
- {{cs, r12, 0x03fc0000}, false, al, "cs r12 0x03fc0000", "cs_r12_"
- "0x03fc0000"},
- {{lt, r5, 0xc000002a}, false, al, "lt r5 0xc000002a", "lt_r5_0xc000002a"},
- {{vc, r6, 0x000000ab}, false, al, "vc r6 0x000000ab", "vc_r6_0x000000ab"},
- {{ls, r10, 0x0ab00000}, false, al, "ls r10 0x0ab00000", "ls_r10_"
- "0x0ab00000"},
- {{al, r11, 0x00ff0000}, false, al, "al r11 0x00ff0000", "al_r11_"
- "0x00ff0000"},
- {{hi, r13, 0x00000ab0}, false, al, "hi r13 0x00000ab0", "hi_r13_"
- "0x00000ab0"},
- {{ls, r0, 0xab000000}, false, al, "ls r0 0xab000000", "ls_r0_0xab000000"},
- {{le, r5, 0xab000000}, false, al, "le r5 0xab000000", "le_r5_0xab000000"},
- {{vs, r4, 0x00ff0000}, false, al, "vs r4 0x00ff0000", "vs_r4_0x00ff0000"},
- {{al, r10, 0x03fc0000}, false, al, "al r10 0x03fc0000", "al_r10_"
- "0x03fc0000"},
- {{al, r8, 0x000003fc}, false, al, "al r8 0x000003fc", "al_r8_0x000003fc"},
- {{vs, r11, 0xab000000}, false, al, "vs r11 0xab000000", "vs_r11_"
- "0xab000000"},
- {{eq, r2, 0x00000ff0}, false, al, "eq r2 0x00000ff0", "eq_r2_0x00000ff0"},
- {{vc, r4, 0x00000ff0}, false, al, "vc r4 0x00000ff0", "vc_r4_0x00000ff0"},
- {{vc, r9, 0x00002ac0}, false, al, "vc r9 0x00002ac0", "vc_r9_0x00002ac0"},
- {{cc, r11, 0x00ff0000}, false, al, "cc r11 0x00ff0000", "cc_r11_"
- "0x00ff0000"},
- {{cc, r13, 0x00ff0000}, false, al, "cc r13 0x00ff0000", "cc_r13_"
- "0x00ff0000"},
- {{pl, r0, 0x00000ab0}, false, al, "pl r0 0x00000ab0", "pl_r0_0x00000ab0"},
- {{al, r2, 0x02ac0000}, false, al, "al r2 0x02ac0000", "al_r2_0x02ac0000"},
- {{hi, r11, 0xc000002a}, false, al, "hi r11 0xc000002a", "hi_r11_"
- "0xc000002a"},
- {{ne, r3, 0xf000000f}, false, al, "ne r3 0xf000000f", "ne_r3_0xf000000f"},
- {{cc, r15, 0x0ab00000}, false, al, "cc r15 0x0ab00000", "cc_r15_"
- "0x0ab00000"},
- {{ge, r12, 0x00ff0000}, false, al, "ge r12 0x00ff0000", "ge_r12_"
- "0x00ff0000"},
- {{le, r12, 0x002ac000}, false, al, "le r12 0x002ac000", "le_r12_"
- "0x002ac000"},
- {{mi, r12, 0xc000003f}, false, al, "mi r12 0xc000003f", "mi_r12_"
- "0xc000003f"},
- {{lt, r0, 0xfc000003}, false, al, "lt r0 0xfc000003", "lt_r0_0xfc000003"},
- {{vc, r15, 0x000ab000}, false, al, "vc r15 0x000ab000", "vc_r15_"
- "0x000ab000"},
- {{pl, r5, 0x3fc00000}, false, al, "pl r5 0x3fc00000", "pl_r5_0x3fc00000"},
- {{vs, r15, 0x00ab0000}, false, al, "vs r15 0x00ab0000", "vs_r15_"
- "0x00ab0000"},
- {{hi, r3, 0x00ff0000}, false, al, "hi r3 0x00ff0000", "hi_r3_0x00ff0000"},
- {{lt, r8, 0x000000ff}, false, al, "lt r8 0x000000ff", "lt_r8_0x000000ff"},
- {{le, r2, 0x000000ff}, false, al, "le r2 0x000000ff", "le_r2_0x000000ff"},
- {{vs, r0, 0x0002ac00}, false, al, "vs r0 0x0002ac00", "vs_r0_0x0002ac00"},
- {{vs, r2, 0xff000000}, false, al, "vs r2 0xff000000", "vs_r2_0xff000000"},
- {{pl, r6, 0xab000000}, false, al, "pl r6 0xab000000", "pl_r6_0xab000000"},
- {{ls, r4, 0x3fc00000}, false, al, "ls r4 0x3fc00000", "ls_r4_0x3fc00000"},
- {{ls, r3, 0x000ab000}, false, al, "ls r3 0x000ab000", "ls_r3_0x000ab000"},
- {{eq, r11, 0x000ab000}, false, al, "eq r11 0x000ab000", "eq_r11_"
- "0x000ab000"},
- {{vc, r6, 0x03fc0000}, false, al, "vc r6 0x03fc0000", "vc_r6_0x03fc0000"},
- {{mi, r14, 0x0000ab00}, false, al, "mi r14 0x0000ab00", "mi_r14_"
- "0x0000ab00"},
- {{pl, r8, 0xab000000}, false, al, "pl r8 0xab000000", "pl_r8_0xab000000"},
- {{pl, r8, 0xc000003f}, false, al, "pl r8 0xc000003f", "pl_r8_0xc000003f"},
- {{eq, r14, 0x003fc000}, false, al, "eq r14 0x003fc000", "eq_r14_"
- "0x003fc000"},
- {{vs, r9, 0x00ff0000}, false, al, "vs r9 0x00ff0000", "vs_r9_0x00ff0000"},
- {{vs, r1, 0x00002ac0}, false, al, "vs r1 0x00002ac0", "vs_r1_0x00002ac0"},
- {{le, r1, 0x00ff0000}, false, al, "le r1 0x00ff0000", "le_r1_0x00ff0000"},
- {{lt, r7, 0x000ff000}, false, al, "lt r7 0x000ff000", "lt_r7_0x000ff000"},
- {{mi, r6, 0x002ac000}, false, al, "mi r6 0x002ac000", "mi_r6_0x002ac000"},
- {{vc, r11, 0xc000003f}, false, al, "vc r11 0xc000003f", "vc_r11_"
- "0xc000003f"},
- {{lt, r4, 0x00000000}, false, al, "lt r4 0x00000000", "lt_r4_0x00000000"},
- {{pl, r0, 0xac000002}, false, al, "pl r0 0xac000002", "pl_r0_0xac000002"},
- {{ls, r10, 0xc000003f}, false, al, "ls r10 0xc000003f", "ls_r10_"
- "0xc000003f"},
- {{cc, r15, 0xc000002a},
- false,
- al,
- "cc r15 0xc000002a",
- "cc_r15_0xc000002a"}};
+const TestData kTests[] =
+ {{{ls, r10, 0x00ab0000},
+ false,
+ al,
+ "ls r10 0x00ab0000",
+ "ls_r10_"
+ "0x00ab0000"},
+ {{ls, r13, 0xf000000f},
+ false,
+ al,
+ "ls r13 0xf000000f",
+ "ls_r13_"
+ "0xf000000f"},
+ {{pl, r4, 0x00003fc0}, false, al, "pl r4 0x00003fc0", "pl_r4_0x00003fc0"},
+ {{ne, r15, 0x00ab0000},
+ false,
+ al,
+ "ne r15 0x00ab0000",
+ "ne_r15_"
+ "0x00ab0000"},
+ {{lt, r14, 0x002ac000},
+ false,
+ al,
+ "lt r14 0x002ac000",
+ "lt_r14_"
+ "0x002ac000"},
+ {{eq, r15, 0x000000ab},
+ false,
+ al,
+ "eq r15 0x000000ab",
+ "eq_r15_"
+ "0x000000ab"},
+ {{al, r6, 0x002ac000}, false, al, "al r6 0x002ac000", "al_r6_0x002ac000"},
+ {{pl, r2, 0x0002ac00}, false, al, "pl r2 0x0002ac00", "pl_r2_0x0002ac00"},
+ {{mi, r8, 0x00000000}, false, al, "mi r8 0x00000000", "mi_r8_0x00000000"},
+ {{pl, r13, 0x02ac0000},
+ false,
+ al,
+ "pl r13 0x02ac0000",
+ "pl_r13_"
+ "0x02ac0000"},
+ {{ge, r9, 0xac000002}, false, al, "ge r9 0xac000002", "ge_r9_0xac000002"},
+ {{ne, r3, 0x000003fc}, false, al, "ne r3 0x000003fc", "ne_r3_0x000003fc"},
+ {{pl, r10, 0xf000000f},
+ false,
+ al,
+ "pl r10 0xf000000f",
+ "pl_r10_"
+ "0xf000000f"},
+ {{cc, r0, 0x00003fc0}, false, al, "cc r0 0x00003fc0", "cc_r0_0x00003fc0"},
+ {{gt, r6, 0x002ac000}, false, al, "gt r6 0x002ac000", "gt_r6_0x002ac000"},
+ {{cs, r1, 0x00ff0000}, false, al, "cs r1 0x00ff0000", "cs_r1_0x00ff0000"},
+ {{lt, r8, 0x000002ac}, false, al, "lt r8 0x000002ac", "lt_r8_0x000002ac"},
+ {{vc, r4, 0x2ac00000}, false, al, "vc r4 0x2ac00000", "vc_r4_0x2ac00000"},
+ {{al, r11, 0x00003fc0},
+ false,
+ al,
+ "al r11 0x00003fc0",
+ "al_r11_"
+ "0x00003fc0"},
+ {{lt, r9, 0x000000ab}, false, al, "lt r9 0x000000ab", "lt_r9_0x000000ab"},
+ {{le, r15, 0x0000ff00},
+ false,
+ al,
+ "le r15 0x0000ff00",
+ "le_r15_"
+ "0x0000ff00"},
+ {{vc, r9, 0xff000000}, false, al, "vc r9 0xff000000", "vc_r9_0xff000000"},
+ {{ge, r0, 0x0ff00000}, false, al, "ge r0 0x0ff00000", "ge_r0_0x0ff00000"},
+ {{vc, r1, 0x00ff0000}, false, al, "vc r1 0x00ff0000", "vc_r1_0x00ff0000"},
+ {{vs, r12, 0x00000ff0},
+ false,
+ al,
+ "vs r12 0x00000ff0",
+ "vs_r12_"
+ "0x00000ff0"},
+ {{pl, r13, 0x003fc000},
+ false,
+ al,
+ "pl r13 0x003fc000",
+ "pl_r13_"
+ "0x003fc000"},
+ {{eq, r4, 0xff000000}, false, al, "eq r4 0xff000000", "eq_r4_0xff000000"},
+ {{eq, r12, 0xac000002},
+ false,
+ al,
+ "eq r12 0xac000002",
+ "eq_r12_"
+ "0xac000002"},
+ {{al, r11, 0x000000ab},
+ false,
+ al,
+ "al r11 0x000000ab",
+ "al_r11_"
+ "0x000000ab"},
+ {{ge, r10, 0x00ff0000},
+ false,
+ al,
+ "ge r10 0x00ff0000",
+ "ge_r10_"
+ "0x00ff0000"},
+ {{vs, r7, 0x0002ac00}, false, al, "vs r7 0x0002ac00", "vs_r7_0x0002ac00"},
+ {{le, r7, 0x002ac000}, false, al, "le r7 0x002ac000", "le_r7_0x002ac000"},
+ {{vc, r9, 0x000003fc}, false, al, "vc r9 0x000003fc", "vc_r9_0x000003fc"},
+ {{vs, r14, 0x002ac000},
+ false,
+ al,
+ "vs r14 0x002ac000",
+ "vs_r14_"
+ "0x002ac000"},
+ {{vs, r1, 0x000003fc}, false, al, "vs r1 0x000003fc", "vs_r1_0x000003fc"},
+ {{le, r13, 0x3fc00000},
+ false,
+ al,
+ "le r13 0x3fc00000",
+ "le_r13_"
+ "0x3fc00000"},
+ {{ne, r9, 0x0002ac00}, false, al, "ne r9 0x0002ac00", "ne_r9_0x0002ac00"},
+ {{al, r0, 0x00ab0000}, false, al, "al r0 0x00ab0000", "al_r0_0x00ab0000"},
+ {{hi, r5, 0xff000000}, false, al, "hi r5 0xff000000", "hi_r5_0xff000000"},
+ {{hi, r14, 0x000000ab},
+ false,
+ al,
+ "hi r14 0x000000ab",
+ "hi_r14_"
+ "0x000000ab"},
+ {{hi, r14, 0x000003fc},
+ false,
+ al,
+ "hi r14 0x000003fc",
+ "hi_r14_"
+ "0x000003fc"},
+ {{mi, r14, 0x03fc0000},
+ false,
+ al,
+ "mi r14 0x03fc0000",
+ "mi_r14_"
+ "0x03fc0000"},
+ {{mi, r8, 0x000000ff}, false, al, "mi r8 0x000000ff", "mi_r8_0x000000ff"},
+ {{gt, r6, 0x000ab000}, false, al, "gt r6 0x000ab000", "gt_r6_0x000ab000"},
+ {{mi, r12, 0x0ff00000},
+ false,
+ al,
+ "mi r12 0x0ff00000",
+ "mi_r12_"
+ "0x0ff00000"},
+ {{vs, r3, 0xff000000}, false, al, "vs r3 0xff000000", "vs_r3_0xff000000"},
+ {{vc, r7, 0x00ab0000}, false, al, "vc r7 0x00ab0000", "vc_r7_0x00ab0000"},
+ {{hi, r6, 0x03fc0000}, false, al, "hi r6 0x03fc0000", "hi_r6_0x03fc0000"},
+ {{ls, r5, 0x00ab0000}, false, al, "ls r5 0x00ab0000", "ls_r5_0x00ab0000"},
+ {{ls, r4, 0x000002ac}, false, al, "ls r4 0x000002ac", "ls_r4_0x000002ac"},
+ {{le, r13, 0x03fc0000},
+ false,
+ al,
+ "le r13 0x03fc0000",
+ "le_r13_"
+ "0x03fc0000"},
+ {{le, r15, 0x002ac000},
+ false,
+ al,
+ "le r15 0x002ac000",
+ "le_r15_"
+ "0x002ac000"},
+ {{pl, r6, 0xff000000}, false, al, "pl r6 0xff000000", "pl_r6_0xff000000"},
+ {{gt, r7, 0x00000ab0}, false, al, "gt r7 0x00000ab0", "gt_r7_0x00000ab0"},
+ {{hi, r3, 0x2ac00000}, false, al, "hi r3 0x2ac00000", "hi_r3_0x2ac00000"},
+ {{gt, r15, 0x00000000},
+ false,
+ al,
+ "gt r15 0x00000000",
+ "gt_r15_"
+ "0x00000000"},
+ {{gt, r1, 0x002ac000}, false, al, "gt r1 0x002ac000", "gt_r1_0x002ac000"},
+ {{hi, r2, 0x0000ff00}, false, al, "hi r2 0x0000ff00", "hi_r2_0x0000ff00"},
+ {{pl, r10, 0x03fc0000},
+ false,
+ al,
+ "pl r10 0x03fc0000",
+ "pl_r10_"
+ "0x03fc0000"},
+ {{gt, r10, 0xb000000a},
+ false,
+ al,
+ "gt r10 0xb000000a",
+ "gt_r10_"
+ "0xb000000a"},
+ {{ge, r10, 0x002ac000},
+ false,
+ al,
+ "ge r10 0x002ac000",
+ "ge_r10_"
+ "0x002ac000"},
+ {{al, r4, 0x0ab00000}, false, al, "al r4 0x0ab00000", "al_r4_0x0ab00000"},
+ {{ne, r12, 0x00000000},
+ false,
+ al,
+ "ne r12 0x00000000",
+ "ne_r12_"
+ "0x00000000"},
+ {{cs, r5, 0x00003fc0}, false, al, "cs r5 0x00003fc0", "cs_r5_0x00003fc0"},
+ {{cc, r6, 0x000000ff}, false, al, "cc r6 0x000000ff", "cc_r6_0x000000ff"},
+ {{hi, r14, 0x03fc0000},
+ false,
+ al,
+ "hi r14 0x03fc0000",
+ "hi_r14_"
+ "0x03fc0000"},
+ {{hi, r3, 0xab000000}, false, al, "hi r3 0xab000000", "hi_r3_0xab000000"},
+ {{lt, r2, 0x000ff000}, false, al, "lt r2 0x000ff000", "lt_r2_0x000ff000"},
+ {{gt, r3, 0x00ff0000}, false, al, "gt r3 0x00ff0000", "gt_r3_0x00ff0000"},
+ {{le, r14, 0x0ab00000},
+ false,
+ al,
+ "le r14 0x0ab00000",
+ "le_r14_"
+ "0x0ab00000"},
+ {{cc, r13, 0x03fc0000},
+ false,
+ al,
+ "cc r13 0x03fc0000",
+ "cc_r13_"
+ "0x03fc0000"},
+ {{gt, r6, 0xb000000a}, false, al, "gt r6 0xb000000a", "gt_r6_0xb000000a"},
+ {{pl, r3, 0x2ac00000}, false, al, "pl r3 0x2ac00000", "pl_r3_0x2ac00000"},
+ {{mi, r15, 0x000000ff},
+ false,
+ al,
+ "mi r15 0x000000ff",
+ "mi_r15_"
+ "0x000000ff"},
+ {{gt, r4, 0x00000ff0}, false, al, "gt r4 0x00000ff0", "gt_r4_0x00000ff0"},
+ {{ne, r4, 0x000003fc}, false, al, "ne r4 0x000003fc", "ne_r4_0x000003fc"},
+ {{ne, r5, 0x00ff0000}, false, al, "ne r5 0x00ff0000", "ne_r5_0x00ff0000"},
+ {{ge, r7, 0x000003fc}, false, al, "ge r7 0x000003fc", "ge_r7_0x000003fc"},
+ {{vs, r5, 0x00000000}, false, al, "vs r5 0x00000000", "vs_r5_0x00000000"},
+ {{vs, r6, 0x2ac00000}, false, al, "vs r6 0x2ac00000", "vs_r6_0x2ac00000"},
+ {{mi, r1, 0x3fc00000}, false, al, "mi r1 0x3fc00000", "mi_r1_0x3fc00000"},
+ {{gt, r8, 0xc000002a}, false, al, "gt r8 0xc000002a", "gt_r8_0xc000002a"},
+ {{pl, r10, 0x000000ab},
+ false,
+ al,
+ "pl r10 0x000000ab",
+ "pl_r10_"
+ "0x000000ab"},
+ {{mi, r6, 0xab000000}, false, al, "mi r6 0xab000000", "mi_r6_0xab000000"},
+ {{ls, r12, 0x3fc00000},
+ false,
+ al,
+ "ls r12 0x3fc00000",
+ "ls_r12_"
+ "0x3fc00000"},
+ {{vs, r13, 0x03fc0000},
+ false,
+ al,
+ "vs r13 0x03fc0000",
+ "vs_r13_"
+ "0x03fc0000"},
+ {{eq, r8, 0x00000ab0}, false, al, "eq r8 0x00000ab0", "eq_r8_0x00000ab0"},
+ {{gt, r6, 0x000000ff}, false, al, "gt r6 0x000000ff", "gt_r6_0x000000ff"},
+ {{vs, r5, 0x000ab000}, false, al, "vs r5 0x000ab000", "vs_r5_0x000ab000"},
+ {{mi, r1, 0x2ac00000}, false, al, "mi r1 0x2ac00000", "mi_r1_0x2ac00000"},
+ {{vs, r6, 0x00003fc0}, false, al, "vs r6 0x00003fc0", "vs_r6_0x00003fc0"},
+ {{gt, r6, 0x000002ac}, false, al, "gt r6 0x000002ac", "gt_r6_0x000002ac"},
+ {{eq, r2, 0x00000000}, false, al, "eq r2 0x00000000", "eq_r2_0x00000000"},
+ {{eq, r10, 0xc000003f},
+ false,
+ al,
+ "eq r10 0xc000003f",
+ "eq_r10_"
+ "0xc000003f"},
+ {{mi, r7, 0x00ab0000}, false, al, "mi r7 0x00ab0000", "mi_r7_0x00ab0000"},
+ {{cc, r7, 0x2ac00000}, false, al, "cc r7 0x2ac00000", "cc_r7_0x2ac00000"},
+ {{pl, r4, 0x00ab0000}, false, al, "pl r4 0x00ab0000", "pl_r4_0x00ab0000"},
+ {{ne, r15, 0x00000ff0},
+ false,
+ al,
+ "ne r15 0x00000ff0",
+ "ne_r15_"
+ "0x00000ff0"},
+ {{al, r6, 0x02ac0000}, false, al, "al r6 0x02ac0000", "al_r6_0x02ac0000"},
+ {{pl, r6, 0x000002ac}, false, al, "pl r6 0x000002ac", "pl_r6_0x000002ac"},
+ {{ne, r14, 0x00ff0000},
+ false,
+ al,
+ "ne r14 0x00ff0000",
+ "ne_r14_"
+ "0x00ff0000"},
+ {{ne, r5, 0x0003fc00}, false, al, "ne r5 0x0003fc00", "ne_r5_0x0003fc00"},
+ {{pl, r6, 0x00000ab0}, false, al, "pl r6 0x00000ab0", "pl_r6_0x00000ab0"},
+ {{eq, r10, 0x00002ac0},
+ false,
+ al,
+ "eq r10 0x00002ac0",
+ "eq_r10_"
+ "0x00002ac0"},
+ {{mi, r4, 0x00000ab0}, false, al, "mi r4 0x00000ab0", "mi_r4_0x00000ab0"},
+ {{vc, r5, 0x000ab000}, false, al, "vc r5 0x000ab000", "vc_r5_0x000ab000"},
+ {{ge, r0, 0x02ac0000}, false, al, "ge r0 0x02ac0000", "ge_r0_0x02ac0000"},
+ {{pl, r5, 0x000002ac}, false, al, "pl r5 0x000002ac", "pl_r5_0x000002ac"},
+ {{ge, r13, 0x00000ab0},
+ false,
+ al,
+ "ge r13 0x00000ab0",
+ "ge_r13_"
+ "0x00000ab0"},
+ {{eq, r2, 0x03fc0000}, false, al, "eq r2 0x03fc0000", "eq_r2_0x03fc0000"},
+ {{lt, r11, 0x00000ab0},
+ false,
+ al,
+ "lt r11 0x00000ab0",
+ "lt_r11_"
+ "0x00000ab0"},
+ {{ge, r6, 0x00000000}, false, al, "ge r6 0x00000000", "ge_r6_0x00000000"},
+ {{gt, r2, 0xac000002}, false, al, "gt r2 0xac000002", "gt_r2_0xac000002"},
+ {{le, r15, 0x000000ab},
+ false,
+ al,
+ "le r15 0x000000ab",
+ "le_r15_"
+ "0x000000ab"},
+ {{cc, r4, 0x00000ff0}, false, al, "cc r4 0x00000ff0", "cc_r4_0x00000ff0"},
+ {{pl, r10, 0x02ac0000},
+ false,
+ al,
+ "pl r10 0x02ac0000",
+ "pl_r10_"
+ "0x02ac0000"},
+ {{gt, r9, 0x00000000}, false, al, "gt r9 0x00000000", "gt_r9_0x00000000"},
+ {{vs, r8, 0x000000ff}, false, al, "vs r8 0x000000ff", "vs_r8_0x000000ff"},
+ {{gt, r14, 0x0002ac00},
+ false,
+ al,
+ "gt r14 0x0002ac00",
+ "gt_r14_"
+ "0x0002ac00"},
+ {{vs, r14, 0x00002ac0},
+ false,
+ al,
+ "vs r14 0x00002ac0",
+ "vs_r14_"
+ "0x00002ac0"},
+ {{ge, r12, 0x00000000},
+ false,
+ al,
+ "ge r12 0x00000000",
+ "ge_r12_"
+ "0x00000000"},
+ {{vc, r8, 0xf000000f}, false, al, "vc r8 0xf000000f", "vc_r8_0xf000000f"},
+ {{cs, r6, 0x00003fc0}, false, al, "cs r6 0x00003fc0", "cs_r6_0x00003fc0"},
+ {{le, r4, 0x000003fc}, false, al, "le r4 0x000003fc", "le_r4_0x000003fc"},
+ {{cs, r5, 0x000ff000}, false, al, "cs r5 0x000ff000", "cs_r5_0x000ff000"},
+ {{eq, r2, 0x0000ff00}, false, al, "eq r2 0x0000ff00", "eq_r2_0x0000ff00"},
+ {{pl, r10, 0x0ab00000},
+ false,
+ al,
+ "pl r10 0x0ab00000",
+ "pl_r10_"
+ "0x0ab00000"},
+ {{le, r11, 0xac000002},
+ false,
+ al,
+ "le r11 0xac000002",
+ "le_r11_"
+ "0xac000002"},
+ {{vs, r15, 0x00003fc0},
+ false,
+ al,
+ "vs r15 0x00003fc0",
+ "vs_r15_"
+ "0x00003fc0"},
+ {{lt, r2, 0x0002ac00}, false, al, "lt r2 0x0002ac00", "lt_r2_0x0002ac00"},
+ {{eq, r1, 0x00ab0000}, false, al, "eq r1 0x00ab0000", "eq_r1_0x00ab0000"},
+ {{cc, r7, 0x03fc0000}, false, al, "cc r7 0x03fc0000", "cc_r7_0x03fc0000"},
+ {{mi, r6, 0x00000ab0}, false, al, "mi r6 0x00000ab0", "mi_r6_0x00000ab0"},
+ {{eq, r4, 0x00000ab0}, false, al, "eq r4 0x00000ab0", "eq_r4_0x00000ab0"},
+ {{ls, r3, 0x0003fc00}, false, al, "ls r3 0x0003fc00", "ls_r3_0x0003fc00"},
+ {{mi, r6, 0x000ab000}, false, al, "mi r6 0x000ab000", "mi_r6_0x000ab000"},
+ {{ne, r12, 0x003fc000},
+ false,
+ al,
+ "ne r12 0x003fc000",
+ "ne_r12_"
+ "0x003fc000"},
+ {{eq, r11, 0x00ff0000},
+ false,
+ al,
+ "eq r11 0x00ff0000",
+ "eq_r11_"
+ "0x00ff0000"},
+ {{cs, r13, 0x00ab0000},
+ false,
+ al,
+ "cs r13 0x00ab0000",
+ "cs_r13_"
+ "0x00ab0000"},
+ {{eq, r5, 0x000003fc}, false, al, "eq r5 0x000003fc", "eq_r5_0x000003fc"},
+ {{vs, r6, 0x0003fc00}, false, al, "vs r6 0x0003fc00", "vs_r6_0x0003fc00"},
+ {{pl, r8, 0x0ff00000}, false, al, "pl r8 0x0ff00000", "pl_r8_0x0ff00000"},
+ {{pl, r11, 0x0000ab00},
+ false,
+ al,
+ "pl r11 0x0000ab00",
+ "pl_r11_"
+ "0x0000ab00"},
+ {{le, r2, 0xac000002}, false, al, "le r2 0xac000002", "le_r2_0xac000002"},
+ {{vc, r10, 0x000ff000},
+ false,
+ al,
+ "vc r10 0x000ff000",
+ "vc_r10_"
+ "0x000ff000"},
+ {{le, r4, 0x00000ff0}, false, al, "le r4 0x00000ff0", "le_r4_0x00000ff0"},
+ {{gt, r12, 0x00000ff0},
+ false,
+ al,
+ "gt r12 0x00000ff0",
+ "gt_r12_"
+ "0x00000ff0"},
+ {{le, r5, 0x0002ac00}, false, al, "le r5 0x0002ac00", "le_r5_0x0002ac00"},
+ {{le, r0, 0xac000002}, false, al, "le r0 0xac000002", "le_r0_0xac000002"},
+ {{vs, r11, 0x0ff00000},
+ false,
+ al,
+ "vs r11 0x0ff00000",
+ "vs_r11_"
+ "0x0ff00000"},
+ {{ls, r0, 0x000ab000}, false, al, "ls r0 0x000ab000", "ls_r0_0x000ab000"},
+ {{ls, r2, 0xf000000f}, false, al, "ls r2 0xf000000f", "ls_r2_0xf000000f"},
+ {{cs, r3, 0x0ff00000}, false, al, "cs r3 0x0ff00000", "cs_r3_0x0ff00000"},
+ {{hi, r8, 0x0ff00000}, false, al, "hi r8 0x0ff00000", "hi_r8_0x0ff00000"},
+ {{gt, r3, 0x00002ac0}, false, al, "gt r3 0x00002ac0", "gt_r3_0x00002ac0"},
+ {{al, r15, 0xab000000},
+ false,
+ al,
+ "al r15 0xab000000",
+ "al_r15_"
+ "0xab000000"},
+ {{eq, r13, 0x000000ab},
+ false,
+ al,
+ "eq r13 0x000000ab",
+ "eq_r13_"
+ "0x000000ab"},
+ {{al, r2, 0xc000002a}, false, al, "al r2 0xc000002a", "al_r2_0xc000002a"},
+ {{eq, r13, 0x03fc0000},
+ false,
+ al,
+ "eq r13 0x03fc0000",
+ "eq_r13_"
+ "0x03fc0000"},
+ {{eq, r3, 0x00000ff0}, false, al, "eq r3 0x00000ff0", "eq_r3_0x00000ff0"},
+ {{hi, r12, 0x00002ac0},
+ false,
+ al,
+ "hi r12 0x00002ac0",
+ "hi_r12_"
+ "0x00002ac0"},
+ {{mi, r2, 0x0ff00000}, false, al, "mi r2 0x0ff00000", "mi_r2_0x0ff00000"},
+ {{ne, r9, 0x003fc000}, false, al, "ne r9 0x003fc000", "ne_r9_0x003fc000"},
+ {{eq, r14, 0x03fc0000},
+ false,
+ al,
+ "eq r14 0x03fc0000",
+ "eq_r14_"
+ "0x03fc0000"},
+ {{cc, r0, 0x002ac000}, false, al, "cc r0 0x002ac000", "cc_r0_0x002ac000"},
+ {{vc, r14, 0x00000ab0},
+ false,
+ al,
+ "vc r14 0x00000ab0",
+ "vc_r14_"
+ "0x00000ab0"},
+ {{mi, r15, 0xf000000f},
+ false,
+ al,
+ "mi r15 0xf000000f",
+ "mi_r15_"
+ "0xf000000f"},
+ {{ge, r9, 0x000003fc}, false, al, "ge r9 0x000003fc", "ge_r9_0x000003fc"},
+ {{vs, r13, 0xac000002},
+ false,
+ al,
+ "vs r13 0xac000002",
+ "vs_r13_"
+ "0xac000002"},
+ {{vs, r1, 0x3fc00000}, false, al, "vs r1 0x3fc00000", "vs_r1_0x3fc00000"},
+ {{eq, r12, 0x00003fc0},
+ false,
+ al,
+ "eq r12 0x00003fc0",
+ "eq_r12_"
+ "0x00003fc0"},
+ {{mi, r6, 0xff000000}, false, al, "mi r6 0xff000000", "mi_r6_0xff000000"},
+ {{ne, r5, 0x000003fc}, false, al, "ne r5 0x000003fc", "ne_r5_0x000003fc"},
+ {{lt, r8, 0x0ff00000}, false, al, "lt r8 0x0ff00000", "lt_r8_0x0ff00000"},
+ {{hi, r7, 0x3fc00000}, false, al, "hi r7 0x3fc00000", "hi_r7_0x3fc00000"},
+ {{ge, r10, 0xac000002},
+ false,
+ al,
+ "ge r10 0xac000002",
+ "ge_r10_"
+ "0xac000002"},
+ {{vs, r2, 0x0000ff00}, false, al, "vs r2 0x0000ff00", "vs_r2_0x0000ff00"},
+ {{al, r6, 0x000000ab}, false, al, "al r6 0x000000ab", "al_r6_0x000000ab"},
+ {{ge, r7, 0x00ff0000}, false, al, "ge r7 0x00ff0000", "ge_r7_0x00ff0000"},
+ {{ne, r0, 0x000ff000}, false, al, "ne r0 0x000ff000", "ne_r0_0x000ff000"},
+ {{mi, r6, 0x000000ab}, false, al, "mi r6 0x000000ab", "mi_r6_0x000000ab"},
+ {{hi, r1, 0xf000000f}, false, al, "hi r1 0xf000000f", "hi_r1_0xf000000f"},
+ {{mi, r6, 0x2ac00000}, false, al, "mi r6 0x2ac00000", "mi_r6_0x2ac00000"},
+ {{vc, r11, 0x000000ff},
+ false,
+ al,
+ "vc r11 0x000000ff",
+ "vc_r11_"
+ "0x000000ff"},
+ {{ls, r14, 0x02ac0000},
+ false,
+ al,
+ "ls r14 0x02ac0000",
+ "ls_r14_"
+ "0x02ac0000"},
+ {{ge, r5, 0x003fc000}, false, al, "ge r5 0x003fc000", "ge_r5_0x003fc000"},
+ {{ls, r12, 0x0000ab00},
+ false,
+ al,
+ "ls r12 0x0000ab00",
+ "ls_r12_"
+ "0x0000ab00"},
+ {{cc, r15, 0x00000ab0},
+ false,
+ al,
+ "cc r15 0x00000ab0",
+ "cc_r15_"
+ "0x00000ab0"},
+ {{vc, r12, 0x0000ab00},
+ false,
+ al,
+ "vc r12 0x0000ab00",
+ "vc_r12_"
+ "0x0000ab00"},
+ {{vs, r2, 0xc000002a}, false, al, "vs r2 0xc000002a", "vs_r2_0xc000002a"},
+ {{lt, r7, 0x0ab00000}, false, al, "lt r7 0x0ab00000", "lt_r7_0x0ab00000"},
+ {{ls, r6, 0x00000ff0}, false, al, "ls r6 0x00000ff0", "ls_r6_0x00000ff0"},
+ {{vc, r10, 0x000000ff},
+ false,
+ al,
+ "vc r10 0x000000ff",
+ "vc_r10_"
+ "0x000000ff"},
+ {{ls, r4, 0x0000ab00}, false, al, "ls r4 0x0000ab00", "ls_r4_0x0000ab00"},
+ {{mi, r10, 0x003fc000},
+ false,
+ al,
+ "mi r10 0x003fc000",
+ "mi_r10_"
+ "0x003fc000"},
+ {{ls, r1, 0x000002ac}, false, al, "ls r1 0x000002ac", "ls_r1_0x000002ac"},
+ {{ge, r7, 0xb000000a}, false, al, "ge r7 0xb000000a", "ge_r7_0xb000000a"},
+ {{gt, r4, 0xf000000f}, false, al, "gt r4 0xf000000f", "gt_r4_0xf000000f"},
+ {{vc, r8, 0x002ac000}, false, al, "vc r8 0x002ac000", "vc_r8_0x002ac000"},
+ {{eq, r5, 0x0ab00000}, false, al, "eq r5 0x0ab00000", "eq_r5_0x0ab00000"},
+ {{gt, r2, 0xf000000f}, false, al, "gt r2 0xf000000f", "gt_r2_0xf000000f"},
+ {{gt, r6, 0xff000000}, false, al, "gt r6 0xff000000", "gt_r6_0xff000000"},
+ {{ls, r8, 0x0ab00000}, false, al, "ls r8 0x0ab00000", "ls_r8_0x0ab00000"},
+ {{vc, r0, 0xb000000a}, false, al, "vc r0 0xb000000a", "vc_r0_0xb000000a"},
+ {{lt, r6, 0x03fc0000}, false, al, "lt r6 0x03fc0000", "lt_r6_0x03fc0000"},
+ {{ge, r10, 0x0000ab00},
+ false,
+ al,
+ "ge r10 0x0000ab00",
+ "ge_r10_"
+ "0x0000ab00"},
+ {{hi, r4, 0x000ab000}, false, al, "hi r4 0x000ab000", "hi_r4_0x000ab000"},
+ {{hi, r11, 0x3fc00000},
+ false,
+ al,
+ "hi r11 0x3fc00000",
+ "hi_r11_"
+ "0x3fc00000"},
+ {{vs, r12, 0xc000003f},
+ false,
+ al,
+ "vs r12 0xc000003f",
+ "vs_r12_"
+ "0xc000003f"},
+ {{gt, r12, 0xb000000a},
+ false,
+ al,
+ "gt r12 0xb000000a",
+ "gt_r12_"
+ "0xb000000a"},
+ {{eq, r11, 0x2ac00000},
+ false,
+ al,
+ "eq r11 0x2ac00000",
+ "eq_r11_"
+ "0x2ac00000"},
+ {{hi, r0, 0xc000003f}, false, al, "hi r0 0xc000003f", "hi_r0_0xc000003f"},
+ {{cs, r12, 0xac000002},
+ false,
+ al,
+ "cs r12 0xac000002",
+ "cs_r12_"
+ "0xac000002"},
+ {{hi, r9, 0x3fc00000}, false, al, "hi r9 0x3fc00000", "hi_r9_0x3fc00000"},
+ {{vs, r2, 0x00002ac0}, false, al, "vs r2 0x00002ac0", "vs_r2_0x00002ac0"},
+ {{al, r12, 0xb000000a},
+ false,
+ al,
+ "al r12 0xb000000a",
+ "al_r12_"
+ "0xb000000a"},
+ {{gt, r12, 0x3fc00000},
+ false,
+ al,
+ "gt r12 0x3fc00000",
+ "gt_r12_"
+ "0x3fc00000"},
+ {{gt, r6, 0xf000000f}, false, al, "gt r6 0xf000000f", "gt_r6_0xf000000f"},
+ {{vc, r14, 0x000000ff},
+ false,
+ al,
+ "vc r14 0x000000ff",
+ "vc_r14_"
+ "0x000000ff"},
+ {{pl, r7, 0x0002ac00}, false, al, "pl r7 0x0002ac00", "pl_r7_0x0002ac00"},
+ {{ge, r1, 0x03fc0000}, false, al, "ge r1 0x03fc0000", "ge_r1_0x03fc0000"},
+ {{hi, r10, 0x0002ac00},
+ false,
+ al,
+ "hi r10 0x0002ac00",
+ "hi_r10_"
+ "0x0002ac00"},
+ {{gt, r4, 0x002ac000}, false, al, "gt r4 0x002ac000", "gt_r4_0x002ac000"},
+ {{vc, r5, 0x000000ff}, false, al, "vc r5 0x000000ff", "vc_r5_0x000000ff"},
+ {{pl, r15, 0x0000ab00},
+ false,
+ al,
+ "pl r15 0x0000ab00",
+ "pl_r15_"
+ "0x0000ab00"},
+ {{cc, r4, 0x00ab0000}, false, al, "cc r4 0x00ab0000", "cc_r4_0x00ab0000"},
+ {{pl, r11, 0xff000000},
+ false,
+ al,
+ "pl r11 0xff000000",
+ "pl_r11_"
+ "0xff000000"},
+ {{pl, r2, 0xf000000f}, false, al, "pl r2 0xf000000f", "pl_r2_0xf000000f"},
+ {{cc, r8, 0xb000000a}, false, al, "cc r8 0xb000000a", "cc_r8_0xb000000a"},
+ {{al, r13, 0x000000ff},
+ false,
+ al,
+ "al r13 0x000000ff",
+ "al_r13_"
+ "0x000000ff"},
+ {{mi, r6, 0x000003fc}, false, al, "mi r6 0x000003fc", "mi_r6_0x000003fc"},
+ {{vs, r13, 0x02ac0000},
+ false,
+ al,
+ "vs r13 0x02ac0000",
+ "vs_r13_"
+ "0x02ac0000"},
+ {{mi, r4, 0x00ff0000}, false, al, "mi r4 0x00ff0000", "mi_r4_0x00ff0000"},
+ {{cs, r3, 0x000003fc}, false, al, "cs r3 0x000003fc", "cs_r3_0x000003fc"},
+ {{pl, r13, 0xab000000},
+ false,
+ al,
+ "pl r13 0xab000000",
+ "pl_r13_"
+ "0xab000000"},
+ {{ls, r9, 0x002ac000}, false, al, "ls r9 0x002ac000", "ls_r9_0x002ac000"},
+ {{eq, r1, 0xc000002a}, false, al, "eq r1 0xc000002a", "eq_r1_0xc000002a"},
+ {{lt, r12, 0x00000000},
+ false,
+ al,
+ "lt r12 0x00000000",
+ "lt_r12_"
+ "0x00000000"},
+ {{ge, r14, 0xff000000},
+ false,
+ al,
+ "ge r14 0xff000000",
+ "ge_r14_"
+ "0xff000000"},
+ {{lt, r9, 0x002ac000}, false, al, "lt r9 0x002ac000", "lt_r9_0x002ac000"},
+ {{lt, r10, 0x00000ff0},
+ false,
+ al,
+ "lt r10 0x00000ff0",
+ "lt_r10_"
+ "0x00000ff0"},
+ {{vs, r5, 0x000000ff}, false, al, "vs r5 0x000000ff", "vs_r5_0x000000ff"},
+ {{cc, r12, 0x03fc0000},
+ false,
+ al,
+ "cc r12 0x03fc0000",
+ "cc_r12_"
+ "0x03fc0000"},
+ {{ne, r4, 0x00000000}, false, al, "ne r4 0x00000000", "ne_r4_0x00000000"},
+ {{mi, r13, 0xff000000},
+ false,
+ al,
+ "mi r13 0xff000000",
+ "mi_r13_"
+ "0xff000000"},
+ {{ne, r7, 0x00000ff0}, false, al, "ne r7 0x00000ff0", "ne_r7_0x00000ff0"},
+ {{vs, r2, 0xc000003f}, false, al, "vs r2 0xc000003f", "vs_r2_0xc000003f"},
+ {{al, r5, 0x00ff0000}, false, al, "al r5 0x00ff0000", "al_r5_0x00ff0000"},
+ {{hi, r15, 0x00000ff0},
+ false,
+ al,
+ "hi r15 0x00000ff0",
+ "hi_r15_"
+ "0x00000ff0"},
+ {{ls, r8, 0x00003fc0}, false, al, "ls r8 0x00003fc0", "ls_r8_0x00003fc0"},
+ {{vs, r0, 0xff000000}, false, al, "vs r0 0xff000000", "vs_r0_0xff000000"},
+ {{vs, r6, 0x000000ab}, false, al, "vs r6 0x000000ab", "vs_r6_0x000000ab"},
+ {{cs, r9, 0x00ab0000}, false, al, "cs r9 0x00ab0000", "cs_r9_0x00ab0000"},
+ {{hi, r1, 0x0002ac00}, false, al, "hi r1 0x0002ac00", "hi_r1_0x0002ac00"},
+ {{hi, r15, 0x2ac00000},
+ false,
+ al,
+ "hi r15 0x2ac00000",
+ "hi_r15_"
+ "0x2ac00000"},
+ {{hi, r6, 0x0002ac00}, false, al, "hi r6 0x0002ac00", "hi_r6_0x0002ac00"},
+ {{ge, r4, 0xc000003f}, false, al, "ge r4 0xc000003f", "ge_r4_0xc000003f"},
+ {{ls, r10, 0x0000ff00},
+ false,
+ al,
+ "ls r10 0x0000ff00",
+ "ls_r10_"
+ "0x0000ff00"},
+ {{ne, r11, 0x000003fc},
+ false,
+ al,
+ "ne r11 0x000003fc",
+ "ne_r11_"
+ "0x000003fc"},
+ {{ls, r3, 0x0002ac00}, false, al, "ls r3 0x0002ac00", "ls_r3_0x0002ac00"},
+ {{al, r12, 0x000003fc},
+ false,
+ al,
+ "al r12 0x000003fc",
+ "al_r12_"
+ "0x000003fc"},
+ {{le, r7, 0xf000000f}, false, al, "le r7 0xf000000f", "le_r7_0xf000000f"},
+ {{al, r11, 0x00000ab0},
+ false,
+ al,
+ "al r11 0x00000ab0",
+ "al_r11_"
+ "0x00000ab0"},
+ {{cs, r13, 0x02ac0000},
+ false,
+ al,
+ "cs r13 0x02ac0000",
+ "cs_r13_"
+ "0x02ac0000"},
+ {{hi, r1, 0x00000ff0}, false, al, "hi r1 0x00000ff0", "hi_r1_0x00000ff0"},
+ {{le, r11, 0x3fc00000},
+ false,
+ al,
+ "le r11 0x3fc00000",
+ "le_r11_"
+ "0x3fc00000"},
+ {{hi, r9, 0x000003fc}, false, al, "hi r9 0x000003fc", "hi_r9_0x000003fc"},
+ {{mi, r13, 0x000002ac},
+ false,
+ al,
+ "mi r13 0x000002ac",
+ "mi_r13_"
+ "0x000002ac"},
+ {{lt, r12, 0x000003fc},
+ false,
+ al,
+ "lt r12 0x000003fc",
+ "lt_r12_"
+ "0x000003fc"},
+ {{lt, r14, 0x00000ab0},
+ false,
+ al,
+ "lt r14 0x00000ab0",
+ "lt_r14_"
+ "0x00000ab0"},
+ {{gt, r1, 0x3fc00000}, false, al, "gt r1 0x3fc00000", "gt_r1_0x3fc00000"},
+ {{cc, r14, 0xb000000a},
+ false,
+ al,
+ "cc r14 0xb000000a",
+ "cc_r14_"
+ "0xb000000a"},
+ {{ge, r0, 0x000002ac}, false, al, "ge r0 0x000002ac", "ge_r0_0x000002ac"},
+ {{eq, r12, 0x000003fc},
+ false,
+ al,
+ "eq r12 0x000003fc",
+ "eq_r12_"
+ "0x000003fc"},
+ {{vc, r13, 0x0ab00000},
+ false,
+ al,
+ "vc r13 0x0ab00000",
+ "vc_r13_"
+ "0x0ab00000"},
+ {{pl, r10, 0x0003fc00},
+ false,
+ al,
+ "pl r10 0x0003fc00",
+ "pl_r10_"
+ "0x0003fc00"},
+ {{le, r7, 0x0000ff00}, false, al, "le r7 0x0000ff00", "le_r7_0x0000ff00"},
+ {{eq, r5, 0x0003fc00}, false, al, "eq r5 0x0003fc00", "eq_r5_0x0003fc00"},
+ {{pl, r1, 0xfc000003}, false, al, "pl r1 0xfc000003", "pl_r1_0xfc000003"},
+ {{gt, r15, 0x000ff000},
+ false,
+ al,
+ "gt r15 0x000ff000",
+ "gt_r15_"
+ "0x000ff000"},
+ {{mi, r2, 0xb000000a}, false, al, "mi r2 0xb000000a", "mi_r2_0xb000000a"},
+ {{cs, r8, 0x0000ff00}, false, al, "cs r8 0x0000ff00", "cs_r8_0x0000ff00"},
+ {{vs, r8, 0x00002ac0}, false, al, "vs r8 0x00002ac0", "vs_r8_0x00002ac0"},
+ {{cs, r5, 0x00ab0000}, false, al, "cs r5 0x00ab0000", "cs_r5_0x00ab0000"},
+ {{pl, r3, 0x00ab0000}, false, al, "pl r3 0x00ab0000", "pl_r3_0x00ab0000"},
+ {{hi, r5, 0x02ac0000}, false, al, "hi r5 0x02ac0000", "hi_r5_0x02ac0000"},
+ {{cc, r9, 0x000002ac}, false, al, "cc r9 0x000002ac", "cc_r9_0x000002ac"},
+ {{ls, r13, 0x0000ab00},
+ false,
+ al,
+ "ls r13 0x0000ab00",
+ "ls_r13_"
+ "0x0000ab00"},
+ {{pl, r11, 0x00000ab0},
+ false,
+ al,
+ "pl r11 0x00000ab0",
+ "pl_r11_"
+ "0x00000ab0"},
+ {{ge, r14, 0x3fc00000},
+ false,
+ al,
+ "ge r14 0x3fc00000",
+ "ge_r14_"
+ "0x3fc00000"},
+ {{al, r14, 0x0000ab00},
+ false,
+ al,
+ "al r14 0x0000ab00",
+ "al_r14_"
+ "0x0000ab00"},
+ {{lt, r6, 0xac000002}, false, al, "lt r6 0xac000002", "lt_r6_0xac000002"},
+ {{vc, r3, 0x000ff000}, false, al, "vc r3 0x000ff000", "vc_r3_0x000ff000"},
+ {{ne, r8, 0xfc000003}, false, al, "ne r8 0xfc000003", "ne_r8_0xfc000003"},
+ {{cs, r6, 0x000ab000}, false, al, "cs r6 0x000ab000", "cs_r6_0x000ab000"},
+ {{hi, r15, 0x0002ac00},
+ false,
+ al,
+ "hi r15 0x0002ac00",
+ "hi_r15_"
+ "0x0002ac00"},
+ {{pl, r6, 0x00000ff0}, false, al, "pl r6 0x00000ff0", "pl_r6_0x00000ff0"},
+ {{hi, r15, 0x03fc0000},
+ false,
+ al,
+ "hi r15 0x03fc0000",
+ "hi_r15_"
+ "0x03fc0000"},
+ {{cc, r6, 0x0003fc00}, false, al, "cc r6 0x0003fc00", "cc_r6_0x0003fc00"},
+ {{eq, r12, 0x000002ac},
+ false,
+ al,
+ "eq r12 0x000002ac",
+ "eq_r12_"
+ "0x000002ac"},
+ {{ls, r11, 0x02ac0000},
+ false,
+ al,
+ "ls r11 0x02ac0000",
+ "ls_r11_"
+ "0x02ac0000"},
+ {{ge, r13, 0x00ff0000},
+ false,
+ al,
+ "ge r13 0x00ff0000",
+ "ge_r13_"
+ "0x00ff0000"},
+ {{lt, r4, 0x0003fc00}, false, al, "lt r4 0x0003fc00", "lt_r4_0x0003fc00"},
+ {{mi, r0, 0x0000ab00}, false, al, "mi r0 0x0000ab00", "mi_r0_0x0000ab00"},
+ {{lt, r4, 0x000000ab}, false, al, "lt r4 0x000000ab", "lt_r4_0x000000ab"},
+ {{ls, r2, 0xc000003f}, false, al, "ls r2 0xc000003f", "ls_r2_0xc000003f"},
+ {{pl, r1, 0x000000ab}, false, al, "pl r1 0x000000ab", "pl_r1_0x000000ab"},
+ {{ne, r10, 0x0000ff00},
+ false,
+ al,
+ "ne r10 0x0000ff00",
+ "ne_r10_"
+ "0x0000ff00"},
+ {{vc, r15, 0x00000ab0},
+ false,
+ al,
+ "vc r15 0x00000ab0",
+ "vc_r15_"
+ "0x00000ab0"},
+ {{eq, r6, 0x02ac0000}, false, al, "eq r6 0x02ac0000", "eq_r6_0x02ac0000"},
+ {{cc, r11, 0x00000000},
+ false,
+ al,
+ "cc r11 0x00000000",
+ "cc_r11_"
+ "0x00000000"},
+ {{mi, r7, 0x002ac000}, false, al, "mi r7 0x002ac000", "mi_r7_0x002ac000"},
+ {{hi, r14, 0xab000000},
+ false,
+ al,
+ "hi r14 0xab000000",
+ "hi_r14_"
+ "0xab000000"},
+ {{vc, r6, 0x0000ff00}, false, al, "vc r6 0x0000ff00", "vc_r6_0x0000ff00"},
+ {{al, r5, 0x000002ac}, false, al, "al r5 0x000002ac", "al_r5_0x000002ac"},
+ {{cc, r12, 0x0002ac00},
+ false,
+ al,
+ "cc r12 0x0002ac00",
+ "cc_r12_"
+ "0x0002ac00"},
+ {{cc, r10, 0x000000ab},
+ false,
+ al,
+ "cc r10 0x000000ab",
+ "cc_r10_"
+ "0x000000ab"},
+ {{gt, r5, 0x000002ac}, false, al, "gt r5 0x000002ac", "gt_r5_0x000002ac"},
+ {{vc, r3, 0x00000000}, false, al, "vc r3 0x00000000", "vc_r3_0x00000000"},
+ {{gt, r12, 0xac000002},
+ false,
+ al,
+ "gt r12 0xac000002",
+ "gt_r12_"
+ "0xac000002"},
+ {{al, r10, 0x00ab0000},
+ false,
+ al,
+ "al r10 0x00ab0000",
+ "al_r10_"
+ "0x00ab0000"},
+ {{mi, r5, 0x000ff000}, false, al, "mi r5 0x000ff000", "mi_r5_0x000ff000"},
+ {{pl, r1, 0x00000ff0}, false, al, "pl r1 0x00000ff0", "pl_r1_0x00000ff0"},
+ {{lt, r7, 0xf000000f}, false, al, "lt r7 0xf000000f", "lt_r7_0xf000000f"},
+ {{ge, r14, 0x002ac000},
+ false,
+ al,
+ "ge r14 0x002ac000",
+ "ge_r14_"
+ "0x002ac000"},
+ {{cc, r0, 0xac000002}, false, al, "cc r0 0xac000002", "cc_r0_0xac000002"},
+ {{cs, r2, 0x00000ab0}, false, al, "cs r2 0x00000ab0", "cs_r2_0x00000ab0"},
+ {{vs, r0, 0x00002ac0}, false, al, "vs r0 0x00002ac0", "vs_r0_0x00002ac0"},
+ {{le, r10, 0x000ab000},
+ false,
+ al,
+ "le r10 0x000ab000",
+ "le_r10_"
+ "0x000ab000"},
+ {{ge, r9, 0x0003fc00}, false, al, "ge r9 0x0003fc00", "ge_r9_0x0003fc00"},
+ {{lt, r1, 0x00003fc0}, false, al, "lt r1 0x00003fc0", "lt_r1_0x00003fc0"},
+ {{ge, r5, 0x000000ff}, false, al, "ge r5 0x000000ff", "ge_r5_0x000000ff"},
+ {{le, r11, 0x2ac00000},
+ false,
+ al,
+ "le r11 0x2ac00000",
+ "le_r11_"
+ "0x2ac00000"},
+ {{le, r9, 0x002ac000}, false, al, "le r9 0x002ac000", "le_r9_0x002ac000"},
+ {{hi, r12, 0xf000000f},
+ false,
+ al,
+ "hi r12 0xf000000f",
+ "hi_r12_"
+ "0xf000000f"},
+ {{lt, r3, 0x02ac0000}, false, al, "lt r3 0x02ac0000", "lt_r3_0x02ac0000"},
+ {{al, r13, 0x2ac00000},
+ false,
+ al,
+ "al r13 0x2ac00000",
+ "al_r13_"
+ "0x2ac00000"},
+ {{vs, r12, 0x00000ab0},
+ false,
+ al,
+ "vs r12 0x00000ab0",
+ "vs_r12_"
+ "0x00000ab0"},
+ {{gt, r3, 0x3fc00000}, false, al, "gt r3 0x3fc00000", "gt_r3_0x3fc00000"},
+ {{gt, r0, 0x2ac00000}, false, al, "gt r0 0x2ac00000", "gt_r0_0x2ac00000"},
+ {{eq, r15, 0x000002ac},
+ false,
+ al,
+ "eq r15 0x000002ac",
+ "eq_r15_"
+ "0x000002ac"},
+ {{gt, r1, 0x000ab000}, false, al, "gt r1 0x000ab000", "gt_r1_0x000ab000"},
+ {{gt, r2, 0x2ac00000}, false, al, "gt r2 0x2ac00000", "gt_r2_0x2ac00000"},
+ {{mi, r15, 0x00ab0000},
+ false,
+ al,
+ "mi r15 0x00ab0000",
+ "mi_r15_"
+ "0x00ab0000"},
+ {{mi, r1, 0x000ab000}, false, al, "mi r1 0x000ab000", "mi_r1_0x000ab000"},
+ {{ge, r12, 0x0ab00000},
+ false,
+ al,
+ "ge r12 0x0ab00000",
+ "ge_r12_"
+ "0x0ab00000"},
+ {{gt, r5, 0x000000ab}, false, al, "gt r5 0x000000ab", "gt_r5_0x000000ab"},
+ {{gt, r4, 0x00000000}, false, al, "gt r4 0x00000000", "gt_r4_0x00000000"},
+ {{al, r13, 0xc000003f},
+ false,
+ al,
+ "al r13 0xc000003f",
+ "al_r13_"
+ "0xc000003f"},
+ {{ls, r7, 0xff000000}, false, al, "ls r7 0xff000000", "ls_r7_0xff000000"},
+ {{vs, r0, 0x00000ff0}, false, al, "vs r0 0x00000ff0", "vs_r0_0x00000ff0"},
+ {{hi, r9, 0x02ac0000}, false, al, "hi r9 0x02ac0000", "hi_r9_0x02ac0000"},
+ {{cs, r1, 0xc000002a}, false, al, "cs r1 0xc000002a", "cs_r1_0xc000002a"},
+ {{hi, r8, 0xf000000f}, false, al, "hi r8 0xf000000f", "hi_r8_0xf000000f"},
+ {{gt, r1, 0xb000000a}, false, al, "gt r1 0xb000000a", "gt_r1_0xb000000a"},
+ {{gt, r2, 0x0002ac00}, false, al, "gt r2 0x0002ac00", "gt_r2_0x0002ac00"},
+ {{vs, r5, 0x000000ab}, false, al, "vs r5 0x000000ab", "vs_r5_0x000000ab"},
+ {{cc, r12, 0x000ff000},
+ false,
+ al,
+ "cc r12 0x000ff000",
+ "cc_r12_"
+ "0x000ff000"},
+ {{ge, r0, 0x00003fc0}, false, al, "ge r0 0x00003fc0", "ge_r0_0x00003fc0"},
+ {{ls, r12, 0x00ab0000},
+ false,
+ al,
+ "ls r12 0x00ab0000",
+ "ls_r12_"
+ "0x00ab0000"},
+ {{vs, r4, 0x000003fc}, false, al, "vs r4 0x000003fc", "vs_r4_0x000003fc"},
+ {{ls, r4, 0x00003fc0}, false, al, "ls r4 0x00003fc0", "ls_r4_0x00003fc0"},
+ {{eq, r9, 0xb000000a}, false, al, "eq r9 0xb000000a", "eq_r9_0xb000000a"},
+ {{cs, r9, 0x2ac00000}, false, al, "cs r9 0x2ac00000", "cs_r9_0x2ac00000"},
+ {{vs, r12, 0x0000ff00},
+ false,
+ al,
+ "vs r12 0x0000ff00",
+ "vs_r12_"
+ "0x0000ff00"},
+ {{vc, r1, 0x0000ff00}, false, al, "vc r1 0x0000ff00", "vc_r1_0x0000ff00"},
+ {{hi, r12, 0xff000000},
+ false,
+ al,
+ "hi r12 0xff000000",
+ "hi_r12_"
+ "0xff000000"},
+ {{cs, r12, 0x0002ac00},
+ false,
+ al,
+ "cs r12 0x0002ac00",
+ "cs_r12_"
+ "0x0002ac00"},
+ {{mi, r11, 0x03fc0000},
+ false,
+ al,
+ "mi r11 0x03fc0000",
+ "mi_r11_"
+ "0x03fc0000"},
+ {{eq, r2, 0x000ff000}, false, al, "eq r2 0x000ff000", "eq_r2_0x000ff000"},
+ {{al, r6, 0x00000ff0}, false, al, "al r6 0x00000ff0", "al_r6_0x00000ff0"},
+ {{cs, r7, 0x000003fc}, false, al, "cs r7 0x000003fc", "cs_r7_0x000003fc"},
+ {{pl, r11, 0xb000000a},
+ false,
+ al,
+ "pl r11 0xb000000a",
+ "pl_r11_"
+ "0xb000000a"},
+ {{ne, r15, 0x000ff000},
+ false,
+ al,
+ "ne r15 0x000ff000",
+ "ne_r15_"
+ "0x000ff000"},
+ {{mi, r14, 0x00ab0000},
+ false,
+ al,
+ "mi r14 0x00ab0000",
+ "mi_r14_"
+ "0x00ab0000"},
+ {{hi, r4, 0x0000ff00}, false, al, "hi r4 0x0000ff00", "hi_r4_0x0000ff00"},
+ {{ge, r1, 0x000002ac}, false, al, "ge r1 0x000002ac", "ge_r1_0x000002ac"},
+ {{gt, r7, 0xb000000a}, false, al, "gt r7 0xb000000a", "gt_r7_0xb000000a"},
+ {{gt, r2, 0x00000000}, false, al, "gt r2 0x00000000", "gt_r2_0x00000000"},
+ {{cc, r2, 0xb000000a}, false, al, "cc r2 0xb000000a", "cc_r2_0xb000000a"},
+ {{vs, r14, 0x000ab000},
+ false,
+ al,
+ "vs r14 0x000ab000",
+ "vs_r14_"
+ "0x000ab000"},
+ {{lt, r5, 0x000002ac}, false, al, "lt r5 0x000002ac", "lt_r5_0x000002ac"},
+ {{cc, r13, 0x0000ff00},
+ false,
+ al,
+ "cc r13 0x0000ff00",
+ "cc_r13_"
+ "0x0000ff00"},
+ {{hi, r15, 0x000002ac},
+ false,
+ al,
+ "hi r15 0x000002ac",
+ "hi_r15_"
+ "0x000002ac"},
+ {{ge, r1, 0x00ff0000}, false, al, "ge r1 0x00ff0000", "ge_r1_0x00ff0000"},
+ {{lt, r15, 0x00002ac0},
+ false,
+ al,
+ "lt r15 0x00002ac0",
+ "lt_r15_"
+ "0x00002ac0"},
+ {{lt, r8, 0x000ff000}, false, al, "lt r8 0x000ff000", "lt_r8_0x000ff000"},
+ {{hi, r10, 0xc000002a},
+ false,
+ al,
+ "hi r10 0xc000002a",
+ "hi_r10_"
+ "0xc000002a"},
+ {{eq, r12, 0x000ab000},
+ false,
+ al,
+ "eq r12 0x000ab000",
+ "eq_r12_"
+ "0x000ab000"},
+ {{vs, r11, 0x00002ac0},
+ false,
+ al,
+ "vs r11 0x00002ac0",
+ "vs_r11_"
+ "0x00002ac0"},
+ {{hi, r10, 0x000003fc},
+ false,
+ al,
+ "hi r10 0x000003fc",
+ "hi_r10_"
+ "0x000003fc"},
+ {{cc, r8, 0x000003fc}, false, al, "cc r8 0x000003fc", "cc_r8_0x000003fc"},
+ {{vc, r11, 0x00000ab0},
+ false,
+ al,
+ "vc r11 0x00000ab0",
+ "vc_r11_"
+ "0x00000ab0"},
+ {{le, r3, 0xac000002}, false, al, "le r3 0xac000002", "le_r3_0xac000002"},
+ {{cc, r11, 0xc000002a},
+ false,
+ al,
+ "cc r11 0xc000002a",
+ "cc_r11_"
+ "0xc000002a"},
+ {{lt, r6, 0xab000000}, false, al, "lt r6 0xab000000", "lt_r6_0xab000000"},
+ {{hi, r1, 0x00003fc0}, false, al, "hi r1 0x00003fc0", "hi_r1_0x00003fc0"},
+ {{vc, r3, 0x00002ac0}, false, al, "vc r3 0x00002ac0", "vc_r3_0x00002ac0"},
+ {{vc, r6, 0x00000ab0}, false, al, "vc r6 0x00000ab0", "vc_r6_0x00000ab0"},
+ {{ls, r6, 0x03fc0000}, false, al, "ls r6 0x03fc0000", "ls_r6_0x03fc0000"},
+ {{hi, r11, 0x0ab00000},
+ false,
+ al,
+ "hi r11 0x0ab00000",
+ "hi_r11_"
+ "0x0ab00000"},
+ {{lt, r12, 0x0002ac00},
+ false,
+ al,
+ "lt r12 0x0002ac00",
+ "lt_r12_"
+ "0x0002ac00"},
+ {{al, r8, 0xab000000}, false, al, "al r8 0xab000000", "al_r8_0xab000000"},
+ {{vs, r2, 0x00000ab0}, false, al, "vs r2 0x00000ab0", "vs_r2_0x00000ab0"},
+ {{hi, r14, 0x02ac0000},
+ false,
+ al,
+ "hi r14 0x02ac0000",
+ "hi_r14_"
+ "0x02ac0000"},
+ {{cs, r3, 0x00000ff0}, false, al, "cs r3 0x00000ff0", "cs_r3_0x00000ff0"},
+ {{cc, r9, 0xb000000a}, false, al, "cc r9 0xb000000a", "cc_r9_0xb000000a"},
+ {{vc, r9, 0x00000ff0}, false, al, "vc r9 0x00000ff0", "vc_r9_0x00000ff0"},
+ {{ne, r9, 0xab000000}, false, al, "ne r9 0xab000000", "ne_r9_0xab000000"},
+ {{cc, r10, 0xb000000a},
+ false,
+ al,
+ "cc r10 0xb000000a",
+ "cc_r10_"
+ "0xb000000a"},
+ {{ls, r11, 0xb000000a},
+ false,
+ al,
+ "ls r11 0xb000000a",
+ "ls_r11_"
+ "0xb000000a"},
+ {{lt, r11, 0x00ff0000},
+ false,
+ al,
+ "lt r11 0x00ff0000",
+ "lt_r11_"
+ "0x00ff0000"},
+ {{lt, r3, 0x000003fc}, false, al, "lt r3 0x000003fc", "lt_r3_0x000003fc"},
+ {{gt, r14, 0x00002ac0},
+ false,
+ al,
+ "gt r14 0x00002ac0",
+ "gt_r14_"
+ "0x00002ac0"},
+ {{ls, r8, 0xc000003f}, false, al, "ls r8 0xc000003f", "ls_r8_0xc000003f"},
+ {{al, r11, 0x000ab000},
+ false,
+ al,
+ "al r11 0x000ab000",
+ "al_r11_"
+ "0x000ab000"},
+ {{lt, r7, 0x000ab000}, false, al, "lt r7 0x000ab000", "lt_r7_0x000ab000"},
+ {{vs, r14, 0xff000000},
+ false,
+ al,
+ "vs r14 0xff000000",
+ "vs_r14_"
+ "0xff000000"},
+ {{vc, r2, 0xab000000}, false, al, "vc r2 0xab000000", "vc_r2_0xab000000"},
+ {{ne, r3, 0x00000ff0}, false, al, "ne r3 0x00000ff0", "ne_r3_0x00000ff0"},
+ {{ne, r15, 0x02ac0000},
+ false,
+ al,
+ "ne r15 0x02ac0000",
+ "ne_r15_"
+ "0x02ac0000"},
+ {{gt, r3, 0x000ff000}, false, al, "gt r3 0x000ff000", "gt_r3_0x000ff000"},
+ {{pl, r1, 0x2ac00000}, false, al, "pl r1 0x2ac00000", "pl_r1_0x2ac00000"},
+ {{mi, r1, 0x00002ac0}, false, al, "mi r1 0x00002ac0", "mi_r1_0x00002ac0"},
+ {{vc, r6, 0xac000002}, false, al, "vc r6 0xac000002", "vc_r6_0xac000002"},
+ {{vs, r2, 0x0ff00000}, false, al, "vs r2 0x0ff00000", "vs_r2_0x0ff00000"},
+ {{ge, r2, 0x000003fc}, false, al, "ge r2 0x000003fc", "ge_r2_0x000003fc"},
+ {{cs, r15, 0x0000ff00},
+ false,
+ al,
+ "cs r15 0x0000ff00",
+ "cs_r15_"
+ "0x0000ff00"},
+ {{lt, r3, 0x000002ac}, false, al, "lt r3 0x000002ac", "lt_r3_0x000002ac"},
+ {{cs, r6, 0xff000000}, false, al, "cs r6 0xff000000", "cs_r6_0xff000000"},
+ {{ge, r14, 0x000000ff},
+ false,
+ al,
+ "ge r14 0x000000ff",
+ "ge_r14_"
+ "0x000000ff"},
+ {{gt, r7, 0x03fc0000}, false, al, "gt r7 0x03fc0000", "gt_r7_0x03fc0000"},
+ {{ne, r8, 0x000ff000}, false, al, "ne r8 0x000ff000", "ne_r8_0x000ff000"},
+ {{gt, r14, 0xc000002a},
+ false,
+ al,
+ "gt r14 0xc000002a",
+ "gt_r14_"
+ "0xc000002a"},
+ {{hi, r12, 0x0000ff00},
+ false,
+ al,
+ "hi r12 0x0000ff00",
+ "hi_r12_"
+ "0x0000ff00"},
+ {{le, r15, 0x00003fc0},
+ false,
+ al,
+ "le r15 0x00003fc0",
+ "le_r15_"
+ "0x00003fc0"},
+ {{eq, r13, 0x000ab000},
+ false,
+ al,
+ "eq r13 0x000ab000",
+ "eq_r13_"
+ "0x000ab000"},
+ {{vc, r7, 0x000ab000}, false, al, "vc r7 0x000ab000", "vc_r7_0x000ab000"},
+ {{gt, r7, 0xf000000f}, false, al, "gt r7 0xf000000f", "gt_r7_0xf000000f"},
+ {{cc, r6, 0xac000002}, false, al, "cc r6 0xac000002", "cc_r6_0xac000002"},
+ {{cs, r14, 0x000000ff},
+ false,
+ al,
+ "cs r14 0x000000ff",
+ "cs_r14_"
+ "0x000000ff"},
+ {{ne, r2, 0x0003fc00}, false, al, "ne r2 0x0003fc00", "ne_r2_0x0003fc00"},
+ {{vs, r1, 0x002ac000}, false, al, "vs r1 0x002ac000", "vs_r1_0x002ac000"},
+ {{eq, r8, 0x002ac000}, false, al, "eq r8 0x002ac000", "eq_r8_0x002ac000"},
+ {{lt, r8, 0x0000ff00}, false, al, "lt r8 0x0000ff00", "lt_r8_0x0000ff00"},
+ {{vs, r9, 0xc000003f}, false, al, "vs r9 0xc000003f", "vs_r9_0xc000003f"},
+ {{mi, r11, 0xff000000},
+ false,
+ al,
+ "mi r11 0xff000000",
+ "mi_r11_"
+ "0xff000000"},
+ {{cs, r12, 0x03fc0000},
+ false,
+ al,
+ "cs r12 0x03fc0000",
+ "cs_r12_"
+ "0x03fc0000"},
+ {{lt, r5, 0xc000002a}, false, al, "lt r5 0xc000002a", "lt_r5_0xc000002a"},
+ {{vc, r6, 0x000000ab}, false, al, "vc r6 0x000000ab", "vc_r6_0x000000ab"},
+ {{ls, r10, 0x0ab00000},
+ false,
+ al,
+ "ls r10 0x0ab00000",
+ "ls_r10_"
+ "0x0ab00000"},
+ {{al, r11, 0x00ff0000},
+ false,
+ al,
+ "al r11 0x00ff0000",
+ "al_r11_"
+ "0x00ff0000"},
+ {{hi, r13, 0x00000ab0},
+ false,
+ al,
+ "hi r13 0x00000ab0",
+ "hi_r13_"
+ "0x00000ab0"},
+ {{ls, r0, 0xab000000}, false, al, "ls r0 0xab000000", "ls_r0_0xab000000"},
+ {{le, r5, 0xab000000}, false, al, "le r5 0xab000000", "le_r5_0xab000000"},
+ {{vs, r4, 0x00ff0000}, false, al, "vs r4 0x00ff0000", "vs_r4_0x00ff0000"},
+ {{al, r10, 0x03fc0000},
+ false,
+ al,
+ "al r10 0x03fc0000",
+ "al_r10_"
+ "0x03fc0000"},
+ {{al, r8, 0x000003fc}, false, al, "al r8 0x000003fc", "al_r8_0x000003fc"},
+ {{vs, r11, 0xab000000},
+ false,
+ al,
+ "vs r11 0xab000000",
+ "vs_r11_"
+ "0xab000000"},
+ {{eq, r2, 0x00000ff0}, false, al, "eq r2 0x00000ff0", "eq_r2_0x00000ff0"},
+ {{vc, r4, 0x00000ff0}, false, al, "vc r4 0x00000ff0", "vc_r4_0x00000ff0"},
+ {{vc, r9, 0x00002ac0}, false, al, "vc r9 0x00002ac0", "vc_r9_0x00002ac0"},
+ {{cc, r11, 0x00ff0000},
+ false,
+ al,
+ "cc r11 0x00ff0000",
+ "cc_r11_"
+ "0x00ff0000"},
+ {{cc, r13, 0x00ff0000},
+ false,
+ al,
+ "cc r13 0x00ff0000",
+ "cc_r13_"
+ "0x00ff0000"},
+ {{pl, r0, 0x00000ab0}, false, al, "pl r0 0x00000ab0", "pl_r0_0x00000ab0"},
+ {{al, r2, 0x02ac0000}, false, al, "al r2 0x02ac0000", "al_r2_0x02ac0000"},
+ {{hi, r11, 0xc000002a},
+ false,
+ al,
+ "hi r11 0xc000002a",
+ "hi_r11_"
+ "0xc000002a"},
+ {{ne, r3, 0xf000000f}, false, al, "ne r3 0xf000000f", "ne_r3_0xf000000f"},
+ {{cc, r15, 0x0ab00000},
+ false,
+ al,
+ "cc r15 0x0ab00000",
+ "cc_r15_"
+ "0x0ab00000"},
+ {{ge, r12, 0x00ff0000},
+ false,
+ al,
+ "ge r12 0x00ff0000",
+ "ge_r12_"
+ "0x00ff0000"},
+ {{le, r12, 0x002ac000},
+ false,
+ al,
+ "le r12 0x002ac000",
+ "le_r12_"
+ "0x002ac000"},
+ {{mi, r12, 0xc000003f},
+ false,
+ al,
+ "mi r12 0xc000003f",
+ "mi_r12_"
+ "0xc000003f"},
+ {{lt, r0, 0xfc000003}, false, al, "lt r0 0xfc000003", "lt_r0_0xfc000003"},
+ {{vc, r15, 0x000ab000},
+ false,
+ al,
+ "vc r15 0x000ab000",
+ "vc_r15_"
+ "0x000ab000"},
+ {{pl, r5, 0x3fc00000}, false, al, "pl r5 0x3fc00000", "pl_r5_0x3fc00000"},
+ {{vs, r15, 0x00ab0000},
+ false,
+ al,
+ "vs r15 0x00ab0000",
+ "vs_r15_"
+ "0x00ab0000"},
+ {{hi, r3, 0x00ff0000}, false, al, "hi r3 0x00ff0000", "hi_r3_0x00ff0000"},
+ {{lt, r8, 0x000000ff}, false, al, "lt r8 0x000000ff", "lt_r8_0x000000ff"},
+ {{le, r2, 0x000000ff}, false, al, "le r2 0x000000ff", "le_r2_0x000000ff"},
+ {{vs, r0, 0x0002ac00}, false, al, "vs r0 0x0002ac00", "vs_r0_0x0002ac00"},
+ {{vs, r2, 0xff000000}, false, al, "vs r2 0xff000000", "vs_r2_0xff000000"},
+ {{pl, r6, 0xab000000}, false, al, "pl r6 0xab000000", "pl_r6_0xab000000"},
+ {{ls, r4, 0x3fc00000}, false, al, "ls r4 0x3fc00000", "ls_r4_0x3fc00000"},
+ {{ls, r3, 0x000ab000}, false, al, "ls r3 0x000ab000", "ls_r3_0x000ab000"},
+ {{eq, r11, 0x000ab000},
+ false,
+ al,
+ "eq r11 0x000ab000",
+ "eq_r11_"
+ "0x000ab000"},
+ {{vc, r6, 0x03fc0000}, false, al, "vc r6 0x03fc0000", "vc_r6_0x03fc0000"},
+ {{mi, r14, 0x0000ab00},
+ false,
+ al,
+ "mi r14 0x0000ab00",
+ "mi_r14_"
+ "0x0000ab00"},
+ {{pl, r8, 0xab000000}, false, al, "pl r8 0xab000000", "pl_r8_0xab000000"},
+ {{pl, r8, 0xc000003f}, false, al, "pl r8 0xc000003f", "pl_r8_0xc000003f"},
+ {{eq, r14, 0x003fc000},
+ false,
+ al,
+ "eq r14 0x003fc000",
+ "eq_r14_"
+ "0x003fc000"},
+ {{vs, r9, 0x00ff0000}, false, al, "vs r9 0x00ff0000", "vs_r9_0x00ff0000"},
+ {{vs, r1, 0x00002ac0}, false, al, "vs r1 0x00002ac0", "vs_r1_0x00002ac0"},
+ {{le, r1, 0x00ff0000}, false, al, "le r1 0x00ff0000", "le_r1_0x00ff0000"},
+ {{lt, r7, 0x000ff000}, false, al, "lt r7 0x000ff000", "lt_r7_0x000ff000"},
+ {{mi, r6, 0x002ac000}, false, al, "mi r6 0x002ac000", "mi_r6_0x002ac000"},
+ {{vc, r11, 0xc000003f},
+ false,
+ al,
+ "vc r11 0xc000003f",
+ "vc_r11_"
+ "0xc000003f"},
+ {{lt, r4, 0x00000000}, false, al, "lt r4 0x00000000", "lt_r4_0x00000000"},
+ {{pl, r0, 0xac000002}, false, al, "pl r0 0xac000002", "pl_r0_0xac000002"},
+ {{ls, r10, 0xc000003f},
+ false,
+ al,
+ "ls r10 0xc000003f",
+ "ls_r10_"
+ "0xc000003f"},
+ {{cc, r15, 0xc000002a},
+ false,
+ al,
+ "cc r15 0xc000002a",
+ "cc_r15_0xc000002a"}};
// These headers each contain an array of `TestResult` with the reference output
// values. The reference arrays are names `kReference{mnemonic}`.
diff --git a/test/aarch32/test-assembler-cond-rd-operand-const-a32-cannot-use-pc.cc b/test/aarch32/test-assembler-cond-rd-operand-const-a32-cannot-use-pc.cc
index e012224..9d46a91 100644
--- a/test/aarch32/test-assembler-cond-rd-operand-const-a32-cannot-use-pc.cc
+++ b/test/aarch32/test-assembler-cond-rd-operand-const-a32-cannot-use-pc.cc
@@ -94,666 +94,1286 @@
};
// Each element of this array produce one instruction encoding.
-const TestData kTests[] = {
- {{ls, r0, 0x003fc000}, false, al, "ls r0 0x003fc000", "ls_r0_0x003fc000"},
- {{eq, r13, 0xff000000}, false, al, "eq r13 0xff000000", "eq_r13_"
- "0xff000000"},
- {{al, r0, 0x0002ac00}, false, al, "al r0 0x0002ac00", "al_r0_0x0002ac00"},
- {{gt, r13, 0x002ac000}, false, al, "gt r13 0x002ac000", "gt_r13_"
- "0x002ac000"},
- {{mi, r3, 0x000002ac}, false, al, "mi r3 0x000002ac", "mi_r3_0x000002ac"},
- {{ls, r0, 0x000000ff}, false, al, "ls r0 0x000000ff", "ls_r0_0x000000ff"},
- {{ls, r7, 0x0000ab00}, false, al, "ls r7 0x0000ab00", "ls_r7_0x0000ab00"},
- {{cc, r11, 0x0ff00000}, false, al, "cc r11 0x0ff00000", "cc_r11_"
- "0x0ff00000"},
- {{vs, r5, 0xc000003f}, false, al, "vs r5 0xc000003f", "vs_r5_0xc000003f"},
- {{gt, r3, 0x00ab0000}, false, al, "gt r3 0x00ab0000", "gt_r3_0x00ab0000"},
- {{hi, r7, 0xff000000}, false, al, "hi r7 0xff000000", "hi_r7_0xff000000"},
- {{vc, r10, 0xff000000}, false, al, "vc r10 0xff000000", "vc_r10_"
- "0xff000000"},
- {{hi, r10, 0x002ac000}, false, al, "hi r10 0x002ac000", "hi_r10_"
- "0x002ac000"},
- {{ne, r9, 0x00ff0000}, false, al, "ne r9 0x00ff0000", "ne_r9_0x00ff0000"},
- {{ge, r3, 0xf000000f}, false, al, "ge r3 0xf000000f", "ge_r3_0xf000000f"},
- {{hi, r0, 0x000ff000}, false, al, "hi r0 0x000ff000", "hi_r0_0x000ff000"},
- {{mi, r2, 0x00002ac0}, false, al, "mi r2 0x00002ac0", "mi_r2_0x00002ac0"},
- {{ge, r6, 0x000ab000}, false, al, "ge r6 0x000ab000", "ge_r6_0x000ab000"},
- {{mi, r9, 0x00ff0000}, false, al, "mi r9 0x00ff0000", "mi_r9_0x00ff0000"},
- {{cs, r2, 0x000ff000}, false, al, "cs r2 0x000ff000", "cs_r2_0x000ff000"},
- {{lt, r5, 0x003fc000}, false, al, "lt r5 0x003fc000", "lt_r5_0x003fc000"},
- {{al, r1, 0x000ff000}, false, al, "al r1 0x000ff000", "al_r1_0x000ff000"},
- {{mi, r6, 0x000000ab}, false, al, "mi r6 0x000000ab", "mi_r6_0x000000ab"},
- {{pl, r9, 0xac000002}, false, al, "pl r9 0xac000002", "pl_r9_0xac000002"},
- {{hi, r8, 0x000ff000}, false, al, "hi r8 0x000ff000", "hi_r8_0x000ff000"},
- {{vs, r0, 0x00002ac0}, false, al, "vs r0 0x00002ac0", "vs_r0_0x00002ac0"},
- {{ls, r4, 0xab000000}, false, al, "ls r4 0xab000000", "ls_r4_0xab000000"},
- {{vs, r6, 0x000ab000}, false, al, "vs r6 0x000ab000", "vs_r6_0x000ab000"},
- {{vc, r13, 0x0000ab00}, false, al, "vc r13 0x0000ab00", "vc_r13_"
- "0x0000ab00"},
- {{mi, r0, 0xab000000}, false, al, "mi r0 0xab000000", "mi_r0_0xab000000"},
- {{vs, r9, 0x0ab00000}, false, al, "vs r9 0x0ab00000", "vs_r9_0x0ab00000"},
- {{pl, r0, 0x00003fc0}, false, al, "pl r0 0x00003fc0", "pl_r0_0x00003fc0"},
- {{al, r2, 0x0000ff00}, false, al, "al r2 0x0000ff00", "al_r2_0x0000ff00"},
- {{gt, r11, 0x00000ab0}, false, al, "gt r11 0x00000ab0", "gt_r11_"
- "0x00000ab0"},
- {{vs, r10, 0xac000002}, false, al, "vs r10 0xac000002", "vs_r10_"
- "0xac000002"},
- {{cs, r3, 0x0002ac00}, false, al, "cs r3 0x0002ac00", "cs_r3_0x0002ac00"},
- {{vc, r13, 0x000000ab}, false, al, "vc r13 0x000000ab", "vc_r13_"
- "0x000000ab"},
- {{cs, r11, 0x003fc000}, false, al, "cs r11 0x003fc000", "cs_r11_"
- "0x003fc000"},
- {{vs, r14, 0x0000ab00}, false, al, "vs r14 0x0000ab00", "vs_r14_"
- "0x0000ab00"},
- {{eq, r11, 0xfc000003}, false, al, "eq r11 0xfc000003", "eq_r11_"
- "0xfc000003"},
- {{pl, r13, 0x00000ab0}, false, al, "pl r13 0x00000ab0", "pl_r13_"
- "0x00000ab0"},
- {{ge, r4, 0xb000000a}, false, al, "ge r4 0xb000000a", "ge_r4_0xb000000a"},
- {{pl, r12, 0x00003fc0}, false, al, "pl r12 0x00003fc0", "pl_r12_"
- "0x00003fc0"},
- {{le, r4, 0x0000ff00}, false, al, "le r4 0x0000ff00", "le_r4_0x0000ff00"},
- {{pl, r4, 0x003fc000}, false, al, "pl r4 0x003fc000", "pl_r4_0x003fc000"},
- {{hi, r0, 0x002ac000}, false, al, "hi r0 0x002ac000", "hi_r0_0x002ac000"},
- {{mi, r1, 0x00003fc0}, false, al, "mi r1 0x00003fc0", "mi_r1_0x00003fc0"},
- {{hi, r9, 0xf000000f}, false, al, "hi r9 0xf000000f", "hi_r9_0xf000000f"},
- {{al, r11, 0x0000ab00}, false, al, "al r11 0x0000ab00", "al_r11_"
- "0x0000ab00"},
- {{ne, r6, 0x00ab0000}, false, al, "ne r6 0x00ab0000", "ne_r6_0x00ab0000"},
- {{lt, r4, 0xff000000}, false, al, "lt r4 0xff000000", "lt_r4_0xff000000"},
- {{pl, r0, 0x0ab00000}, false, al, "pl r0 0x0ab00000", "pl_r0_0x0ab00000"},
- {{ls, r2, 0xc000002a}, false, al, "ls r2 0xc000002a", "ls_r2_0xc000002a"},
- {{lt, r5, 0x00003fc0}, false, al, "lt r5 0x00003fc0", "lt_r5_0x00003fc0"},
- {{mi, r5, 0x000003fc}, false, al, "mi r5 0x000003fc", "mi_r5_0x000003fc"},
- {{ls, r11, 0xb000000a}, false, al, "ls r11 0xb000000a", "ls_r11_"
- "0xb000000a"},
- {{al, r3, 0x0000ff00}, false, al, "al r3 0x0000ff00", "al_r3_0x0000ff00"},
- {{vs, r3, 0xfc000003}, false, al, "vs r3 0xfc000003", "vs_r3_0xfc000003"},
- {{ne, r1, 0xc000002a}, false, al, "ne r1 0xc000002a", "ne_r1_0xc000002a"},
- {{eq, r10, 0x0003fc00}, false, al, "eq r10 0x0003fc00", "eq_r10_"
- "0x0003fc00"},
- {{eq, r3, 0xf000000f}, false, al, "eq r3 0xf000000f", "eq_r3_0xf000000f"},
- {{vs, r5, 0x000ff000}, false, al, "vs r5 0x000ff000", "vs_r5_0x000ff000"},
- {{ge, r12, 0x000000ab}, false, al, "ge r12 0x000000ab", "ge_r12_"
- "0x000000ab"},
- {{vc, r12, 0xf000000f}, false, al, "vc r12 0xf000000f", "vc_r12_"
- "0xf000000f"},
- {{lt, r11, 0x02ac0000}, false, al, "lt r11 0x02ac0000", "lt_r11_"
- "0x02ac0000"},
- {{vs, r6, 0x003fc000}, false, al, "vs r6 0x003fc000", "vs_r6_0x003fc000"},
- {{cs, r8, 0x3fc00000}, false, al, "cs r8 0x3fc00000", "cs_r8_0x3fc00000"},
- {{le, r5, 0x0002ac00}, false, al, "le r5 0x0002ac00", "le_r5_0x0002ac00"},
- {{ls, r9, 0x0002ac00}, false, al, "ls r9 0x0002ac00", "ls_r9_0x0002ac00"},
- {{al, r4, 0x3fc00000}, false, al, "al r4 0x3fc00000", "al_r4_0x3fc00000"},
- {{lt, r5, 0x000000ff}, false, al, "lt r5 0x000000ff", "lt_r5_0x000000ff"},
- {{cs, r8, 0xc000002a}, false, al, "cs r8 0xc000002a", "cs_r8_0xc000002a"},
- {{cs, r0, 0x00000ab0}, false, al, "cs r0 0x00000ab0", "cs_r0_0x00000ab0"},
- {{cs, r2, 0x3fc00000}, false, al, "cs r2 0x3fc00000", "cs_r2_0x3fc00000"},
- {{vs, r14, 0xab000000}, false, al, "vs r14 0xab000000", "vs_r14_"
- "0xab000000"},
- {{ne, r8, 0x002ac000}, false, al, "ne r8 0x002ac000", "ne_r8_0x002ac000"},
- {{vs, r1, 0x003fc000}, false, al, "vs r1 0x003fc000", "vs_r1_0x003fc000"},
- {{al, r7, 0x003fc000}, false, al, "al r7 0x003fc000", "al_r7_0x003fc000"},
- {{vs, r7, 0x000ab000}, false, al, "vs r7 0x000ab000", "vs_r7_0x000ab000"},
- {{vc, r12, 0xb000000a}, false, al, "vc r12 0xb000000a", "vc_r12_"
- "0xb000000a"},
- {{eq, r2, 0xc000002a}, false, al, "eq r2 0xc000002a", "eq_r2_0xc000002a"},
- {{lt, r4, 0x0000ff00}, false, al, "lt r4 0x0000ff00", "lt_r4_0x0000ff00"},
- {{eq, r8, 0x2ac00000}, false, al, "eq r8 0x2ac00000", "eq_r8_0x2ac00000"},
- {{hi, r7, 0x3fc00000}, false, al, "hi r7 0x3fc00000", "hi_r7_0x3fc00000"},
- {{mi, r13, 0x3fc00000}, false, al, "mi r13 0x3fc00000", "mi_r13_"
- "0x3fc00000"},
- {{al, r2, 0x0002ac00}, false, al, "al r2 0x0002ac00", "al_r2_0x0002ac00"},
- {{gt, r13, 0xab000000}, false, al, "gt r13 0xab000000", "gt_r13_"
- "0xab000000"},
- {{vs, r3, 0x00000ab0}, false, al, "vs r3 0x00000ab0", "vs_r3_0x00000ab0"},
- {{mi, r14, 0x00000000}, false, al, "mi r14 0x00000000", "mi_r14_"
- "0x00000000"},
- {{vs, r10, 0x3fc00000}, false, al, "vs r10 0x3fc00000", "vs_r10_"
- "0x3fc00000"},
- {{vc, r7, 0x0ff00000}, false, al, "vc r7 0x0ff00000", "vc_r7_0x0ff00000"},
- {{al, r3, 0xf000000f}, false, al, "al r3 0xf000000f", "al_r3_0xf000000f"},
- {{cs, r12, 0x03fc0000}, false, al, "cs r12 0x03fc0000", "cs_r12_"
- "0x03fc0000"},
- {{hi, r14, 0xab000000}, false, al, "hi r14 0xab000000", "hi_r14_"
- "0xab000000"},
- {{mi, r13, 0x000002ac}, false, al, "mi r13 0x000002ac", "mi_r13_"
- "0x000002ac"},
- {{ge, r8, 0x2ac00000}, false, al, "ge r8 0x2ac00000", "ge_r8_0x2ac00000"},
- {{vc, r14, 0x000003fc}, false, al, "vc r14 0x000003fc", "vc_r14_"
- "0x000003fc"},
- {{mi, r4, 0x0000ab00}, false, al, "mi r4 0x0000ab00", "mi_r4_0x0000ab00"},
- {{hi, r11, 0x0ff00000}, false, al, "hi r11 0x0ff00000", "hi_r11_"
- "0x0ff00000"},
- {{gt, r8, 0x000ff000}, false, al, "gt r8 0x000ff000", "gt_r8_0x000ff000"},
- {{lt, r1, 0x3fc00000}, false, al, "lt r1 0x3fc00000", "lt_r1_0x3fc00000"},
- {{mi, r12, 0x000000ab}, false, al, "mi r12 0x000000ab", "mi_r12_"
- "0x000000ab"},
- {{vs, r12, 0x000003fc}, false, al, "vs r12 0x000003fc", "vs_r12_"
- "0x000003fc"},
- {{cs, r10, 0x000ff000}, false, al, "cs r10 0x000ff000", "cs_r10_"
- "0x000ff000"},
- {{mi, r12, 0xfc000003}, false, al, "mi r12 0xfc000003", "mi_r12_"
- "0xfc000003"},
- {{pl, r8, 0x000000ff}, false, al, "pl r8 0x000000ff", "pl_r8_0x000000ff"},
- {{lt, r5, 0x00002ac0}, false, al, "lt r5 0x00002ac0", "lt_r5_0x00002ac0"},
- {{ge, r0, 0xff000000}, false, al, "ge r0 0xff000000", "ge_r0_0xff000000"},
- {{pl, r0, 0xab000000}, false, al, "pl r0 0xab000000", "pl_r0_0xab000000"},
- {{ls, r8, 0x000ab000}, false, al, "ls r8 0x000ab000", "ls_r8_0x000ab000"},
- {{al, r12, 0x00000ff0}, false, al, "al r12 0x00000ff0", "al_r12_"
- "0x00000ff0"},
- {{hi, r5, 0x00003fc0}, false, al, "hi r5 0x00003fc0", "hi_r5_0x00003fc0"},
- {{vs, r7, 0x0003fc00}, false, al, "vs r7 0x0003fc00", "vs_r7_0x0003fc00"},
- {{ls, r13, 0x0ff00000}, false, al, "ls r13 0x0ff00000", "ls_r13_"
- "0x0ff00000"},
- {{al, r7, 0x000000ab}, false, al, "al r7 0x000000ab", "al_r7_0x000000ab"},
- {{lt, r6, 0x000003fc}, false, al, "lt r6 0x000003fc", "lt_r6_0x000003fc"},
- {{mi, r9, 0xc000002a}, false, al, "mi r9 0xc000002a", "mi_r9_0xc000002a"},
- {{ne, r11, 0x003fc000}, false, al, "ne r11 0x003fc000", "ne_r11_"
- "0x003fc000"},
- {{cs, r4, 0x00000ab0}, false, al, "cs r4 0x00000ab0", "cs_r4_0x00000ab0"},
- {{vc, r14, 0x2ac00000}, false, al, "vc r14 0x2ac00000", "vc_r14_"
- "0x2ac00000"},
- {{vc, r8, 0x2ac00000}, false, al, "vc r8 0x2ac00000", "vc_r8_0x2ac00000"},
- {{ge, r10, 0x003fc000}, false, al, "ge r10 0x003fc000", "ge_r10_"
- "0x003fc000"},
- {{lt, r14, 0xb000000a}, false, al, "lt r14 0xb000000a", "lt_r14_"
- "0xb000000a"},
- {{cs, r12, 0x000ff000}, false, al, "cs r12 0x000ff000", "cs_r12_"
- "0x000ff000"},
- {{eq, r2, 0xac000002}, false, al, "eq r2 0xac000002", "eq_r2_0xac000002"},
- {{le, r11, 0x2ac00000}, false, al, "le r11 0x2ac00000", "le_r11_"
- "0x2ac00000"},
- {{le, r8, 0xab000000}, false, al, "le r8 0xab000000", "le_r8_0xab000000"},
- {{lt, r5, 0x02ac0000}, false, al, "lt r5 0x02ac0000", "lt_r5_0x02ac0000"},
- {{hi, r13, 0x003fc000}, false, al, "hi r13 0x003fc000", "hi_r13_"
- "0x003fc000"},
- {{mi, r1, 0xfc000003}, false, al, "mi r1 0xfc000003", "mi_r1_0xfc000003"},
- {{cc, r4, 0x0ab00000}, false, al, "cc r4 0x0ab00000", "cc_r4_0x0ab00000"},
- {{lt, r7, 0x3fc00000}, false, al, "lt r7 0x3fc00000", "lt_r7_0x3fc00000"},
- {{lt, r3, 0x000002ac}, false, al, "lt r3 0x000002ac", "lt_r3_0x000002ac"},
- {{pl, r7, 0x00000000}, false, al, "pl r7 0x00000000", "pl_r7_0x00000000"},
- {{lt, r2, 0x00000000}, false, al, "lt r2 0x00000000", "lt_r2_0x00000000"},
- {{le, r1, 0x0ff00000}, false, al, "le r1 0x0ff00000", "le_r1_0x0ff00000"},
- {{lt, r14, 0x0000ff00}, false, al, "lt r14 0x0000ff00", "lt_r14_"
- "0x0000ff00"},
- {{cs, r11, 0x3fc00000}, false, al, "cs r11 0x3fc00000", "cs_r11_"
- "0x3fc00000"},
- {{ls, r7, 0x00000ff0}, false, al, "ls r7 0x00000ff0", "ls_r7_0x00000ff0"},
- {{vs, r3, 0x0ab00000}, false, al, "vs r3 0x0ab00000", "vs_r3_0x0ab00000"},
- {{cs, r12, 0x0003fc00}, false, al, "cs r12 0x0003fc00", "cs_r12_"
- "0x0003fc00"},
- {{vc, r3, 0xfc000003}, false, al, "vc r3 0xfc000003", "vc_r3_0xfc000003"},
- {{vs, r14, 0x0ff00000}, false, al, "vs r14 0x0ff00000", "vs_r14_"
- "0x0ff00000"},
- {{vc, r5, 0x00003fc0}, false, al, "vc r5 0x00003fc0", "vc_r5_0x00003fc0"},
- {{cc, r14, 0x0ff00000}, false, al, "cc r14 0x0ff00000", "cc_r14_"
- "0x0ff00000"},
- {{cs, r7, 0x0ff00000}, false, al, "cs r7 0x0ff00000", "cs_r7_0x0ff00000"},
- {{al, r2, 0x00ab0000}, false, al, "al r2 0x00ab0000", "al_r2_0x00ab0000"},
- {{gt, r0, 0x00000000}, false, al, "gt r0 0x00000000", "gt_r0_0x00000000"},
- {{al, r6, 0x000003fc}, false, al, "al r6 0x000003fc", "al_r6_0x000003fc"},
- {{ge, r2, 0x2ac00000}, false, al, "ge r2 0x2ac00000", "ge_r2_0x2ac00000"},
- {{vs, r0, 0x03fc0000}, false, al, "vs r0 0x03fc0000", "vs_r0_0x03fc0000"},
- {{cs, r12, 0x00003fc0}, false, al, "cs r12 0x00003fc0", "cs_r12_"
- "0x00003fc0"},
- {{ge, r3, 0x2ac00000}, false, al, "ge r3 0x2ac00000", "ge_r3_0x2ac00000"},
- {{le, r13, 0x0000ab00}, false, al, "le r13 0x0000ab00", "le_r13_"
- "0x0000ab00"},
- {{al, r13, 0x02ac0000}, false, al, "al r13 0x02ac0000", "al_r13_"
- "0x02ac0000"},
- {{mi, r4, 0xff000000}, false, al, "mi r4 0xff000000", "mi_r4_0xff000000"},
- {{cs, r3, 0x00003fc0}, false, al, "cs r3 0x00003fc0", "cs_r3_0x00003fc0"},
- {{ge, r13, 0x00ab0000}, false, al, "ge r13 0x00ab0000", "ge_r13_"
- "0x00ab0000"},
- {{ne, r4, 0x00000ab0}, false, al, "ne r4 0x00000ab0", "ne_r4_0x00000ab0"},
- {{cc, r3, 0xac000002}, false, al, "cc r3 0xac000002", "cc_r3_0xac000002"},
- {{pl, r11, 0xab000000}, false, al, "pl r11 0xab000000", "pl_r11_"
- "0xab000000"},
- {{eq, r13, 0xfc000003}, false, al, "eq r13 0xfc000003", "eq_r13_"
- "0xfc000003"},
- {{ne, r5, 0xc000003f}, false, al, "ne r5 0xc000003f", "ne_r5_0xc000003f"},
- {{hi, r7, 0xb000000a}, false, al, "hi r7 0xb000000a", "hi_r7_0xb000000a"},
- {{al, r12, 0xc000002a}, false, al, "al r12 0xc000002a", "al_r12_"
- "0xc000002a"},
- {{vs, r8, 0xf000000f}, false, al, "vs r8 0xf000000f", "vs_r8_0xf000000f"},
- {{cs, r6, 0x00ff0000}, false, al, "cs r6 0x00ff0000", "cs_r6_0x00ff0000"},
- {{vs, r1, 0x00002ac0}, false, al, "vs r1 0x00002ac0", "vs_r1_0x00002ac0"},
- {{ls, r2, 0x0ff00000}, false, al, "ls r2 0x0ff00000", "ls_r2_0x0ff00000"},
- {{mi, r13, 0x0000ab00}, false, al, "mi r13 0x0000ab00", "mi_r13_"
- "0x0000ab00"},
- {{al, r4, 0xff000000}, false, al, "al r4 0xff000000", "al_r4_0xff000000"},
- {{ne, r1, 0x00002ac0}, false, al, "ne r1 0x00002ac0", "ne_r1_0x00002ac0"},
- {{vc, r14, 0x000002ac}, false, al, "vc r14 0x000002ac", "vc_r14_"
- "0x000002ac"},
- {{al, r9, 0xb000000a}, false, al, "al r9 0xb000000a", "al_r9_0xb000000a"},
- {{ne, r9, 0x000002ac}, false, al, "ne r9 0x000002ac", "ne_r9_0x000002ac"},
- {{hi, r7, 0x0ff00000}, false, al, "hi r7 0x0ff00000", "hi_r7_0x0ff00000"},
- {{ne, r10, 0x000ab000}, false, al, "ne r10 0x000ab000", "ne_r10_"
- "0x000ab000"},
- {{vs, r0, 0x0003fc00}, false, al, "vs r0 0x0003fc00", "vs_r0_0x0003fc00"},
- {{cs, r9, 0x002ac000}, false, al, "cs r9 0x002ac000", "cs_r9_0x002ac000"},
- {{eq, r3, 0x00000ff0}, false, al, "eq r3 0x00000ff0", "eq_r3_0x00000ff0"},
- {{lt, r5, 0x00ab0000}, false, al, "lt r5 0x00ab0000", "lt_r5_0x00ab0000"},
- {{pl, r1, 0x00ff0000}, false, al, "pl r1 0x00ff0000", "pl_r1_0x00ff0000"},
- {{eq, r4, 0x0000ab00}, false, al, "eq r4 0x0000ab00", "eq_r4_0x0000ab00"},
- {{mi, r13, 0x000000ab}, false, al, "mi r13 0x000000ab", "mi_r13_"
- "0x000000ab"},
- {{pl, r12, 0x00000ff0}, false, al, "pl r12 0x00000ff0", "pl_r12_"
- "0x00000ff0"},
- {{eq, r3, 0x00002ac0}, false, al, "eq r3 0x00002ac0", "eq_r3_0x00002ac0"},
- {{le, r12, 0x002ac000}, false, al, "le r12 0x002ac000", "le_r12_"
- "0x002ac000"},
- {{ge, r10, 0x000002ac}, false, al, "ge r10 0x000002ac", "ge_r10_"
- "0x000002ac"},
- {{vs, r1, 0x00ff0000}, false, al, "vs r1 0x00ff0000", "vs_r1_0x00ff0000"},
- {{pl, r8, 0x0000ff00}, false, al, "pl r8 0x0000ff00", "pl_r8_0x0000ff00"},
- {{vs, r9, 0x000ab000}, false, al, "vs r9 0x000ab000", "vs_r9_0x000ab000"},
- {{ls, r6, 0x003fc000}, false, al, "ls r6 0x003fc000", "ls_r6_0x003fc000"},
- {{vs, r14, 0x0ab00000}, false, al, "vs r14 0x0ab00000", "vs_r14_"
- "0x0ab00000"},
- {{mi, r14, 0xf000000f}, false, al, "mi r14 0xf000000f", "mi_r14_"
- "0xf000000f"},
- {{vc, r6, 0xf000000f}, false, al, "vc r6 0xf000000f", "vc_r6_0xf000000f"},
- {{ne, r4, 0x0000ff00}, false, al, "ne r4 0x0000ff00", "ne_r4_0x0000ff00"},
- {{gt, r10, 0xfc000003}, false, al, "gt r10 0xfc000003", "gt_r10_"
- "0xfc000003"},
- {{cs, r6, 0x3fc00000}, false, al, "cs r6 0x3fc00000", "cs_r6_0x3fc00000"},
- {{al, r10, 0x0ff00000}, false, al, "al r10 0x0ff00000", "al_r10_"
- "0x0ff00000"},
- {{pl, r12, 0x00000000}, false, al, "pl r12 0x00000000", "pl_r12_"
- "0x00000000"},
- {{cc, r5, 0xfc000003}, false, al, "cc r5 0xfc000003", "cc_r5_0xfc000003"},
- {{pl, r10, 0x0003fc00}, false, al, "pl r10 0x0003fc00", "pl_r10_"
- "0x0003fc00"},
- {{eq, r8, 0xac000002}, false, al, "eq r8 0xac000002", "eq_r8_0xac000002"},
- {{vs, r12, 0xac000002}, false, al, "vs r12 0xac000002", "vs_r12_"
- "0xac000002"},
- {{ne, r9, 0x00ab0000}, false, al, "ne r9 0x00ab0000", "ne_r9_0x00ab0000"},
- {{al, r1, 0x0002ac00}, false, al, "al r1 0x0002ac00", "al_r1_0x0002ac00"},
- {{ne, r6, 0x0000ff00}, false, al, "ne r6 0x0000ff00", "ne_r6_0x0000ff00"},
- {{mi, r3, 0x03fc0000}, false, al, "mi r3 0x03fc0000", "mi_r3_0x03fc0000"},
- {{ge, r10, 0x0002ac00}, false, al, "ge r10 0x0002ac00", "ge_r10_"
- "0x0002ac00"},
- {{vc, r5, 0xb000000a}, false, al, "vc r5 0xb000000a", "vc_r5_0xb000000a"},
- {{pl, r1, 0x000003fc}, false, al, "pl r1 0x000003fc", "pl_r1_0x000003fc"},
- {{mi, r2, 0x02ac0000}, false, al, "mi r2 0x02ac0000", "mi_r2_0x02ac0000"},
- {{gt, r7, 0x0003fc00}, false, al, "gt r7 0x0003fc00", "gt_r7_0x0003fc00"},
- {{vs, r0, 0x00000000}, false, al, "vs r0 0x00000000", "vs_r0_0x00000000"},
- {{vc, r11, 0xc000003f}, false, al, "vc r11 0xc000003f", "vc_r11_"
- "0xc000003f"},
- {{vc, r13, 0x0ab00000}, false, al, "vc r13 0x0ab00000", "vc_r13_"
- "0x0ab00000"},
- {{ge, r5, 0x0002ac00}, false, al, "ge r5 0x0002ac00", "ge_r5_0x0002ac00"},
- {{ge, r8, 0xc000003f}, false, al, "ge r8 0xc000003f", "ge_r8_0xc000003f"},
- {{al, r14, 0x000002ac}, false, al, "al r14 0x000002ac", "al_r14_"
- "0x000002ac"},
- {{vs, r1, 0x00000000}, false, al, "vs r1 0x00000000", "vs_r1_0x00000000"},
- {{vc, r2, 0x3fc00000}, false, al, "vc r2 0x3fc00000", "vc_r2_0x3fc00000"},
- {{ne, r2, 0xc000003f}, false, al, "ne r2 0xc000003f", "ne_r2_0xc000003f"},
- {{cs, r0, 0x0ab00000}, false, al, "cs r0 0x0ab00000", "cs_r0_0x0ab00000"},
- {{le, r5, 0xfc000003}, false, al, "le r5 0xfc000003", "le_r5_0xfc000003"},
- {{cs, r3, 0x000002ac}, false, al, "cs r3 0x000002ac", "cs_r3_0x000002ac"},
- {{hi, r3, 0x0000ab00}, false, al, "hi r3 0x0000ab00", "hi_r3_0x0000ab00"},
- {{ge, r9, 0x00ab0000}, false, al, "ge r9 0x00ab0000", "ge_r9_0x00ab0000"},
- {{le, r0, 0x000ab000}, false, al, "le r0 0x000ab000", "le_r0_0x000ab000"},
- {{cc, r7, 0x000003fc}, false, al, "cc r7 0x000003fc", "cc_r7_0x000003fc"},
- {{pl, r7, 0x00002ac0}, false, al, "pl r7 0x00002ac0", "pl_r7_0x00002ac0"},
- {{cc, r1, 0x00000ab0}, false, al, "cc r1 0x00000ab0", "cc_r1_0x00000ab0"},
- {{le, r8, 0x0002ac00}, false, al, "le r8 0x0002ac00", "le_r8_0x0002ac00"},
- {{mi, r9, 0x0003fc00}, false, al, "mi r9 0x0003fc00", "mi_r9_0x0003fc00"},
- {{cs, r2, 0x000002ac}, false, al, "cs r2 0x000002ac", "cs_r2_0x000002ac"},
- {{vc, r2, 0xb000000a}, false, al, "vc r2 0xb000000a", "vc_r2_0xb000000a"},
- {{pl, r4, 0x000ab000}, false, al, "pl r4 0x000ab000", "pl_r4_0x000ab000"},
- {{hi, r0, 0x0003fc00}, false, al, "hi r0 0x0003fc00", "hi_r0_0x0003fc00"},
- {{vs, r12, 0x000ab000}, false, al, "vs r12 0x000ab000", "vs_r12_"
- "0x000ab000"},
- {{lt, r9, 0x0003fc00}, false, al, "lt r9 0x0003fc00", "lt_r9_0x0003fc00"},
- {{mi, r11, 0x00002ac0}, false, al, "mi r11 0x00002ac0", "mi_r11_"
- "0x00002ac0"},
- {{ls, r13, 0x000ab000}, false, al, "ls r13 0x000ab000", "ls_r13_"
- "0x000ab000"},
- {{al, r3, 0x3fc00000}, false, al, "al r3 0x3fc00000", "al_r3_0x3fc00000"},
- {{eq, r14, 0x000000ff}, false, al, "eq r14 0x000000ff", "eq_r14_"
- "0x000000ff"},
- {{le, r12, 0xff000000}, false, al, "le r12 0xff000000", "le_r12_"
- "0xff000000"},
- {{gt, r8, 0xff000000}, false, al, "gt r8 0xff000000", "gt_r8_0xff000000"},
- {{eq, r0, 0x00ff0000}, false, al, "eq r0 0x00ff0000", "eq_r0_0x00ff0000"},
- {{cc, r5, 0xff000000}, false, al, "cc r5 0xff000000", "cc_r5_0xff000000"},
- {{mi, r2, 0x0003fc00}, false, al, "mi r2 0x0003fc00", "mi_r2_0x0003fc00"},
- {{cs, r10, 0xf000000f}, false, al, "cs r10 0xf000000f", "cs_r10_"
- "0xf000000f"},
- {{eq, r0, 0xab000000}, false, al, "eq r0 0xab000000", "eq_r0_0xab000000"},
- {{al, r1, 0x03fc0000}, false, al, "al r1 0x03fc0000", "al_r1_0x03fc0000"},
- {{ne, r5, 0xff000000}, false, al, "ne r5 0xff000000", "ne_r5_0xff000000"},
- {{ne, r1, 0x03fc0000}, false, al, "ne r1 0x03fc0000", "ne_r1_0x03fc0000"},
- {{ls, r1, 0x000ff000}, false, al, "ls r1 0x000ff000", "ls_r1_0x000ff000"},
- {{vc, r0, 0x00003fc0}, false, al, "vc r0 0x00003fc0", "vc_r0_0x00003fc0"},
- {{eq, r4, 0x0003fc00}, false, al, "eq r4 0x0003fc00", "eq_r4_0x0003fc00"},
- {{mi, r3, 0xab000000}, false, al, "mi r3 0xab000000", "mi_r3_0xab000000"},
- {{mi, r5, 0xfc000003}, false, al, "mi r5 0xfc000003", "mi_r5_0xfc000003"},
- {{vs, r8, 0xab000000}, false, al, "vs r8 0xab000000", "vs_r8_0xab000000"},
- {{cs, r9, 0x00ff0000}, false, al, "cs r9 0x00ff0000", "cs_r9_0x00ff0000"},
- {{ge, r7, 0x00000ab0}, false, al, "ge r7 0x00000ab0", "ge_r7_0x00000ab0"},
- {{al, r9, 0xf000000f}, false, al, "al r9 0xf000000f", "al_r9_0xf000000f"},
- {{vc, r10, 0x000000ff}, false, al, "vc r10 0x000000ff", "vc_r10_"
- "0x000000ff"},
- {{al, r4, 0x00ab0000}, false, al, "al r4 0x00ab0000", "al_r4_0x00ab0000"},
- {{ls, r6, 0x0000ff00}, false, al, "ls r6 0x0000ff00", "ls_r6_0x0000ff00"},
- {{cc, r14, 0x0ab00000}, false, al, "cc r14 0x0ab00000", "cc_r14_"
- "0x0ab00000"},
- {{le, r5, 0x3fc00000}, false, al, "le r5 0x3fc00000", "le_r5_0x3fc00000"},
- {{ne, r12, 0x0ff00000}, false, al, "ne r12 0x0ff00000", "ne_r12_"
- "0x0ff00000"},
- {{cc, r11, 0xf000000f}, false, al, "cc r11 0xf000000f", "cc_r11_"
- "0xf000000f"},
- {{lt, r9, 0x000003fc}, false, al, "lt r9 0x000003fc", "lt_r9_0x000003fc"},
- {{ne, r7, 0x000002ac}, false, al, "ne r7 0x000002ac", "ne_r7_0x000002ac"},
- {{ge, r5, 0x00003fc0}, false, al, "ge r5 0x00003fc0", "ge_r5_0x00003fc0"},
- {{lt, r14, 0x00002ac0}, false, al, "lt r14 0x00002ac0", "lt_r14_"
- "0x00002ac0"},
- {{vs, r8, 0x0002ac00}, false, al, "vs r8 0x0002ac00", "vs_r8_0x0002ac00"},
- {{gt, r7, 0x0000ff00}, false, al, "gt r7 0x0000ff00", "gt_r7_0x0000ff00"},
- {{gt, r11, 0x000002ac}, false, al, "gt r11 0x000002ac", "gt_r11_"
- "0x000002ac"},
- {{lt, r12, 0x00003fc0}, false, al, "lt r12 0x00003fc0", "lt_r12_"
- "0x00003fc0"},
- {{gt, r4, 0x00ff0000}, false, al, "gt r4 0x00ff0000", "gt_r4_0x00ff0000"},
- {{gt, r0, 0x000ff000}, false, al, "gt r0 0x000ff000", "gt_r0_0x000ff000"},
- {{hi, r3, 0x0002ac00}, false, al, "hi r3 0x0002ac00", "hi_r3_0x0002ac00"},
- {{al, r6, 0x003fc000}, false, al, "al r6 0x003fc000", "al_r6_0x003fc000"},
- {{al, r5, 0x0003fc00}, false, al, "al r5 0x0003fc00", "al_r5_0x0003fc00"},
- {{mi, r14, 0x03fc0000}, false, al, "mi r14 0x03fc0000", "mi_r14_"
- "0x03fc0000"},
- {{le, r10, 0x0ab00000}, false, al, "le r10 0x0ab00000", "le_r10_"
- "0x0ab00000"},
- {{vc, r4, 0xab000000}, false, al, "vc r4 0xab000000", "vc_r4_0xab000000"},
- {{lt, r12, 0xf000000f}, false, al, "lt r12 0xf000000f", "lt_r12_"
- "0xf000000f"},
- {{le, r12, 0xf000000f}, false, al, "le r12 0xf000000f", "le_r12_"
- "0xf000000f"},
- {{pl, r3, 0xfc000003}, false, al, "pl r3 0xfc000003", "pl_r3_0xfc000003"},
- {{ls, r9, 0x000003fc}, false, al, "ls r9 0x000003fc", "ls_r9_0x000003fc"},
- {{hi, r0, 0x00ff0000}, false, al, "hi r0 0x00ff0000", "hi_r0_0x00ff0000"},
- {{ge, r5, 0x000000ab}, false, al, "ge r5 0x000000ab", "ge_r5_0x000000ab"},
- {{pl, r1, 0x00000000}, false, al, "pl r1 0x00000000", "pl_r1_0x00000000"},
- {{vs, r11, 0x0002ac00}, false, al, "vs r11 0x0002ac00", "vs_r11_"
- "0x0002ac00"},
- {{cc, r10, 0xac000002}, false, al, "cc r10 0xac000002", "cc_r10_"
- "0xac000002"},
- {{al, r2, 0x00000000}, false, al, "al r2 0x00000000", "al_r2_0x00000000"},
- {{eq, r8, 0xab000000}, false, al, "eq r8 0xab000000", "eq_r8_0xab000000"},
- {{al, r5, 0xb000000a}, false, al, "al r5 0xb000000a", "al_r5_0xb000000a"},
- {{cs, r8, 0x000003fc}, false, al, "cs r8 0x000003fc", "cs_r8_0x000003fc"},
- {{ge, r5, 0xc000003f}, false, al, "ge r5 0xc000003f", "ge_r5_0xc000003f"},
- {{eq, r5, 0x00000ff0}, false, al, "eq r5 0x00000ff0", "eq_r5_0x00000ff0"},
- {{eq, r5, 0x000ff000}, false, al, "eq r5 0x000ff000", "eq_r5_0x000ff000"},
- {{ls, r7, 0x00000000}, false, al, "ls r7 0x00000000", "ls_r7_0x00000000"},
- {{vc, r9, 0x00ff0000}, false, al, "vc r9 0x00ff0000", "vc_r9_0x00ff0000"},
- {{le, r6, 0x00000ff0}, false, al, "le r6 0x00000ff0", "le_r6_0x00000ff0"},
- {{pl, r10, 0xac000002}, false, al, "pl r10 0xac000002", "pl_r10_"
- "0xac000002"},
- {{vs, r13, 0x3fc00000}, false, al, "vs r13 0x3fc00000", "vs_r13_"
- "0x3fc00000"},
- {{gt, r12, 0x000ab000}, false, al, "gt r12 0x000ab000", "gt_r12_"
- "0x000ab000"},
- {{pl, r7, 0x0000ff00}, false, al, "pl r7 0x0000ff00", "pl_r7_0x0000ff00"},
- {{hi, r13, 0xab000000}, false, al, "hi r13 0xab000000", "hi_r13_"
- "0xab000000"},
- {{vc, r3, 0x00003fc0}, false, al, "vc r3 0x00003fc0", "vc_r3_0x00003fc0"},
- {{eq, r8, 0x03fc0000}, false, al, "eq r8 0x03fc0000", "eq_r8_0x03fc0000"},
- {{ge, r7, 0x00ab0000}, false, al, "ge r7 0x00ab0000", "ge_r7_0x00ab0000"},
- {{ls, r5, 0x00000ab0}, false, al, "ls r5 0x00000ab0", "ls_r5_0x00000ab0"},
- {{ls, r8, 0xb000000a}, false, al, "ls r8 0xb000000a", "ls_r8_0xb000000a"},
- {{cs, r2, 0xab000000}, false, al, "cs r2 0xab000000", "cs_r2_0xab000000"},
- {{ne, r1, 0x0ab00000}, false, al, "ne r1 0x0ab00000", "ne_r1_0x0ab00000"},
- {{hi, r3, 0x0ab00000}, false, al, "hi r3 0x0ab00000", "hi_r3_0x0ab00000"},
- {{vc, r1, 0x00ab0000}, false, al, "vc r1 0x00ab0000", "vc_r1_0x00ab0000"},
- {{gt, r12, 0x0003fc00}, false, al, "gt r12 0x0003fc00", "gt_r12_"
- "0x0003fc00"},
- {{lt, r11, 0x00000ff0}, false, al, "lt r11 0x00000ff0", "lt_r11_"
- "0x00000ff0"},
- {{ne, r5, 0xc000002a}, false, al, "ne r5 0xc000002a", "ne_r5_0xc000002a"},
- {{vc, r6, 0x000003fc}, false, al, "vc r6 0x000003fc", "vc_r6_0x000003fc"},
- {{gt, r1, 0xab000000}, false, al, "gt r1 0xab000000", "gt_r1_0xab000000"},
- {{gt, r5, 0x3fc00000}, false, al, "gt r5 0x3fc00000", "gt_r5_0x3fc00000"},
- {{cs, r3, 0x00002ac0}, false, al, "cs r3 0x00002ac0", "cs_r3_0x00002ac0"},
- {{mi, r0, 0xff000000}, false, al, "mi r0 0xff000000", "mi_r0_0xff000000"},
- {{cs, r3, 0x00000ff0}, false, al, "cs r3 0x00000ff0", "cs_r3_0x00000ff0"},
- {{le, r13, 0xff000000}, false, al, "le r13 0xff000000", "le_r13_"
- "0xff000000"},
- {{hi, r8, 0x00000000}, false, al, "hi r8 0x00000000", "hi_r8_0x00000000"},
- {{vc, r12, 0x0002ac00}, false, al, "vc r12 0x0002ac00", "vc_r12_"
- "0x0002ac00"},
- {{eq, r9, 0x00000ff0}, false, al, "eq r9 0x00000ff0", "eq_r9_0x00000ff0"},
- {{vc, r14, 0x0ff00000}, false, al, "vc r14 0x0ff00000", "vc_r14_"
- "0x0ff00000"},
- {{hi, r2, 0x00ff0000}, false, al, "hi r2 0x00ff0000", "hi_r2_0x00ff0000"},
- {{al, r2, 0x03fc0000}, false, al, "al r2 0x03fc0000", "al_r2_0x03fc0000"},
- {{mi, r14, 0x0ab00000}, false, al, "mi r14 0x0ab00000", "mi_r14_"
- "0x0ab00000"},
- {{vs, r11, 0x000ff000}, false, al, "vs r11 0x000ff000", "vs_r11_"
- "0x000ff000"},
- {{le, r11, 0x0003fc00}, false, al, "le r11 0x0003fc00", "le_r11_"
- "0x0003fc00"},
- {{eq, r6, 0x002ac000}, false, al, "eq r6 0x002ac000", "eq_r6_0x002ac000"},
- {{ne, r12, 0xab000000}, false, al, "ne r12 0xab000000", "ne_r12_"
- "0xab000000"},
- {{eq, r4, 0x000ff000}, false, al, "eq r4 0x000ff000", "eq_r4_0x000ff000"},
- {{cs, r6, 0x000003fc}, false, al, "cs r6 0x000003fc", "cs_r6_0x000003fc"},
- {{le, r13, 0xf000000f}, false, al, "le r13 0xf000000f", "le_r13_"
- "0xf000000f"},
- {{vs, r3, 0x02ac0000}, false, al, "vs r3 0x02ac0000", "vs_r3_0x02ac0000"},
- {{lt, r3, 0xab000000}, false, al, "lt r3 0xab000000", "lt_r3_0xab000000"},
- {{pl, r5, 0xab000000}, false, al, "pl r5 0xab000000", "pl_r5_0xab000000"},
- {{mi, r10, 0x00002ac0}, false, al, "mi r10 0x00002ac0", "mi_r10_"
- "0x00002ac0"},
- {{vc, r3, 0x000000ab}, false, al, "vc r3 0x000000ab", "vc_r3_0x000000ab"},
- {{pl, r10, 0x02ac0000}, false, al, "pl r10 0x02ac0000", "pl_r10_"
- "0x02ac0000"},
- {{vs, r11, 0x3fc00000}, false, al, "vs r11 0x3fc00000", "vs_r11_"
- "0x3fc00000"},
- {{cc, r7, 0x3fc00000}, false, al, "cc r7 0x3fc00000", "cc_r7_0x3fc00000"},
- {{cs, r3, 0x0000ab00}, false, al, "cs r3 0x0000ab00", "cs_r3_0x0000ab00"},
- {{cc, r14, 0x02ac0000}, false, al, "cc r14 0x02ac0000", "cc_r14_"
- "0x02ac0000"},
- {{vs, r0, 0x2ac00000}, false, al, "vs r0 0x2ac00000", "vs_r0_0x2ac00000"},
- {{gt, r13, 0x0ff00000}, false, al, "gt r13 0x0ff00000", "gt_r13_"
- "0x0ff00000"},
- {{mi, r10, 0x2ac00000}, false, al, "mi r10 0x2ac00000", "mi_r10_"
- "0x2ac00000"},
- {{ls, r2, 0x0ab00000}, false, al, "ls r2 0x0ab00000", "ls_r2_0x0ab00000"},
- {{pl, r10, 0x000000ff}, false, al, "pl r10 0x000000ff", "pl_r10_"
- "0x000000ff"},
- {{al, r9, 0x3fc00000}, false, al, "al r9 0x3fc00000", "al_r9_0x3fc00000"},
- {{vs, r4, 0x2ac00000}, false, al, "vs r4 0x2ac00000", "vs_r4_0x2ac00000"},
- {{vs, r12, 0x000ff000}, false, al, "vs r12 0x000ff000", "vs_r12_"
- "0x000ff000"},
- {{ge, r7, 0xc000002a}, false, al, "ge r7 0xc000002a", "ge_r7_0xc000002a"},
- {{vc, r7, 0x0002ac00}, false, al, "vc r7 0x0002ac00", "vc_r7_0x0002ac00"},
- {{gt, r7, 0x000ab000}, false, al, "gt r7 0x000ab000", "gt_r7_0x000ab000"},
- {{gt, r13, 0x000000ab}, false, al, "gt r13 0x000000ab", "gt_r13_"
- "0x000000ab"},
- {{ne, r1, 0xfc000003}, false, al, "ne r1 0xfc000003", "ne_r1_0xfc000003"},
- {{vc, r14, 0x000ab000}, false, al, "vc r14 0x000ab000", "vc_r14_"
- "0x000ab000"},
- {{gt, r5, 0xf000000f}, false, al, "gt r5 0xf000000f", "gt_r5_0xf000000f"},
- {{ge, r6, 0x00ff0000}, false, al, "ge r6 0x00ff0000", "ge_r6_0x00ff0000"},
- {{ls, r12, 0x0000ff00}, false, al, "ls r12 0x0000ff00", "ls_r12_"
- "0x0000ff00"},
- {{mi, r12, 0x03fc0000}, false, al, "mi r12 0x03fc0000", "mi_r12_"
- "0x03fc0000"},
- {{vs, r10, 0x0003fc00}, false, al, "vs r10 0x0003fc00", "vs_r10_"
- "0x0003fc00"},
- {{gt, r6, 0x0000ff00}, false, al, "gt r6 0x0000ff00", "gt_r6_0x0000ff00"},
- {{pl, r6, 0x00ff0000}, false, al, "pl r6 0x00ff0000", "pl_r6_0x00ff0000"},
- {{ne, r0, 0x002ac000}, false, al, "ne r0 0x002ac000", "ne_r0_0x002ac000"},
- {{eq, r2, 0x003fc000}, false, al, "eq r2 0x003fc000", "eq_r2_0x003fc000"},
- {{ne, r10, 0x000003fc}, false, al, "ne r10 0x000003fc", "ne_r10_"
- "0x000003fc"},
- {{lt, r10, 0x02ac0000}, false, al, "lt r10 0x02ac0000", "lt_r10_"
- "0x02ac0000"},
- {{lt, r2, 0x000ff000}, false, al, "lt r2 0x000ff000", "lt_r2_0x000ff000"},
- {{pl, r3, 0x0000ab00}, false, al, "pl r3 0x0000ab00", "pl_r3_0x0000ab00"},
- {{ge, r9, 0xc000003f}, false, al, "ge r9 0xc000003f", "ge_r9_0xc000003f"},
- {{vs, r4, 0x000003fc}, false, al, "vs r4 0x000003fc", "vs_r4_0x000003fc"},
- {{hi, r13, 0xf000000f}, false, al, "hi r13 0xf000000f", "hi_r13_"
- "0xf000000f"},
- {{pl, r1, 0x02ac0000}, false, al, "pl r1 0x02ac0000", "pl_r1_0x02ac0000"},
- {{pl, r1, 0x000000ff}, false, al, "pl r1 0x000000ff", "pl_r1_0x000000ff"},
- {{al, r3, 0x00000ff0}, false, al, "al r3 0x00000ff0", "al_r3_0x00000ff0"},
- {{gt, r0, 0x03fc0000}, false, al, "gt r0 0x03fc0000", "gt_r0_0x03fc0000"},
- {{cc, r1, 0x2ac00000}, false, al, "cc r1 0x2ac00000", "cc_r1_0x2ac00000"},
- {{mi, r9, 0xc000003f}, false, al, "mi r9 0xc000003f", "mi_r9_0xc000003f"},
- {{vc, r4, 0xff000000}, false, al, "vc r4 0xff000000", "vc_r4_0xff000000"},
- {{pl, r9, 0x0002ac00}, false, al, "pl r9 0x0002ac00", "pl_r9_0x0002ac00"},
- {{vs, r8, 0x02ac0000}, false, al, "vs r8 0x02ac0000", "vs_r8_0x02ac0000"},
- {{gt, r7, 0xf000000f}, false, al, "gt r7 0xf000000f", "gt_r7_0xf000000f"},
- {{gt, r10, 0xff000000}, false, al, "gt r10 0xff000000", "gt_r10_"
- "0xff000000"},
- {{cs, r0, 0x00003fc0}, false, al, "cs r0 0x00003fc0", "cs_r0_0x00003fc0"},
- {{gt, r8, 0x03fc0000}, false, al, "gt r8 0x03fc0000", "gt_r8_0x03fc0000"},
- {{ne, r8, 0x00002ac0}, false, al, "ne r8 0x00002ac0", "ne_r8_0x00002ac0"},
- {{ne, r13, 0xc000002a}, false, al, "ne r13 0xc000002a", "ne_r13_"
- "0xc000002a"},
- {{ne, r8, 0x3fc00000}, false, al, "ne r8 0x3fc00000", "ne_r8_0x3fc00000"},
- {{vs, r2, 0x002ac000}, false, al, "vs r2 0x002ac000", "vs_r2_0x002ac000"},
- {{vs, r7, 0x000000ab}, false, al, "vs r7 0x000000ab", "vs_r7_0x000000ab"},
- {{cc, r6, 0x00000000}, false, al, "cc r6 0x00000000", "cc_r6_0x00000000"},
- {{ls, r3, 0xf000000f}, false, al, "ls r3 0xf000000f", "ls_r3_0xf000000f"},
- {{hi, r0, 0x000000ff}, false, al, "hi r0 0x000000ff", "hi_r0_0x000000ff"},
- {{cs, r6, 0x00002ac0}, false, al, "cs r6 0x00002ac0", "cs_r6_0x00002ac0"},
- {{al, r1, 0xb000000a}, false, al, "al r1 0xb000000a", "al_r1_0xb000000a"},
- {{cc, r0, 0x00002ac0}, false, al, "cc r0 0x00002ac0", "cc_r0_0x00002ac0"},
- {{cc, r12, 0x000003fc}, false, al, "cc r12 0x000003fc", "cc_r12_"
- "0x000003fc"},
- {{ne, r14, 0xc000002a}, false, al, "ne r14 0xc000002a", "ne_r14_"
- "0xc000002a"},
- {{al, r6, 0xc000003f}, false, al, "al r6 0xc000003f", "al_r6_0xc000003f"},
- {{ls, r5, 0xf000000f}, false, al, "ls r5 0xf000000f", "ls_r5_0xf000000f"},
- {{mi, r5, 0x03fc0000}, false, al, "mi r5 0x03fc0000", "mi_r5_0x03fc0000"},
- {{cs, r14, 0xf000000f}, false, al, "cs r14 0xf000000f", "cs_r14_"
- "0xf000000f"},
- {{hi, r0, 0x02ac0000}, false, al, "hi r0 0x02ac0000", "hi_r0_0x02ac0000"},
- {{pl, r10, 0x00000000}, false, al, "pl r10 0x00000000", "pl_r10_"
- "0x00000000"},
- {{ne, r6, 0xc000003f}, false, al, "ne r6 0xc000003f", "ne_r6_0xc000003f"},
- {{mi, r6, 0x00002ac0}, false, al, "mi r6 0x00002ac0", "mi_r6_0x00002ac0"},
- {{vs, r6, 0x000000ab}, false, al, "vs r6 0x000000ab", "vs_r6_0x000000ab"},
- {{vc, r6, 0x3fc00000}, false, al, "vc r6 0x3fc00000", "vc_r6_0x3fc00000"},
- {{al, r10, 0x3fc00000}, false, al, "al r10 0x3fc00000", "al_r10_"
- "0x3fc00000"},
- {{eq, r7, 0xc000003f}, false, al, "eq r7 0xc000003f", "eq_r7_0xc000003f"},
- {{vs, r9, 0x02ac0000}, false, al, "vs r9 0x02ac0000", "vs_r9_0x02ac0000"},
- {{mi, r6, 0xac000002}, false, al, "mi r6 0xac000002", "mi_r6_0xac000002"},
- {{vc, r8, 0xc000002a}, false, al, "vc r8 0xc000002a", "vc_r8_0xc000002a"},
- {{pl, r2, 0x0000ab00}, false, al, "pl r2 0x0000ab00", "pl_r2_0x0000ab00"},
- {{hi, r4, 0x00000ff0}, false, al, "hi r4 0x00000ff0", "hi_r4_0x00000ff0"},
- {{al, r8, 0x003fc000}, false, al, "al r8 0x003fc000", "al_r8_0x003fc000"},
- {{vs, r12, 0xfc000003}, false, al, "vs r12 0xfc000003", "vs_r12_"
- "0xfc000003"},
- {{cs, r0, 0xac000002}, false, al, "cs r0 0xac000002", "cs_r0_0xac000002"},
- {{mi, r4, 0x02ac0000}, false, al, "mi r4 0x02ac0000", "mi_r4_0x02ac0000"},
- {{pl, r11, 0x00003fc0}, false, al, "pl r11 0x00003fc0", "pl_r11_"
- "0x00003fc0"},
- {{pl, r13, 0xac000002}, false, al, "pl r13 0xac000002", "pl_r13_"
- "0xac000002"},
- {{hi, r8, 0x0000ff00}, false, al, "hi r8 0x0000ff00", "hi_r8_0x0000ff00"},
- {{cs, r13, 0x2ac00000}, false, al, "cs r13 0x2ac00000", "cs_r13_"
- "0x2ac00000"},
- {{hi, r11, 0xc000003f}, false, al, "hi r11 0xc000003f", "hi_r11_"
- "0xc000003f"},
- {{lt, r5, 0x000002ac}, false, al, "lt r5 0x000002ac", "lt_r5_0x000002ac"},
- {{vs, r4, 0x0ff00000}, false, al, "vs r4 0x0ff00000", "vs_r4_0x0ff00000"},
- {{lt, r2, 0x00000ab0}, false, al, "lt r2 0x00000ab0", "lt_r2_0x00000ab0"},
- {{pl, r12, 0x000000ab}, false, al, "pl r12 0x000000ab", "pl_r12_"
- "0x000000ab"},
- {{gt, r3, 0xb000000a}, false, al, "gt r3 0xb000000a", "gt_r3_0xb000000a"},
- {{hi, r12, 0x0002ac00}, false, al, "hi r12 0x0002ac00", "hi_r12_"
- "0x0002ac00"},
- {{le, r0, 0x00ff0000}, false, al, "le r0 0x00ff0000", "le_r0_0x00ff0000"},
- {{eq, r6, 0x000ab000}, false, al, "eq r6 0x000ab000", "eq_r6_0x000ab000"},
- {{lt, r6, 0x0ff00000}, false, al, "lt r6 0x0ff00000", "lt_r6_0x0ff00000"},
- {{mi, r4, 0x000ff000}, false, al, "mi r4 0x000ff000", "mi_r4_0x000ff000"},
- {{le, r3, 0x0ab00000}, false, al, "le r3 0x0ab00000", "le_r3_0x0ab00000"},
- {{ge, r3, 0xac000002}, false, al, "ge r3 0xac000002", "ge_r3_0xac000002"},
- {{gt, r13, 0x00000ff0}, false, al, "gt r13 0x00000ff0", "gt_r13_"
- "0x00000ff0"},
- {{le, r10, 0x0000ff00}, false, al, "le r10 0x0000ff00", "le_r10_"
- "0x0000ff00"},
- {{lt, r14, 0x00ab0000}, false, al, "lt r14 0x00ab0000", "lt_r14_"
- "0x00ab0000"},
- {{mi, r2, 0x00003fc0}, false, al, "mi r2 0x00003fc0", "mi_r2_0x00003fc0"},
- {{ge, r9, 0x00002ac0}, false, al, "ge r9 0x00002ac0", "ge_r9_0x00002ac0"},
- {{al, r11, 0x2ac00000}, false, al, "al r11 0x2ac00000", "al_r11_"
- "0x2ac00000"},
- {{pl, r6, 0x00000000}, false, al, "pl r6 0x00000000", "pl_r6_0x00000000"},
- {{ls, r2, 0x002ac000}, false, al, "ls r2 0x002ac000", "ls_r2_0x002ac000"},
- {{lt, r0, 0xf000000f}, false, al, "lt r0 0xf000000f", "lt_r0_0xf000000f"},
- {{cs, r6, 0x00000ff0}, false, al, "cs r6 0x00000ff0", "cs_r6_0x00000ff0"},
- {{lt, r12, 0x00000ff0}, false, al, "lt r12 0x00000ff0", "lt_r12_"
- "0x00000ff0"},
- {{al, r6, 0xff000000}, false, al, "al r6 0xff000000", "al_r6_0xff000000"},
- {{gt, r4, 0x0000ff00}, false, al, "gt r4 0x0000ff00", "gt_r4_0x0000ff00"},
- {{al, r14, 0x0000ff00}, false, al, "al r14 0x0000ff00", "al_r14_"
- "0x0000ff00"},
- {{al, r10, 0xf000000f}, false, al, "al r10 0xf000000f", "al_r10_"
- "0xf000000f"},
- {{mi, r3, 0x0003fc00}, false, al, "mi r3 0x0003fc00", "mi_r3_0x0003fc00"},
- {{pl, r0, 0xf000000f}, false, al, "pl r0 0xf000000f", "pl_r0_0xf000000f"},
- {{al, r6, 0x00000ab0}, false, al, "al r6 0x00000ab0", "al_r6_0x00000ab0"},
- {{le, r8, 0x000002ac}, false, al, "le r8 0x000002ac", "le_r8_0x000002ac"},
- {{ge, r14, 0x00000000}, false, al, "ge r14 0x00000000", "ge_r14_"
- "0x00000000"},
- {{eq, r1, 0xff000000}, false, al, "eq r1 0xff000000", "eq_r1_0xff000000"},
- {{mi, r14, 0x00000ff0}, false, al, "mi r14 0x00000ff0", "mi_r14_"
- "0x00000ff0"},
- {{eq, r3, 0xff000000}, false, al, "eq r3 0xff000000", "eq_r3_0xff000000"},
- {{mi, r0, 0x0000ab00}, false, al, "mi r0 0x0000ab00", "mi_r0_0x0000ab00"},
- {{mi, r5, 0x000ff000}, false, al, "mi r5 0x000ff000", "mi_r5_0x000ff000"},
- {{vs, r3, 0x0ff00000}, false, al, "vs r3 0x0ff00000", "vs_r3_0x0ff00000"},
- {{gt, r14, 0x0ff00000}, false, al, "gt r14 0x0ff00000", "gt_r14_"
- "0x0ff00000"},
- {{le, r6, 0x000003fc}, false, al, "le r6 0x000003fc", "le_r6_0x000003fc"},
- {{vs, r6, 0xab000000}, false, al, "vs r6 0xab000000", "vs_r6_0xab000000"},
- {{le, r0, 0x000000ab}, false, al, "le r0 0x000000ab", "le_r0_0x000000ab"},
- {{cc, r9, 0x0ab00000}, false, al, "cc r9 0x0ab00000", "cc_r9_0x0ab00000"},
- {{vs, r10, 0x0ff00000}, false, al, "vs r10 0x0ff00000", "vs_r10_"
- "0x0ff00000"},
- {{gt, r3, 0x002ac000}, false, al, "gt r3 0x002ac000", "gt_r3_0x002ac000"},
- {{le, r2, 0x0ab00000}, false, al, "le r2 0x0ab00000", "le_r2_0x0ab00000"},
- {{ne, r14, 0xc000003f}, false, al, "ne r14 0xc000003f", "ne_r14_"
- "0xc000003f"},
- {{ne, r14, 0x000ff000}, false, al, "ne r14 0x000ff000", "ne_r14_"
- "0x000ff000"},
- {{hi, r3, 0xb000000a}, false, al, "hi r3 0xb000000a", "hi_r3_0xb000000a"},
- {{cs, r7, 0x000000ab}, false, al, "cs r7 0x000000ab", "cs_r7_0x000000ab"},
- {{eq, r12, 0x3fc00000}, false, al, "eq r12 0x3fc00000", "eq_r12_"
- "0x3fc00000"},
- {{ls, r5, 0x03fc0000}, false, al, "ls r5 0x03fc0000", "ls_r5_0x03fc0000"},
- {{ne, r13, 0x003fc000}, false, al, "ne r13 0x003fc000", "ne_r13_"
- "0x003fc000"},
- {{hi, r6, 0xac000002}, false, al, "hi r6 0xac000002", "hi_r6_0xac000002"},
- {{vc, r6, 0xfc000003}, false, al, "vc r6 0xfc000003", "vc_r6_0xfc000003"},
- {{pl, r10, 0x00ab0000}, false, al, "pl r10 0x00ab0000", "pl_r10_"
- "0x00ab0000"},
- {{vc, r6, 0x0000ff00}, false, al, "vc r6 0x0000ff00", "vc_r6_0x0000ff00"},
- {{cs, r0, 0x003fc000}, false, al, "cs r0 0x003fc000", "cs_r0_0x003fc000"},
- {{hi, r5, 0x000003fc}, false, al, "hi r5 0x000003fc", "hi_r5_0x000003fc"},
- {{mi, r7, 0x0002ac00}, false, al, "mi r7 0x0002ac00", "mi_r7_0x0002ac00"},
- {{ne, r0, 0x02ac0000}, false, al, "ne r0 0x02ac0000", "ne_r0_0x02ac0000"},
- {{vs, r12, 0xc000002a}, false, al, "vs r12 0xc000002a", "vs_r12_"
- "0xc000002a"},
- {{al, r12, 0x000002ac}, false, al, "al r12 0x000002ac", "al_r12_"
- "0x000002ac"},
- {{cs, r4, 0x3fc00000}, false, al, "cs r4 0x3fc00000", "cs_r4_0x3fc00000"},
- {{ne, r9, 0x00000ab0}, false, al, "ne r9 0x00000ab0", "ne_r9_0x00000ab0"},
- {{eq, r14, 0x0003fc00},
- false,
- al,
- "eq r14 0x0003fc00",
- "eq_r14_0x0003fc00"}};
+const TestData kTests[] =
+ {{{ls, r0, 0x003fc000}, false, al, "ls r0 0x003fc000", "ls_r0_0x003fc000"},
+ {{eq, r13, 0xff000000},
+ false,
+ al,
+ "eq r13 0xff000000",
+ "eq_r13_"
+ "0xff000000"},
+ {{al, r0, 0x0002ac00}, false, al, "al r0 0x0002ac00", "al_r0_0x0002ac00"},
+ {{gt, r13, 0x002ac000},
+ false,
+ al,
+ "gt r13 0x002ac000",
+ "gt_r13_"
+ "0x002ac000"},
+ {{mi, r3, 0x000002ac}, false, al, "mi r3 0x000002ac", "mi_r3_0x000002ac"},
+ {{ls, r0, 0x000000ff}, false, al, "ls r0 0x000000ff", "ls_r0_0x000000ff"},
+ {{ls, r7, 0x0000ab00}, false, al, "ls r7 0x0000ab00", "ls_r7_0x0000ab00"},
+ {{cc, r11, 0x0ff00000},
+ false,
+ al,
+ "cc r11 0x0ff00000",
+ "cc_r11_"
+ "0x0ff00000"},
+ {{vs, r5, 0xc000003f}, false, al, "vs r5 0xc000003f", "vs_r5_0xc000003f"},
+ {{gt, r3, 0x00ab0000}, false, al, "gt r3 0x00ab0000", "gt_r3_0x00ab0000"},
+ {{hi, r7, 0xff000000}, false, al, "hi r7 0xff000000", "hi_r7_0xff000000"},
+ {{vc, r10, 0xff000000},
+ false,
+ al,
+ "vc r10 0xff000000",
+ "vc_r10_"
+ "0xff000000"},
+ {{hi, r10, 0x002ac000},
+ false,
+ al,
+ "hi r10 0x002ac000",
+ "hi_r10_"
+ "0x002ac000"},
+ {{ne, r9, 0x00ff0000}, false, al, "ne r9 0x00ff0000", "ne_r9_0x00ff0000"},
+ {{ge, r3, 0xf000000f}, false, al, "ge r3 0xf000000f", "ge_r3_0xf000000f"},
+ {{hi, r0, 0x000ff000}, false, al, "hi r0 0x000ff000", "hi_r0_0x000ff000"},
+ {{mi, r2, 0x00002ac0}, false, al, "mi r2 0x00002ac0", "mi_r2_0x00002ac0"},
+ {{ge, r6, 0x000ab000}, false, al, "ge r6 0x000ab000", "ge_r6_0x000ab000"},
+ {{mi, r9, 0x00ff0000}, false, al, "mi r9 0x00ff0000", "mi_r9_0x00ff0000"},
+ {{cs, r2, 0x000ff000}, false, al, "cs r2 0x000ff000", "cs_r2_0x000ff000"},
+ {{lt, r5, 0x003fc000}, false, al, "lt r5 0x003fc000", "lt_r5_0x003fc000"},
+ {{al, r1, 0x000ff000}, false, al, "al r1 0x000ff000", "al_r1_0x000ff000"},
+ {{mi, r6, 0x000000ab}, false, al, "mi r6 0x000000ab", "mi_r6_0x000000ab"},
+ {{pl, r9, 0xac000002}, false, al, "pl r9 0xac000002", "pl_r9_0xac000002"},
+ {{hi, r8, 0x000ff000}, false, al, "hi r8 0x000ff000", "hi_r8_0x000ff000"},
+ {{vs, r0, 0x00002ac0}, false, al, "vs r0 0x00002ac0", "vs_r0_0x00002ac0"},
+ {{ls, r4, 0xab000000}, false, al, "ls r4 0xab000000", "ls_r4_0xab000000"},
+ {{vs, r6, 0x000ab000}, false, al, "vs r6 0x000ab000", "vs_r6_0x000ab000"},
+ {{vc, r13, 0x0000ab00},
+ false,
+ al,
+ "vc r13 0x0000ab00",
+ "vc_r13_"
+ "0x0000ab00"},
+ {{mi, r0, 0xab000000}, false, al, "mi r0 0xab000000", "mi_r0_0xab000000"},
+ {{vs, r9, 0x0ab00000}, false, al, "vs r9 0x0ab00000", "vs_r9_0x0ab00000"},
+ {{pl, r0, 0x00003fc0}, false, al, "pl r0 0x00003fc0", "pl_r0_0x00003fc0"},
+ {{al, r2, 0x0000ff00}, false, al, "al r2 0x0000ff00", "al_r2_0x0000ff00"},
+ {{gt, r11, 0x00000ab0},
+ false,
+ al,
+ "gt r11 0x00000ab0",
+ "gt_r11_"
+ "0x00000ab0"},
+ {{vs, r10, 0xac000002},
+ false,
+ al,
+ "vs r10 0xac000002",
+ "vs_r10_"
+ "0xac000002"},
+ {{cs, r3, 0x0002ac00}, false, al, "cs r3 0x0002ac00", "cs_r3_0x0002ac00"},
+ {{vc, r13, 0x000000ab},
+ false,
+ al,
+ "vc r13 0x000000ab",
+ "vc_r13_"
+ "0x000000ab"},
+ {{cs, r11, 0x003fc000},
+ false,
+ al,
+ "cs r11 0x003fc000",
+ "cs_r11_"
+ "0x003fc000"},
+ {{vs, r14, 0x0000ab00},
+ false,
+ al,
+ "vs r14 0x0000ab00",
+ "vs_r14_"
+ "0x0000ab00"},
+ {{eq, r11, 0xfc000003},
+ false,
+ al,
+ "eq r11 0xfc000003",
+ "eq_r11_"
+ "0xfc000003"},
+ {{pl, r13, 0x00000ab0},
+ false,
+ al,
+ "pl r13 0x00000ab0",
+ "pl_r13_"
+ "0x00000ab0"},
+ {{ge, r4, 0xb000000a}, false, al, "ge r4 0xb000000a", "ge_r4_0xb000000a"},
+ {{pl, r12, 0x00003fc0},
+ false,
+ al,
+ "pl r12 0x00003fc0",
+ "pl_r12_"
+ "0x00003fc0"},
+ {{le, r4, 0x0000ff00}, false, al, "le r4 0x0000ff00", "le_r4_0x0000ff00"},
+ {{pl, r4, 0x003fc000}, false, al, "pl r4 0x003fc000", "pl_r4_0x003fc000"},
+ {{hi, r0, 0x002ac000}, false, al, "hi r0 0x002ac000", "hi_r0_0x002ac000"},
+ {{mi, r1, 0x00003fc0}, false, al, "mi r1 0x00003fc0", "mi_r1_0x00003fc0"},
+ {{hi, r9, 0xf000000f}, false, al, "hi r9 0xf000000f", "hi_r9_0xf000000f"},
+ {{al, r11, 0x0000ab00},
+ false,
+ al,
+ "al r11 0x0000ab00",
+ "al_r11_"
+ "0x0000ab00"},
+ {{ne, r6, 0x00ab0000}, false, al, "ne r6 0x00ab0000", "ne_r6_0x00ab0000"},
+ {{lt, r4, 0xff000000}, false, al, "lt r4 0xff000000", "lt_r4_0xff000000"},
+ {{pl, r0, 0x0ab00000}, false, al, "pl r0 0x0ab00000", "pl_r0_0x0ab00000"},
+ {{ls, r2, 0xc000002a}, false, al, "ls r2 0xc000002a", "ls_r2_0xc000002a"},
+ {{lt, r5, 0x00003fc0}, false, al, "lt r5 0x00003fc0", "lt_r5_0x00003fc0"},
+ {{mi, r5, 0x000003fc}, false, al, "mi r5 0x000003fc", "mi_r5_0x000003fc"},
+ {{ls, r11, 0xb000000a},
+ false,
+ al,
+ "ls r11 0xb000000a",
+ "ls_r11_"
+ "0xb000000a"},
+ {{al, r3, 0x0000ff00}, false, al, "al r3 0x0000ff00", "al_r3_0x0000ff00"},
+ {{vs, r3, 0xfc000003}, false, al, "vs r3 0xfc000003", "vs_r3_0xfc000003"},
+ {{ne, r1, 0xc000002a}, false, al, "ne r1 0xc000002a", "ne_r1_0xc000002a"},
+ {{eq, r10, 0x0003fc00},
+ false,
+ al,
+ "eq r10 0x0003fc00",
+ "eq_r10_"
+ "0x0003fc00"},
+ {{eq, r3, 0xf000000f}, false, al, "eq r3 0xf000000f", "eq_r3_0xf000000f"},
+ {{vs, r5, 0x000ff000}, false, al, "vs r5 0x000ff000", "vs_r5_0x000ff000"},
+ {{ge, r12, 0x000000ab},
+ false,
+ al,
+ "ge r12 0x000000ab",
+ "ge_r12_"
+ "0x000000ab"},
+ {{vc, r12, 0xf000000f},
+ false,
+ al,
+ "vc r12 0xf000000f",
+ "vc_r12_"
+ "0xf000000f"},
+ {{lt, r11, 0x02ac0000},
+ false,
+ al,
+ "lt r11 0x02ac0000",
+ "lt_r11_"
+ "0x02ac0000"},
+ {{vs, r6, 0x003fc000}, false, al, "vs r6 0x003fc000", "vs_r6_0x003fc000"},
+ {{cs, r8, 0x3fc00000}, false, al, "cs r8 0x3fc00000", "cs_r8_0x3fc00000"},
+ {{le, r5, 0x0002ac00}, false, al, "le r5 0x0002ac00", "le_r5_0x0002ac00"},
+ {{ls, r9, 0x0002ac00}, false, al, "ls r9 0x0002ac00", "ls_r9_0x0002ac00"},
+ {{al, r4, 0x3fc00000}, false, al, "al r4 0x3fc00000", "al_r4_0x3fc00000"},
+ {{lt, r5, 0x000000ff}, false, al, "lt r5 0x000000ff", "lt_r5_0x000000ff"},
+ {{cs, r8, 0xc000002a}, false, al, "cs r8 0xc000002a", "cs_r8_0xc000002a"},
+ {{cs, r0, 0x00000ab0}, false, al, "cs r0 0x00000ab0", "cs_r0_0x00000ab0"},
+ {{cs, r2, 0x3fc00000}, false, al, "cs r2 0x3fc00000", "cs_r2_0x3fc00000"},
+ {{vs, r14, 0xab000000},
+ false,
+ al,
+ "vs r14 0xab000000",
+ "vs_r14_"
+ "0xab000000"},
+ {{ne, r8, 0x002ac000}, false, al, "ne r8 0x002ac000", "ne_r8_0x002ac000"},
+ {{vs, r1, 0x003fc000}, false, al, "vs r1 0x003fc000", "vs_r1_0x003fc000"},
+ {{al, r7, 0x003fc000}, false, al, "al r7 0x003fc000", "al_r7_0x003fc000"},
+ {{vs, r7, 0x000ab000}, false, al, "vs r7 0x000ab000", "vs_r7_0x000ab000"},
+ {{vc, r12, 0xb000000a},
+ false,
+ al,
+ "vc r12 0xb000000a",
+ "vc_r12_"
+ "0xb000000a"},
+ {{eq, r2, 0xc000002a}, false, al, "eq r2 0xc000002a", "eq_r2_0xc000002a"},
+ {{lt, r4, 0x0000ff00}, false, al, "lt r4 0x0000ff00", "lt_r4_0x0000ff00"},
+ {{eq, r8, 0x2ac00000}, false, al, "eq r8 0x2ac00000", "eq_r8_0x2ac00000"},
+ {{hi, r7, 0x3fc00000}, false, al, "hi r7 0x3fc00000", "hi_r7_0x3fc00000"},
+ {{mi, r13, 0x3fc00000},
+ false,
+ al,
+ "mi r13 0x3fc00000",
+ "mi_r13_"
+ "0x3fc00000"},
+ {{al, r2, 0x0002ac00}, false, al, "al r2 0x0002ac00", "al_r2_0x0002ac00"},
+ {{gt, r13, 0xab000000},
+ false,
+ al,
+ "gt r13 0xab000000",
+ "gt_r13_"
+ "0xab000000"},
+ {{vs, r3, 0x00000ab0}, false, al, "vs r3 0x00000ab0", "vs_r3_0x00000ab0"},
+ {{mi, r14, 0x00000000},
+ false,
+ al,
+ "mi r14 0x00000000",
+ "mi_r14_"
+ "0x00000000"},
+ {{vs, r10, 0x3fc00000},
+ false,
+ al,
+ "vs r10 0x3fc00000",
+ "vs_r10_"
+ "0x3fc00000"},
+ {{vc, r7, 0x0ff00000}, false, al, "vc r7 0x0ff00000", "vc_r7_0x0ff00000"},
+ {{al, r3, 0xf000000f}, false, al, "al r3 0xf000000f", "al_r3_0xf000000f"},
+ {{cs, r12, 0x03fc0000},
+ false,
+ al,
+ "cs r12 0x03fc0000",
+ "cs_r12_"
+ "0x03fc0000"},
+ {{hi, r14, 0xab000000},
+ false,
+ al,
+ "hi r14 0xab000000",
+ "hi_r14_"
+ "0xab000000"},
+ {{mi, r13, 0x000002ac},
+ false,
+ al,
+ "mi r13 0x000002ac",
+ "mi_r13_"
+ "0x000002ac"},
+ {{ge, r8, 0x2ac00000}, false, al, "ge r8 0x2ac00000", "ge_r8_0x2ac00000"},
+ {{vc, r14, 0x000003fc},
+ false,
+ al,
+ "vc r14 0x000003fc",
+ "vc_r14_"
+ "0x000003fc"},
+ {{mi, r4, 0x0000ab00}, false, al, "mi r4 0x0000ab00", "mi_r4_0x0000ab00"},
+ {{hi, r11, 0x0ff00000},
+ false,
+ al,
+ "hi r11 0x0ff00000",
+ "hi_r11_"
+ "0x0ff00000"},
+ {{gt, r8, 0x000ff000}, false, al, "gt r8 0x000ff000", "gt_r8_0x000ff000"},
+ {{lt, r1, 0x3fc00000}, false, al, "lt r1 0x3fc00000", "lt_r1_0x3fc00000"},
+ {{mi, r12, 0x000000ab},
+ false,
+ al,
+ "mi r12 0x000000ab",
+ "mi_r12_"
+ "0x000000ab"},
+ {{vs, r12, 0x000003fc},
+ false,
+ al,
+ "vs r12 0x000003fc",
+ "vs_r12_"
+ "0x000003fc"},
+ {{cs, r10, 0x000ff000},
+ false,
+ al,
+ "cs r10 0x000ff000",
+ "cs_r10_"
+ "0x000ff000"},
+ {{mi, r12, 0xfc000003},
+ false,
+ al,
+ "mi r12 0xfc000003",
+ "mi_r12_"
+ "0xfc000003"},
+ {{pl, r8, 0x000000ff}, false, al, "pl r8 0x000000ff", "pl_r8_0x000000ff"},
+ {{lt, r5, 0x00002ac0}, false, al, "lt r5 0x00002ac0", "lt_r5_0x00002ac0"},
+ {{ge, r0, 0xff000000}, false, al, "ge r0 0xff000000", "ge_r0_0xff000000"},
+ {{pl, r0, 0xab000000}, false, al, "pl r0 0xab000000", "pl_r0_0xab000000"},
+ {{ls, r8, 0x000ab000}, false, al, "ls r8 0x000ab000", "ls_r8_0x000ab000"},
+ {{al, r12, 0x00000ff0},
+ false,
+ al,
+ "al r12 0x00000ff0",
+ "al_r12_"
+ "0x00000ff0"},
+ {{hi, r5, 0x00003fc0}, false, al, "hi r5 0x00003fc0", "hi_r5_0x00003fc0"},
+ {{vs, r7, 0x0003fc00}, false, al, "vs r7 0x0003fc00", "vs_r7_0x0003fc00"},
+ {{ls, r13, 0x0ff00000},
+ false,
+ al,
+ "ls r13 0x0ff00000",
+ "ls_r13_"
+ "0x0ff00000"},
+ {{al, r7, 0x000000ab}, false, al, "al r7 0x000000ab", "al_r7_0x000000ab"},
+ {{lt, r6, 0x000003fc}, false, al, "lt r6 0x000003fc", "lt_r6_0x000003fc"},
+ {{mi, r9, 0xc000002a}, false, al, "mi r9 0xc000002a", "mi_r9_0xc000002a"},
+ {{ne, r11, 0x003fc000},
+ false,
+ al,
+ "ne r11 0x003fc000",
+ "ne_r11_"
+ "0x003fc000"},
+ {{cs, r4, 0x00000ab0}, false, al, "cs r4 0x00000ab0", "cs_r4_0x00000ab0"},
+ {{vc, r14, 0x2ac00000},
+ false,
+ al,
+ "vc r14 0x2ac00000",
+ "vc_r14_"
+ "0x2ac00000"},
+ {{vc, r8, 0x2ac00000}, false, al, "vc r8 0x2ac00000", "vc_r8_0x2ac00000"},
+ {{ge, r10, 0x003fc000},
+ false,
+ al,
+ "ge r10 0x003fc000",
+ "ge_r10_"
+ "0x003fc000"},
+ {{lt, r14, 0xb000000a},
+ false,
+ al,
+ "lt r14 0xb000000a",
+ "lt_r14_"
+ "0xb000000a"},
+ {{cs, r12, 0x000ff000},
+ false,
+ al,
+ "cs r12 0x000ff000",
+ "cs_r12_"
+ "0x000ff000"},
+ {{eq, r2, 0xac000002}, false, al, "eq r2 0xac000002", "eq_r2_0xac000002"},
+ {{le, r11, 0x2ac00000},
+ false,
+ al,
+ "le r11 0x2ac00000",
+ "le_r11_"
+ "0x2ac00000"},
+ {{le, r8, 0xab000000}, false, al, "le r8 0xab000000", "le_r8_0xab000000"},
+ {{lt, r5, 0x02ac0000}, false, al, "lt r5 0x02ac0000", "lt_r5_0x02ac0000"},
+ {{hi, r13, 0x003fc000},
+ false,
+ al,
+ "hi r13 0x003fc000",
+ "hi_r13_"
+ "0x003fc000"},
+ {{mi, r1, 0xfc000003}, false, al, "mi r1 0xfc000003", "mi_r1_0xfc000003"},
+ {{cc, r4, 0x0ab00000}, false, al, "cc r4 0x0ab00000", "cc_r4_0x0ab00000"},
+ {{lt, r7, 0x3fc00000}, false, al, "lt r7 0x3fc00000", "lt_r7_0x3fc00000"},
+ {{lt, r3, 0x000002ac}, false, al, "lt r3 0x000002ac", "lt_r3_0x000002ac"},
+ {{pl, r7, 0x00000000}, false, al, "pl r7 0x00000000", "pl_r7_0x00000000"},
+ {{lt, r2, 0x00000000}, false, al, "lt r2 0x00000000", "lt_r2_0x00000000"},
+ {{le, r1, 0x0ff00000}, false, al, "le r1 0x0ff00000", "le_r1_0x0ff00000"},
+ {{lt, r14, 0x0000ff00},
+ false,
+ al,
+ "lt r14 0x0000ff00",
+ "lt_r14_"
+ "0x0000ff00"},
+ {{cs, r11, 0x3fc00000},
+ false,
+ al,
+ "cs r11 0x3fc00000",
+ "cs_r11_"
+ "0x3fc00000"},
+ {{ls, r7, 0x00000ff0}, false, al, "ls r7 0x00000ff0", "ls_r7_0x00000ff0"},
+ {{vs, r3, 0x0ab00000}, false, al, "vs r3 0x0ab00000", "vs_r3_0x0ab00000"},
+ {{cs, r12, 0x0003fc00},
+ false,
+ al,
+ "cs r12 0x0003fc00",
+ "cs_r12_"
+ "0x0003fc00"},
+ {{vc, r3, 0xfc000003}, false, al, "vc r3 0xfc000003", "vc_r3_0xfc000003"},
+ {{vs, r14, 0x0ff00000},
+ false,
+ al,
+ "vs r14 0x0ff00000",
+ "vs_r14_"
+ "0x0ff00000"},
+ {{vc, r5, 0x00003fc0}, false, al, "vc r5 0x00003fc0", "vc_r5_0x00003fc0"},
+ {{cc, r14, 0x0ff00000},
+ false,
+ al,
+ "cc r14 0x0ff00000",
+ "cc_r14_"
+ "0x0ff00000"},
+ {{cs, r7, 0x0ff00000}, false, al, "cs r7 0x0ff00000", "cs_r7_0x0ff00000"},
+ {{al, r2, 0x00ab0000}, false, al, "al r2 0x00ab0000", "al_r2_0x00ab0000"},
+ {{gt, r0, 0x00000000}, false, al, "gt r0 0x00000000", "gt_r0_0x00000000"},
+ {{al, r6, 0x000003fc}, false, al, "al r6 0x000003fc", "al_r6_0x000003fc"},
+ {{ge, r2, 0x2ac00000}, false, al, "ge r2 0x2ac00000", "ge_r2_0x2ac00000"},
+ {{vs, r0, 0x03fc0000}, false, al, "vs r0 0x03fc0000", "vs_r0_0x03fc0000"},
+ {{cs, r12, 0x00003fc0},
+ false,
+ al,
+ "cs r12 0x00003fc0",
+ "cs_r12_"
+ "0x00003fc0"},
+ {{ge, r3, 0x2ac00000}, false, al, "ge r3 0x2ac00000", "ge_r3_0x2ac00000"},
+ {{le, r13, 0x0000ab00},
+ false,
+ al,
+ "le r13 0x0000ab00",
+ "le_r13_"
+ "0x0000ab00"},
+ {{al, r13, 0x02ac0000},
+ false,
+ al,
+ "al r13 0x02ac0000",
+ "al_r13_"
+ "0x02ac0000"},
+ {{mi, r4, 0xff000000}, false, al, "mi r4 0xff000000", "mi_r4_0xff000000"},
+ {{cs, r3, 0x00003fc0}, false, al, "cs r3 0x00003fc0", "cs_r3_0x00003fc0"},
+ {{ge, r13, 0x00ab0000},
+ false,
+ al,
+ "ge r13 0x00ab0000",
+ "ge_r13_"
+ "0x00ab0000"},
+ {{ne, r4, 0x00000ab0}, false, al, "ne r4 0x00000ab0", "ne_r4_0x00000ab0"},
+ {{cc, r3, 0xac000002}, false, al, "cc r3 0xac000002", "cc_r3_0xac000002"},
+ {{pl, r11, 0xab000000},
+ false,
+ al,
+ "pl r11 0xab000000",
+ "pl_r11_"
+ "0xab000000"},
+ {{eq, r13, 0xfc000003},
+ false,
+ al,
+ "eq r13 0xfc000003",
+ "eq_r13_"
+ "0xfc000003"},
+ {{ne, r5, 0xc000003f}, false, al, "ne r5 0xc000003f", "ne_r5_0xc000003f"},
+ {{hi, r7, 0xb000000a}, false, al, "hi r7 0xb000000a", "hi_r7_0xb000000a"},
+ {{al, r12, 0xc000002a},
+ false,
+ al,
+ "al r12 0xc000002a",
+ "al_r12_"
+ "0xc000002a"},
+ {{vs, r8, 0xf000000f}, false, al, "vs r8 0xf000000f", "vs_r8_0xf000000f"},
+ {{cs, r6, 0x00ff0000}, false, al, "cs r6 0x00ff0000", "cs_r6_0x00ff0000"},
+ {{vs, r1, 0x00002ac0}, false, al, "vs r1 0x00002ac0", "vs_r1_0x00002ac0"},
+ {{ls, r2, 0x0ff00000}, false, al, "ls r2 0x0ff00000", "ls_r2_0x0ff00000"},
+ {{mi, r13, 0x0000ab00},
+ false,
+ al,
+ "mi r13 0x0000ab00",
+ "mi_r13_"
+ "0x0000ab00"},
+ {{al, r4, 0xff000000}, false, al, "al r4 0xff000000", "al_r4_0xff000000"},
+ {{ne, r1, 0x00002ac0}, false, al, "ne r1 0x00002ac0", "ne_r1_0x00002ac0"},
+ {{vc, r14, 0x000002ac},
+ false,
+ al,
+ "vc r14 0x000002ac",
+ "vc_r14_"
+ "0x000002ac"},
+ {{al, r9, 0xb000000a}, false, al, "al r9 0xb000000a", "al_r9_0xb000000a"},
+ {{ne, r9, 0x000002ac}, false, al, "ne r9 0x000002ac", "ne_r9_0x000002ac"},
+ {{hi, r7, 0x0ff00000}, false, al, "hi r7 0x0ff00000", "hi_r7_0x0ff00000"},
+ {{ne, r10, 0x000ab000},
+ false,
+ al,
+ "ne r10 0x000ab000",
+ "ne_r10_"
+ "0x000ab000"},
+ {{vs, r0, 0x0003fc00}, false, al, "vs r0 0x0003fc00", "vs_r0_0x0003fc00"},
+ {{cs, r9, 0x002ac000}, false, al, "cs r9 0x002ac000", "cs_r9_0x002ac000"},
+ {{eq, r3, 0x00000ff0}, false, al, "eq r3 0x00000ff0", "eq_r3_0x00000ff0"},
+ {{lt, r5, 0x00ab0000}, false, al, "lt r5 0x00ab0000", "lt_r5_0x00ab0000"},
+ {{pl, r1, 0x00ff0000}, false, al, "pl r1 0x00ff0000", "pl_r1_0x00ff0000"},
+ {{eq, r4, 0x0000ab00}, false, al, "eq r4 0x0000ab00", "eq_r4_0x0000ab00"},
+ {{mi, r13, 0x000000ab},
+ false,
+ al,
+ "mi r13 0x000000ab",
+ "mi_r13_"
+ "0x000000ab"},
+ {{pl, r12, 0x00000ff0},
+ false,
+ al,
+ "pl r12 0x00000ff0",
+ "pl_r12_"
+ "0x00000ff0"},
+ {{eq, r3, 0x00002ac0}, false, al, "eq r3 0x00002ac0", "eq_r3_0x00002ac0"},
+ {{le, r12, 0x002ac000},
+ false,
+ al,
+ "le r12 0x002ac000",
+ "le_r12_"
+ "0x002ac000"},
+ {{ge, r10, 0x000002ac},
+ false,
+ al,
+ "ge r10 0x000002ac",
+ "ge_r10_"
+ "0x000002ac"},
+ {{vs, r1, 0x00ff0000}, false, al, "vs r1 0x00ff0000", "vs_r1_0x00ff0000"},
+ {{pl, r8, 0x0000ff00}, false, al, "pl r8 0x0000ff00", "pl_r8_0x0000ff00"},
+ {{vs, r9, 0x000ab000}, false, al, "vs r9 0x000ab000", "vs_r9_0x000ab000"},
+ {{ls, r6, 0x003fc000}, false, al, "ls r6 0x003fc000", "ls_r6_0x003fc000"},
+ {{vs, r14, 0x0ab00000},
+ false,
+ al,
+ "vs r14 0x0ab00000",
+ "vs_r14_"
+ "0x0ab00000"},
+ {{mi, r14, 0xf000000f},
+ false,
+ al,
+ "mi r14 0xf000000f",
+ "mi_r14_"
+ "0xf000000f"},
+ {{vc, r6, 0xf000000f}, false, al, "vc r6 0xf000000f", "vc_r6_0xf000000f"},
+ {{ne, r4, 0x0000ff00}, false, al, "ne r4 0x0000ff00", "ne_r4_0x0000ff00"},
+ {{gt, r10, 0xfc000003},
+ false,
+ al,
+ "gt r10 0xfc000003",
+ "gt_r10_"
+ "0xfc000003"},
+ {{cs, r6, 0x3fc00000}, false, al, "cs r6 0x3fc00000", "cs_r6_0x3fc00000"},
+ {{al, r10, 0x0ff00000},
+ false,
+ al,
+ "al r10 0x0ff00000",
+ "al_r10_"
+ "0x0ff00000"},
+ {{pl, r12, 0x00000000},
+ false,
+ al,
+ "pl r12 0x00000000",
+ "pl_r12_"
+ "0x00000000"},
+ {{cc, r5, 0xfc000003}, false, al, "cc r5 0xfc000003", "cc_r5_0xfc000003"},
+ {{pl, r10, 0x0003fc00},
+ false,
+ al,
+ "pl r10 0x0003fc00",
+ "pl_r10_"
+ "0x0003fc00"},
+ {{eq, r8, 0xac000002}, false, al, "eq r8 0xac000002", "eq_r8_0xac000002"},
+ {{vs, r12, 0xac000002},
+ false,
+ al,
+ "vs r12 0xac000002",
+ "vs_r12_"
+ "0xac000002"},
+ {{ne, r9, 0x00ab0000}, false, al, "ne r9 0x00ab0000", "ne_r9_0x00ab0000"},
+ {{al, r1, 0x0002ac00}, false, al, "al r1 0x0002ac00", "al_r1_0x0002ac00"},
+ {{ne, r6, 0x0000ff00}, false, al, "ne r6 0x0000ff00", "ne_r6_0x0000ff00"},
+ {{mi, r3, 0x03fc0000}, false, al, "mi r3 0x03fc0000", "mi_r3_0x03fc0000"},
+ {{ge, r10, 0x0002ac00},
+ false,
+ al,
+ "ge r10 0x0002ac00",
+ "ge_r10_"
+ "0x0002ac00"},
+ {{vc, r5, 0xb000000a}, false, al, "vc r5 0xb000000a", "vc_r5_0xb000000a"},
+ {{pl, r1, 0x000003fc}, false, al, "pl r1 0x000003fc", "pl_r1_0x000003fc"},
+ {{mi, r2, 0x02ac0000}, false, al, "mi r2 0x02ac0000", "mi_r2_0x02ac0000"},
+ {{gt, r7, 0x0003fc00}, false, al, "gt r7 0x0003fc00", "gt_r7_0x0003fc00"},
+ {{vs, r0, 0x00000000}, false, al, "vs r0 0x00000000", "vs_r0_0x00000000"},
+ {{vc, r11, 0xc000003f},
+ false,
+ al,
+ "vc r11 0xc000003f",
+ "vc_r11_"
+ "0xc000003f"},
+ {{vc, r13, 0x0ab00000},
+ false,
+ al,
+ "vc r13 0x0ab00000",
+ "vc_r13_"
+ "0x0ab00000"},
+ {{ge, r5, 0x0002ac00}, false, al, "ge r5 0x0002ac00", "ge_r5_0x0002ac00"},
+ {{ge, r8, 0xc000003f}, false, al, "ge r8 0xc000003f", "ge_r8_0xc000003f"},
+ {{al, r14, 0x000002ac},
+ false,
+ al,
+ "al r14 0x000002ac",
+ "al_r14_"
+ "0x000002ac"},
+ {{vs, r1, 0x00000000}, false, al, "vs r1 0x00000000", "vs_r1_0x00000000"},
+ {{vc, r2, 0x3fc00000}, false, al, "vc r2 0x3fc00000", "vc_r2_0x3fc00000"},
+ {{ne, r2, 0xc000003f}, false, al, "ne r2 0xc000003f", "ne_r2_0xc000003f"},
+ {{cs, r0, 0x0ab00000}, false, al, "cs r0 0x0ab00000", "cs_r0_0x0ab00000"},
+ {{le, r5, 0xfc000003}, false, al, "le r5 0xfc000003", "le_r5_0xfc000003"},
+ {{cs, r3, 0x000002ac}, false, al, "cs r3 0x000002ac", "cs_r3_0x000002ac"},
+ {{hi, r3, 0x0000ab00}, false, al, "hi r3 0x0000ab00", "hi_r3_0x0000ab00"},
+ {{ge, r9, 0x00ab0000}, false, al, "ge r9 0x00ab0000", "ge_r9_0x00ab0000"},
+ {{le, r0, 0x000ab000}, false, al, "le r0 0x000ab000", "le_r0_0x000ab000"},
+ {{cc, r7, 0x000003fc}, false, al, "cc r7 0x000003fc", "cc_r7_0x000003fc"},
+ {{pl, r7, 0x00002ac0}, false, al, "pl r7 0x00002ac0", "pl_r7_0x00002ac0"},
+ {{cc, r1, 0x00000ab0}, false, al, "cc r1 0x00000ab0", "cc_r1_0x00000ab0"},
+ {{le, r8, 0x0002ac00}, false, al, "le r8 0x0002ac00", "le_r8_0x0002ac00"},
+ {{mi, r9, 0x0003fc00}, false, al, "mi r9 0x0003fc00", "mi_r9_0x0003fc00"},
+ {{cs, r2, 0x000002ac}, false, al, "cs r2 0x000002ac", "cs_r2_0x000002ac"},
+ {{vc, r2, 0xb000000a}, false, al, "vc r2 0xb000000a", "vc_r2_0xb000000a"},
+ {{pl, r4, 0x000ab000}, false, al, "pl r4 0x000ab000", "pl_r4_0x000ab000"},
+ {{hi, r0, 0x0003fc00}, false, al, "hi r0 0x0003fc00", "hi_r0_0x0003fc00"},
+ {{vs, r12, 0x000ab000},
+ false,
+ al,
+ "vs r12 0x000ab000",
+ "vs_r12_"
+ "0x000ab000"},
+ {{lt, r9, 0x0003fc00}, false, al, "lt r9 0x0003fc00", "lt_r9_0x0003fc00"},
+ {{mi, r11, 0x00002ac0},
+ false,
+ al,
+ "mi r11 0x00002ac0",
+ "mi_r11_"
+ "0x00002ac0"},
+ {{ls, r13, 0x000ab000},
+ false,
+ al,
+ "ls r13 0x000ab000",
+ "ls_r13_"
+ "0x000ab000"},
+ {{al, r3, 0x3fc00000}, false, al, "al r3 0x3fc00000", "al_r3_0x3fc00000"},
+ {{eq, r14, 0x000000ff},
+ false,
+ al,
+ "eq r14 0x000000ff",
+ "eq_r14_"
+ "0x000000ff"},
+ {{le, r12, 0xff000000},
+ false,
+ al,
+ "le r12 0xff000000",
+ "le_r12_"
+ "0xff000000"},
+ {{gt, r8, 0xff000000}, false, al, "gt r8 0xff000000", "gt_r8_0xff000000"},
+ {{eq, r0, 0x00ff0000}, false, al, "eq r0 0x00ff0000", "eq_r0_0x00ff0000"},
+ {{cc, r5, 0xff000000}, false, al, "cc r5 0xff000000", "cc_r5_0xff000000"},
+ {{mi, r2, 0x0003fc00}, false, al, "mi r2 0x0003fc00", "mi_r2_0x0003fc00"},
+ {{cs, r10, 0xf000000f},
+ false,
+ al,
+ "cs r10 0xf000000f",
+ "cs_r10_"
+ "0xf000000f"},
+ {{eq, r0, 0xab000000}, false, al, "eq r0 0xab000000", "eq_r0_0xab000000"},
+ {{al, r1, 0x03fc0000}, false, al, "al r1 0x03fc0000", "al_r1_0x03fc0000"},
+ {{ne, r5, 0xff000000}, false, al, "ne r5 0xff000000", "ne_r5_0xff000000"},
+ {{ne, r1, 0x03fc0000}, false, al, "ne r1 0x03fc0000", "ne_r1_0x03fc0000"},
+ {{ls, r1, 0x000ff000}, false, al, "ls r1 0x000ff000", "ls_r1_0x000ff000"},
+ {{vc, r0, 0x00003fc0}, false, al, "vc r0 0x00003fc0", "vc_r0_0x00003fc0"},
+ {{eq, r4, 0x0003fc00}, false, al, "eq r4 0x0003fc00", "eq_r4_0x0003fc00"},
+ {{mi, r3, 0xab000000}, false, al, "mi r3 0xab000000", "mi_r3_0xab000000"},
+ {{mi, r5, 0xfc000003}, false, al, "mi r5 0xfc000003", "mi_r5_0xfc000003"},
+ {{vs, r8, 0xab000000}, false, al, "vs r8 0xab000000", "vs_r8_0xab000000"},
+ {{cs, r9, 0x00ff0000}, false, al, "cs r9 0x00ff0000", "cs_r9_0x00ff0000"},
+ {{ge, r7, 0x00000ab0}, false, al, "ge r7 0x00000ab0", "ge_r7_0x00000ab0"},
+ {{al, r9, 0xf000000f}, false, al, "al r9 0xf000000f", "al_r9_0xf000000f"},
+ {{vc, r10, 0x000000ff},
+ false,
+ al,
+ "vc r10 0x000000ff",
+ "vc_r10_"
+ "0x000000ff"},
+ {{al, r4, 0x00ab0000}, false, al, "al r4 0x00ab0000", "al_r4_0x00ab0000"},
+ {{ls, r6, 0x0000ff00}, false, al, "ls r6 0x0000ff00", "ls_r6_0x0000ff00"},
+ {{cc, r14, 0x0ab00000},
+ false,
+ al,
+ "cc r14 0x0ab00000",
+ "cc_r14_"
+ "0x0ab00000"},
+ {{le, r5, 0x3fc00000}, false, al, "le r5 0x3fc00000", "le_r5_0x3fc00000"},
+ {{ne, r12, 0x0ff00000},
+ false,
+ al,
+ "ne r12 0x0ff00000",
+ "ne_r12_"
+ "0x0ff00000"},
+ {{cc, r11, 0xf000000f},
+ false,
+ al,
+ "cc r11 0xf000000f",
+ "cc_r11_"
+ "0xf000000f"},
+ {{lt, r9, 0x000003fc}, false, al, "lt r9 0x000003fc", "lt_r9_0x000003fc"},
+ {{ne, r7, 0x000002ac}, false, al, "ne r7 0x000002ac", "ne_r7_0x000002ac"},
+ {{ge, r5, 0x00003fc0}, false, al, "ge r5 0x00003fc0", "ge_r5_0x00003fc0"},
+ {{lt, r14, 0x00002ac0},
+ false,
+ al,
+ "lt r14 0x00002ac0",
+ "lt_r14_"
+ "0x00002ac0"},
+ {{vs, r8, 0x0002ac00}, false, al, "vs r8 0x0002ac00", "vs_r8_0x0002ac00"},
+ {{gt, r7, 0x0000ff00}, false, al, "gt r7 0x0000ff00", "gt_r7_0x0000ff00"},
+ {{gt, r11, 0x000002ac},
+ false,
+ al,
+ "gt r11 0x000002ac",
+ "gt_r11_"
+ "0x000002ac"},
+ {{lt, r12, 0x00003fc0},
+ false,
+ al,
+ "lt r12 0x00003fc0",
+ "lt_r12_"
+ "0x00003fc0"},
+ {{gt, r4, 0x00ff0000}, false, al, "gt r4 0x00ff0000", "gt_r4_0x00ff0000"},
+ {{gt, r0, 0x000ff000}, false, al, "gt r0 0x000ff000", "gt_r0_0x000ff000"},
+ {{hi, r3, 0x0002ac00}, false, al, "hi r3 0x0002ac00", "hi_r3_0x0002ac00"},
+ {{al, r6, 0x003fc000}, false, al, "al r6 0x003fc000", "al_r6_0x003fc000"},
+ {{al, r5, 0x0003fc00}, false, al, "al r5 0x0003fc00", "al_r5_0x0003fc00"},
+ {{mi, r14, 0x03fc0000},
+ false,
+ al,
+ "mi r14 0x03fc0000",
+ "mi_r14_"
+ "0x03fc0000"},
+ {{le, r10, 0x0ab00000},
+ false,
+ al,
+ "le r10 0x0ab00000",
+ "le_r10_"
+ "0x0ab00000"},
+ {{vc, r4, 0xab000000}, false, al, "vc r4 0xab000000", "vc_r4_0xab000000"},
+ {{lt, r12, 0xf000000f},
+ false,
+ al,
+ "lt r12 0xf000000f",
+ "lt_r12_"
+ "0xf000000f"},
+ {{le, r12, 0xf000000f},
+ false,
+ al,
+ "le r12 0xf000000f",
+ "le_r12_"
+ "0xf000000f"},
+ {{pl, r3, 0xfc000003}, false, al, "pl r3 0xfc000003", "pl_r3_0xfc000003"},
+ {{ls, r9, 0x000003fc}, false, al, "ls r9 0x000003fc", "ls_r9_0x000003fc"},
+ {{hi, r0, 0x00ff0000}, false, al, "hi r0 0x00ff0000", "hi_r0_0x00ff0000"},
+ {{ge, r5, 0x000000ab}, false, al, "ge r5 0x000000ab", "ge_r5_0x000000ab"},
+ {{pl, r1, 0x00000000}, false, al, "pl r1 0x00000000", "pl_r1_0x00000000"},
+ {{vs, r11, 0x0002ac00},
+ false,
+ al,
+ "vs r11 0x0002ac00",
+ "vs_r11_"
+ "0x0002ac00"},
+ {{cc, r10, 0xac000002},
+ false,
+ al,
+ "cc r10 0xac000002",
+ "cc_r10_"
+ "0xac000002"},
+ {{al, r2, 0x00000000}, false, al, "al r2 0x00000000", "al_r2_0x00000000"},
+ {{eq, r8, 0xab000000}, false, al, "eq r8 0xab000000", "eq_r8_0xab000000"},
+ {{al, r5, 0xb000000a}, false, al, "al r5 0xb000000a", "al_r5_0xb000000a"},
+ {{cs, r8, 0x000003fc}, false, al, "cs r8 0x000003fc", "cs_r8_0x000003fc"},
+ {{ge, r5, 0xc000003f}, false, al, "ge r5 0xc000003f", "ge_r5_0xc000003f"},
+ {{eq, r5, 0x00000ff0}, false, al, "eq r5 0x00000ff0", "eq_r5_0x00000ff0"},
+ {{eq, r5, 0x000ff000}, false, al, "eq r5 0x000ff000", "eq_r5_0x000ff000"},
+ {{ls, r7, 0x00000000}, false, al, "ls r7 0x00000000", "ls_r7_0x00000000"},
+ {{vc, r9, 0x00ff0000}, false, al, "vc r9 0x00ff0000", "vc_r9_0x00ff0000"},
+ {{le, r6, 0x00000ff0}, false, al, "le r6 0x00000ff0", "le_r6_0x00000ff0"},
+ {{pl, r10, 0xac000002},
+ false,
+ al,
+ "pl r10 0xac000002",
+ "pl_r10_"
+ "0xac000002"},
+ {{vs, r13, 0x3fc00000},
+ false,
+ al,
+ "vs r13 0x3fc00000",
+ "vs_r13_"
+ "0x3fc00000"},
+ {{gt, r12, 0x000ab000},
+ false,
+ al,
+ "gt r12 0x000ab000",
+ "gt_r12_"
+ "0x000ab000"},
+ {{pl, r7, 0x0000ff00}, false, al, "pl r7 0x0000ff00", "pl_r7_0x0000ff00"},
+ {{hi, r13, 0xab000000},
+ false,
+ al,
+ "hi r13 0xab000000",
+ "hi_r13_"
+ "0xab000000"},
+ {{vc, r3, 0x00003fc0}, false, al, "vc r3 0x00003fc0", "vc_r3_0x00003fc0"},
+ {{eq, r8, 0x03fc0000}, false, al, "eq r8 0x03fc0000", "eq_r8_0x03fc0000"},
+ {{ge, r7, 0x00ab0000}, false, al, "ge r7 0x00ab0000", "ge_r7_0x00ab0000"},
+ {{ls, r5, 0x00000ab0}, false, al, "ls r5 0x00000ab0", "ls_r5_0x00000ab0"},
+ {{ls, r8, 0xb000000a}, false, al, "ls r8 0xb000000a", "ls_r8_0xb000000a"},
+ {{cs, r2, 0xab000000}, false, al, "cs r2 0xab000000", "cs_r2_0xab000000"},
+ {{ne, r1, 0x0ab00000}, false, al, "ne r1 0x0ab00000", "ne_r1_0x0ab00000"},
+ {{hi, r3, 0x0ab00000}, false, al, "hi r3 0x0ab00000", "hi_r3_0x0ab00000"},
+ {{vc, r1, 0x00ab0000}, false, al, "vc r1 0x00ab0000", "vc_r1_0x00ab0000"},
+ {{gt, r12, 0x0003fc00},
+ false,
+ al,
+ "gt r12 0x0003fc00",
+ "gt_r12_"
+ "0x0003fc00"},
+ {{lt, r11, 0x00000ff0},
+ false,
+ al,
+ "lt r11 0x00000ff0",
+ "lt_r11_"
+ "0x00000ff0"},
+ {{ne, r5, 0xc000002a}, false, al, "ne r5 0xc000002a", "ne_r5_0xc000002a"},
+ {{vc, r6, 0x000003fc}, false, al, "vc r6 0x000003fc", "vc_r6_0x000003fc"},
+ {{gt, r1, 0xab000000}, false, al, "gt r1 0xab000000", "gt_r1_0xab000000"},
+ {{gt, r5, 0x3fc00000}, false, al, "gt r5 0x3fc00000", "gt_r5_0x3fc00000"},
+ {{cs, r3, 0x00002ac0}, false, al, "cs r3 0x00002ac0", "cs_r3_0x00002ac0"},
+ {{mi, r0, 0xff000000}, false, al, "mi r0 0xff000000", "mi_r0_0xff000000"},
+ {{cs, r3, 0x00000ff0}, false, al, "cs r3 0x00000ff0", "cs_r3_0x00000ff0"},
+ {{le, r13, 0xff000000},
+ false,
+ al,
+ "le r13 0xff000000",
+ "le_r13_"
+ "0xff000000"},
+ {{hi, r8, 0x00000000}, false, al, "hi r8 0x00000000", "hi_r8_0x00000000"},
+ {{vc, r12, 0x0002ac00},
+ false,
+ al,
+ "vc r12 0x0002ac00",
+ "vc_r12_"
+ "0x0002ac00"},
+ {{eq, r9, 0x00000ff0}, false, al, "eq r9 0x00000ff0", "eq_r9_0x00000ff0"},
+ {{vc, r14, 0x0ff00000},
+ false,
+ al,
+ "vc r14 0x0ff00000",
+ "vc_r14_"
+ "0x0ff00000"},
+ {{hi, r2, 0x00ff0000}, false, al, "hi r2 0x00ff0000", "hi_r2_0x00ff0000"},
+ {{al, r2, 0x03fc0000}, false, al, "al r2 0x03fc0000", "al_r2_0x03fc0000"},
+ {{mi, r14, 0x0ab00000},
+ false,
+ al,
+ "mi r14 0x0ab00000",
+ "mi_r14_"
+ "0x0ab00000"},
+ {{vs, r11, 0x000ff000},
+ false,
+ al,
+ "vs r11 0x000ff000",
+ "vs_r11_"
+ "0x000ff000"},
+ {{le, r11, 0x0003fc00},
+ false,
+ al,
+ "le r11 0x0003fc00",
+ "le_r11_"
+ "0x0003fc00"},
+ {{eq, r6, 0x002ac000}, false, al, "eq r6 0x002ac000", "eq_r6_0x002ac000"},
+ {{ne, r12, 0xab000000},
+ false,
+ al,
+ "ne r12 0xab000000",
+ "ne_r12_"
+ "0xab000000"},
+ {{eq, r4, 0x000ff000}, false, al, "eq r4 0x000ff000", "eq_r4_0x000ff000"},
+ {{cs, r6, 0x000003fc}, false, al, "cs r6 0x000003fc", "cs_r6_0x000003fc"},
+ {{le, r13, 0xf000000f},
+ false,
+ al,
+ "le r13 0xf000000f",
+ "le_r13_"
+ "0xf000000f"},
+ {{vs, r3, 0x02ac0000}, false, al, "vs r3 0x02ac0000", "vs_r3_0x02ac0000"},
+ {{lt, r3, 0xab000000}, false, al, "lt r3 0xab000000", "lt_r3_0xab000000"},
+ {{pl, r5, 0xab000000}, false, al, "pl r5 0xab000000", "pl_r5_0xab000000"},
+ {{mi, r10, 0x00002ac0},
+ false,
+ al,
+ "mi r10 0x00002ac0",
+ "mi_r10_"
+ "0x00002ac0"},
+ {{vc, r3, 0x000000ab}, false, al, "vc r3 0x000000ab", "vc_r3_0x000000ab"},
+ {{pl, r10, 0x02ac0000},
+ false,
+ al,
+ "pl r10 0x02ac0000",
+ "pl_r10_"
+ "0x02ac0000"},
+ {{vs, r11, 0x3fc00000},
+ false,
+ al,
+ "vs r11 0x3fc00000",
+ "vs_r11_"
+ "0x3fc00000"},
+ {{cc, r7, 0x3fc00000}, false, al, "cc r7 0x3fc00000", "cc_r7_0x3fc00000"},
+ {{cs, r3, 0x0000ab00}, false, al, "cs r3 0x0000ab00", "cs_r3_0x0000ab00"},
+ {{cc, r14, 0x02ac0000},
+ false,
+ al,
+ "cc r14 0x02ac0000",
+ "cc_r14_"
+ "0x02ac0000"},
+ {{vs, r0, 0x2ac00000}, false, al, "vs r0 0x2ac00000", "vs_r0_0x2ac00000"},
+ {{gt, r13, 0x0ff00000},
+ false,
+ al,
+ "gt r13 0x0ff00000",
+ "gt_r13_"
+ "0x0ff00000"},
+ {{mi, r10, 0x2ac00000},
+ false,
+ al,
+ "mi r10 0x2ac00000",
+ "mi_r10_"
+ "0x2ac00000"},
+ {{ls, r2, 0x0ab00000}, false, al, "ls r2 0x0ab00000", "ls_r2_0x0ab00000"},
+ {{pl, r10, 0x000000ff},
+ false,
+ al,
+ "pl r10 0x000000ff",
+ "pl_r10_"
+ "0x000000ff"},
+ {{al, r9, 0x3fc00000}, false, al, "al r9 0x3fc00000", "al_r9_0x3fc00000"},
+ {{vs, r4, 0x2ac00000}, false, al, "vs r4 0x2ac00000", "vs_r4_0x2ac00000"},
+ {{vs, r12, 0x000ff000},
+ false,
+ al,
+ "vs r12 0x000ff000",
+ "vs_r12_"
+ "0x000ff000"},
+ {{ge, r7, 0xc000002a}, false, al, "ge r7 0xc000002a", "ge_r7_0xc000002a"},
+ {{vc, r7, 0x0002ac00}, false, al, "vc r7 0x0002ac00", "vc_r7_0x0002ac00"},
+ {{gt, r7, 0x000ab000}, false, al, "gt r7 0x000ab000", "gt_r7_0x000ab000"},
+ {{gt, r13, 0x000000ab},
+ false,
+ al,
+ "gt r13 0x000000ab",
+ "gt_r13_"
+ "0x000000ab"},
+ {{ne, r1, 0xfc000003}, false, al, "ne r1 0xfc000003", "ne_r1_0xfc000003"},
+ {{vc, r14, 0x000ab000},
+ false,
+ al,
+ "vc r14 0x000ab000",
+ "vc_r14_"
+ "0x000ab000"},
+ {{gt, r5, 0xf000000f}, false, al, "gt r5 0xf000000f", "gt_r5_0xf000000f"},
+ {{ge, r6, 0x00ff0000}, false, al, "ge r6 0x00ff0000", "ge_r6_0x00ff0000"},
+ {{ls, r12, 0x0000ff00},
+ false,
+ al,
+ "ls r12 0x0000ff00",
+ "ls_r12_"
+ "0x0000ff00"},
+ {{mi, r12, 0x03fc0000},
+ false,
+ al,
+ "mi r12 0x03fc0000",
+ "mi_r12_"
+ "0x03fc0000"},
+ {{vs, r10, 0x0003fc00},
+ false,
+ al,
+ "vs r10 0x0003fc00",
+ "vs_r10_"
+ "0x0003fc00"},
+ {{gt, r6, 0x0000ff00}, false, al, "gt r6 0x0000ff00", "gt_r6_0x0000ff00"},
+ {{pl, r6, 0x00ff0000}, false, al, "pl r6 0x00ff0000", "pl_r6_0x00ff0000"},
+ {{ne, r0, 0x002ac000}, false, al, "ne r0 0x002ac000", "ne_r0_0x002ac000"},
+ {{eq, r2, 0x003fc000}, false, al, "eq r2 0x003fc000", "eq_r2_0x003fc000"},
+ {{ne, r10, 0x000003fc},
+ false,
+ al,
+ "ne r10 0x000003fc",
+ "ne_r10_"
+ "0x000003fc"},
+ {{lt, r10, 0x02ac0000},
+ false,
+ al,
+ "lt r10 0x02ac0000",
+ "lt_r10_"
+ "0x02ac0000"},
+ {{lt, r2, 0x000ff000}, false, al, "lt r2 0x000ff000", "lt_r2_0x000ff000"},
+ {{pl, r3, 0x0000ab00}, false, al, "pl r3 0x0000ab00", "pl_r3_0x0000ab00"},
+ {{ge, r9, 0xc000003f}, false, al, "ge r9 0xc000003f", "ge_r9_0xc000003f"},
+ {{vs, r4, 0x000003fc}, false, al, "vs r4 0x000003fc", "vs_r4_0x000003fc"},
+ {{hi, r13, 0xf000000f},
+ false,
+ al,
+ "hi r13 0xf000000f",
+ "hi_r13_"
+ "0xf000000f"},
+ {{pl, r1, 0x02ac0000}, false, al, "pl r1 0x02ac0000", "pl_r1_0x02ac0000"},
+ {{pl, r1, 0x000000ff}, false, al, "pl r1 0x000000ff", "pl_r1_0x000000ff"},
+ {{al, r3, 0x00000ff0}, false, al, "al r3 0x00000ff0", "al_r3_0x00000ff0"},
+ {{gt, r0, 0x03fc0000}, false, al, "gt r0 0x03fc0000", "gt_r0_0x03fc0000"},
+ {{cc, r1, 0x2ac00000}, false, al, "cc r1 0x2ac00000", "cc_r1_0x2ac00000"},
+ {{mi, r9, 0xc000003f}, false, al, "mi r9 0xc000003f", "mi_r9_0xc000003f"},
+ {{vc, r4, 0xff000000}, false, al, "vc r4 0xff000000", "vc_r4_0xff000000"},
+ {{pl, r9, 0x0002ac00}, false, al, "pl r9 0x0002ac00", "pl_r9_0x0002ac00"},
+ {{vs, r8, 0x02ac0000}, false, al, "vs r8 0x02ac0000", "vs_r8_0x02ac0000"},
+ {{gt, r7, 0xf000000f}, false, al, "gt r7 0xf000000f", "gt_r7_0xf000000f"},
+ {{gt, r10, 0xff000000},
+ false,
+ al,
+ "gt r10 0xff000000",
+ "gt_r10_"
+ "0xff000000"},
+ {{cs, r0, 0x00003fc0}, false, al, "cs r0 0x00003fc0", "cs_r0_0x00003fc0"},
+ {{gt, r8, 0x03fc0000}, false, al, "gt r8 0x03fc0000", "gt_r8_0x03fc0000"},
+ {{ne, r8, 0x00002ac0}, false, al, "ne r8 0x00002ac0", "ne_r8_0x00002ac0"},
+ {{ne, r13, 0xc000002a},
+ false,
+ al,
+ "ne r13 0xc000002a",
+ "ne_r13_"
+ "0xc000002a"},
+ {{ne, r8, 0x3fc00000}, false, al, "ne r8 0x3fc00000", "ne_r8_0x3fc00000"},
+ {{vs, r2, 0x002ac000}, false, al, "vs r2 0x002ac000", "vs_r2_0x002ac000"},
+ {{vs, r7, 0x000000ab}, false, al, "vs r7 0x000000ab", "vs_r7_0x000000ab"},
+ {{cc, r6, 0x00000000}, false, al, "cc r6 0x00000000", "cc_r6_0x00000000"},
+ {{ls, r3, 0xf000000f}, false, al, "ls r3 0xf000000f", "ls_r3_0xf000000f"},
+ {{hi, r0, 0x000000ff}, false, al, "hi r0 0x000000ff", "hi_r0_0x000000ff"},
+ {{cs, r6, 0x00002ac0}, false, al, "cs r6 0x00002ac0", "cs_r6_0x00002ac0"},
+ {{al, r1, 0xb000000a}, false, al, "al r1 0xb000000a", "al_r1_0xb000000a"},
+ {{cc, r0, 0x00002ac0}, false, al, "cc r0 0x00002ac0", "cc_r0_0x00002ac0"},
+ {{cc, r12, 0x000003fc},
+ false,
+ al,
+ "cc r12 0x000003fc",
+ "cc_r12_"
+ "0x000003fc"},
+ {{ne, r14, 0xc000002a},
+ false,
+ al,
+ "ne r14 0xc000002a",
+ "ne_r14_"
+ "0xc000002a"},
+ {{al, r6, 0xc000003f}, false, al, "al r6 0xc000003f", "al_r6_0xc000003f"},
+ {{ls, r5, 0xf000000f}, false, al, "ls r5 0xf000000f", "ls_r5_0xf000000f"},
+ {{mi, r5, 0x03fc0000}, false, al, "mi r5 0x03fc0000", "mi_r5_0x03fc0000"},
+ {{cs, r14, 0xf000000f},
+ false,
+ al,
+ "cs r14 0xf000000f",
+ "cs_r14_"
+ "0xf000000f"},
+ {{hi, r0, 0x02ac0000}, false, al, "hi r0 0x02ac0000", "hi_r0_0x02ac0000"},
+ {{pl, r10, 0x00000000},
+ false,
+ al,
+ "pl r10 0x00000000",
+ "pl_r10_"
+ "0x00000000"},
+ {{ne, r6, 0xc000003f}, false, al, "ne r6 0xc000003f", "ne_r6_0xc000003f"},
+ {{mi, r6, 0x00002ac0}, false, al, "mi r6 0x00002ac0", "mi_r6_0x00002ac0"},
+ {{vs, r6, 0x000000ab}, false, al, "vs r6 0x000000ab", "vs_r6_0x000000ab"},
+ {{vc, r6, 0x3fc00000}, false, al, "vc r6 0x3fc00000", "vc_r6_0x3fc00000"},
+ {{al, r10, 0x3fc00000},
+ false,
+ al,
+ "al r10 0x3fc00000",
+ "al_r10_"
+ "0x3fc00000"},
+ {{eq, r7, 0xc000003f}, false, al, "eq r7 0xc000003f", "eq_r7_0xc000003f"},
+ {{vs, r9, 0x02ac0000}, false, al, "vs r9 0x02ac0000", "vs_r9_0x02ac0000"},
+ {{mi, r6, 0xac000002}, false, al, "mi r6 0xac000002", "mi_r6_0xac000002"},
+ {{vc, r8, 0xc000002a}, false, al, "vc r8 0xc000002a", "vc_r8_0xc000002a"},
+ {{pl, r2, 0x0000ab00}, false, al, "pl r2 0x0000ab00", "pl_r2_0x0000ab00"},
+ {{hi, r4, 0x00000ff0}, false, al, "hi r4 0x00000ff0", "hi_r4_0x00000ff0"},
+ {{al, r8, 0x003fc000}, false, al, "al r8 0x003fc000", "al_r8_0x003fc000"},
+ {{vs, r12, 0xfc000003},
+ false,
+ al,
+ "vs r12 0xfc000003",
+ "vs_r12_"
+ "0xfc000003"},
+ {{cs, r0, 0xac000002}, false, al, "cs r0 0xac000002", "cs_r0_0xac000002"},
+ {{mi, r4, 0x02ac0000}, false, al, "mi r4 0x02ac0000", "mi_r4_0x02ac0000"},
+ {{pl, r11, 0x00003fc0},
+ false,
+ al,
+ "pl r11 0x00003fc0",
+ "pl_r11_"
+ "0x00003fc0"},
+ {{pl, r13, 0xac000002},
+ false,
+ al,
+ "pl r13 0xac000002",
+ "pl_r13_"
+ "0xac000002"},
+ {{hi, r8, 0x0000ff00}, false, al, "hi r8 0x0000ff00", "hi_r8_0x0000ff00"},
+ {{cs, r13, 0x2ac00000},
+ false,
+ al,
+ "cs r13 0x2ac00000",
+ "cs_r13_"
+ "0x2ac00000"},
+ {{hi, r11, 0xc000003f},
+ false,
+ al,
+ "hi r11 0xc000003f",
+ "hi_r11_"
+ "0xc000003f"},
+ {{lt, r5, 0x000002ac}, false, al, "lt r5 0x000002ac", "lt_r5_0x000002ac"},
+ {{vs, r4, 0x0ff00000}, false, al, "vs r4 0x0ff00000", "vs_r4_0x0ff00000"},
+ {{lt, r2, 0x00000ab0}, false, al, "lt r2 0x00000ab0", "lt_r2_0x00000ab0"},
+ {{pl, r12, 0x000000ab},
+ false,
+ al,
+ "pl r12 0x000000ab",
+ "pl_r12_"
+ "0x000000ab"},
+ {{gt, r3, 0xb000000a}, false, al, "gt r3 0xb000000a", "gt_r3_0xb000000a"},
+ {{hi, r12, 0x0002ac00},
+ false,
+ al,
+ "hi r12 0x0002ac00",
+ "hi_r12_"
+ "0x0002ac00"},
+ {{le, r0, 0x00ff0000}, false, al, "le r0 0x00ff0000", "le_r0_0x00ff0000"},
+ {{eq, r6, 0x000ab000}, false, al, "eq r6 0x000ab000", "eq_r6_0x000ab000"},
+ {{lt, r6, 0x0ff00000}, false, al, "lt r6 0x0ff00000", "lt_r6_0x0ff00000"},
+ {{mi, r4, 0x000ff000}, false, al, "mi r4 0x000ff000", "mi_r4_0x000ff000"},
+ {{le, r3, 0x0ab00000}, false, al, "le r3 0x0ab00000", "le_r3_0x0ab00000"},
+ {{ge, r3, 0xac000002}, false, al, "ge r3 0xac000002", "ge_r3_0xac000002"},
+ {{gt, r13, 0x00000ff0},
+ false,
+ al,
+ "gt r13 0x00000ff0",
+ "gt_r13_"
+ "0x00000ff0"},
+ {{le, r10, 0x0000ff00},
+ false,
+ al,
+ "le r10 0x0000ff00",
+ "le_r10_"
+ "0x0000ff00"},
+ {{lt, r14, 0x00ab0000},
+ false,
+ al,
+ "lt r14 0x00ab0000",
+ "lt_r14_"
+ "0x00ab0000"},
+ {{mi, r2, 0x00003fc0}, false, al, "mi r2 0x00003fc0", "mi_r2_0x00003fc0"},
+ {{ge, r9, 0x00002ac0}, false, al, "ge r9 0x00002ac0", "ge_r9_0x00002ac0"},
+ {{al, r11, 0x2ac00000},
+ false,
+ al,
+ "al r11 0x2ac00000",
+ "al_r11_"
+ "0x2ac00000"},
+ {{pl, r6, 0x00000000}, false, al, "pl r6 0x00000000", "pl_r6_0x00000000"},
+ {{ls, r2, 0x002ac000}, false, al, "ls r2 0x002ac000", "ls_r2_0x002ac000"},
+ {{lt, r0, 0xf000000f}, false, al, "lt r0 0xf000000f", "lt_r0_0xf000000f"},
+ {{cs, r6, 0x00000ff0}, false, al, "cs r6 0x00000ff0", "cs_r6_0x00000ff0"},
+ {{lt, r12, 0x00000ff0},
+ false,
+ al,
+ "lt r12 0x00000ff0",
+ "lt_r12_"
+ "0x00000ff0"},
+ {{al, r6, 0xff000000}, false, al, "al r6 0xff000000", "al_r6_0xff000000"},
+ {{gt, r4, 0x0000ff00}, false, al, "gt r4 0x0000ff00", "gt_r4_0x0000ff00"},
+ {{al, r14, 0x0000ff00},
+ false,
+ al,
+ "al r14 0x0000ff00",
+ "al_r14_"
+ "0x0000ff00"},
+ {{al, r10, 0xf000000f},
+ false,
+ al,
+ "al r10 0xf000000f",
+ "al_r10_"
+ "0xf000000f"},
+ {{mi, r3, 0x0003fc00}, false, al, "mi r3 0x0003fc00", "mi_r3_0x0003fc00"},
+ {{pl, r0, 0xf000000f}, false, al, "pl r0 0xf000000f", "pl_r0_0xf000000f"},
+ {{al, r6, 0x00000ab0}, false, al, "al r6 0x00000ab0", "al_r6_0x00000ab0"},
+ {{le, r8, 0x000002ac}, false, al, "le r8 0x000002ac", "le_r8_0x000002ac"},
+ {{ge, r14, 0x00000000},
+ false,
+ al,
+ "ge r14 0x00000000",
+ "ge_r14_"
+ "0x00000000"},
+ {{eq, r1, 0xff000000}, false, al, "eq r1 0xff000000", "eq_r1_0xff000000"},
+ {{mi, r14, 0x00000ff0},
+ false,
+ al,
+ "mi r14 0x00000ff0",
+ "mi_r14_"
+ "0x00000ff0"},
+ {{eq, r3, 0xff000000}, false, al, "eq r3 0xff000000", "eq_r3_0xff000000"},
+ {{mi, r0, 0x0000ab00}, false, al, "mi r0 0x0000ab00", "mi_r0_0x0000ab00"},
+ {{mi, r5, 0x000ff000}, false, al, "mi r5 0x000ff000", "mi_r5_0x000ff000"},
+ {{vs, r3, 0x0ff00000}, false, al, "vs r3 0x0ff00000", "vs_r3_0x0ff00000"},
+ {{gt, r14, 0x0ff00000},
+ false,
+ al,
+ "gt r14 0x0ff00000",
+ "gt_r14_"
+ "0x0ff00000"},
+ {{le, r6, 0x000003fc}, false, al, "le r6 0x000003fc", "le_r6_0x000003fc"},
+ {{vs, r6, 0xab000000}, false, al, "vs r6 0xab000000", "vs_r6_0xab000000"},
+ {{le, r0, 0x000000ab}, false, al, "le r0 0x000000ab", "le_r0_0x000000ab"},
+ {{cc, r9, 0x0ab00000}, false, al, "cc r9 0x0ab00000", "cc_r9_0x0ab00000"},
+ {{vs, r10, 0x0ff00000},
+ false,
+ al,
+ "vs r10 0x0ff00000",
+ "vs_r10_"
+ "0x0ff00000"},
+ {{gt, r3, 0x002ac000}, false, al, "gt r3 0x002ac000", "gt_r3_0x002ac000"},
+ {{le, r2, 0x0ab00000}, false, al, "le r2 0x0ab00000", "le_r2_0x0ab00000"},
+ {{ne, r14, 0xc000003f},
+ false,
+ al,
+ "ne r14 0xc000003f",
+ "ne_r14_"
+ "0xc000003f"},
+ {{ne, r14, 0x000ff000},
+ false,
+ al,
+ "ne r14 0x000ff000",
+ "ne_r14_"
+ "0x000ff000"},
+ {{hi, r3, 0xb000000a}, false, al, "hi r3 0xb000000a", "hi_r3_0xb000000a"},
+ {{cs, r7, 0x000000ab}, false, al, "cs r7 0x000000ab", "cs_r7_0x000000ab"},
+ {{eq, r12, 0x3fc00000},
+ false,
+ al,
+ "eq r12 0x3fc00000",
+ "eq_r12_"
+ "0x3fc00000"},
+ {{ls, r5, 0x03fc0000}, false, al, "ls r5 0x03fc0000", "ls_r5_0x03fc0000"},
+ {{ne, r13, 0x003fc000},
+ false,
+ al,
+ "ne r13 0x003fc000",
+ "ne_r13_"
+ "0x003fc000"},
+ {{hi, r6, 0xac000002}, false, al, "hi r6 0xac000002", "hi_r6_0xac000002"},
+ {{vc, r6, 0xfc000003}, false, al, "vc r6 0xfc000003", "vc_r6_0xfc000003"},
+ {{pl, r10, 0x00ab0000},
+ false,
+ al,
+ "pl r10 0x00ab0000",
+ "pl_r10_"
+ "0x00ab0000"},
+ {{vc, r6, 0x0000ff00}, false, al, "vc r6 0x0000ff00", "vc_r6_0x0000ff00"},
+ {{cs, r0, 0x003fc000}, false, al, "cs r0 0x003fc000", "cs_r0_0x003fc000"},
+ {{hi, r5, 0x000003fc}, false, al, "hi r5 0x000003fc", "hi_r5_0x000003fc"},
+ {{mi, r7, 0x0002ac00}, false, al, "mi r7 0x0002ac00", "mi_r7_0x0002ac00"},
+ {{ne, r0, 0x02ac0000}, false, al, "ne r0 0x02ac0000", "ne_r0_0x02ac0000"},
+ {{vs, r12, 0xc000002a},
+ false,
+ al,
+ "vs r12 0xc000002a",
+ "vs_r12_"
+ "0xc000002a"},
+ {{al, r12, 0x000002ac},
+ false,
+ al,
+ "al r12 0x000002ac",
+ "al_r12_"
+ "0x000002ac"},
+ {{cs, r4, 0x3fc00000}, false, al, "cs r4 0x3fc00000", "cs_r4_0x3fc00000"},
+ {{ne, r9, 0x00000ab0}, false, al, "ne r9 0x00000ab0", "ne_r9_0x00000ab0"},
+ {{eq, r14, 0x0003fc00},
+ false,
+ al,
+ "eq r14 0x0003fc00",
+ "eq_r14_0x0003fc00"}};
// These headers each contain an array of `TestResult` with the reference output
// values. The reference arrays are named `kReference{mnemonic}`.
diff --git a/test/aarch32/test-assembler-cond-rd-operand-const-t32.cc b/test/aarch32/test-assembler-cond-rd-operand-const-t32.cc
index f11f2cd..dd8318f 100644
--- a/test/aarch32/test-assembler-cond-rd-operand-const-t32.cc
+++ b/test/aarch32/test-assembler-cond-rd-operand-const-t32.cc
@@ -98,1130 +98,2246 @@
};
// Each element of this array produce one instruction encoding.
-const TestData kTests[] = {
- {{al, r0, 0x000001fe}, false, al, "al r0 0x000001fe", "al_r0_0x000001fe"},
- {{al, r0, 0x000003fc}, false, al, "al r0 0x000003fc", "al_r0_0x000003fc"},
- {{al, r0, 0x000007f8}, false, al, "al r0 0x000007f8", "al_r0_0x000007f8"},
- {{al, r0, 0x00000ff0}, false, al, "al r0 0x00000ff0", "al_r0_0x00000ff0"},
- {{al, r0, 0x00001fe0}, false, al, "al r0 0x00001fe0", "al_r0_0x00001fe0"},
- {{al, r0, 0x00003fc0}, false, al, "al r0 0x00003fc0", "al_r0_0x00003fc0"},
- {{al, r0, 0x00007f80}, false, al, "al r0 0x00007f80", "al_r0_0x00007f80"},
- {{al, r0, 0x0000ff00}, false, al, "al r0 0x0000ff00", "al_r0_0x0000ff00"},
- {{al, r0, 0x0001fe00}, false, al, "al r0 0x0001fe00", "al_r0_0x0001fe00"},
- {{al, r0, 0x0003fc00}, false, al, "al r0 0x0003fc00", "al_r0_0x0003fc00"},
- {{al, r0, 0x0007f800}, false, al, "al r0 0x0007f800", "al_r0_0x0007f800"},
- {{al, r0, 0x000ff000}, false, al, "al r0 0x000ff000", "al_r0_0x000ff000"},
- {{al, r0, 0x001fe000}, false, al, "al r0 0x001fe000", "al_r0_0x001fe000"},
- {{al, r0, 0x003fc000}, false, al, "al r0 0x003fc000", "al_r0_0x003fc000"},
- {{al, r0, 0x007f8000}, false, al, "al r0 0x007f8000", "al_r0_0x007f8000"},
- {{al, r0, 0x00ff0000}, false, al, "al r0 0x00ff0000", "al_r0_0x00ff0000"},
- {{al, r0, 0x01fe0000}, false, al, "al r0 0x01fe0000", "al_r0_0x01fe0000"},
- {{al, r0, 0x03fc0000}, false, al, "al r0 0x03fc0000", "al_r0_0x03fc0000"},
- {{al, r0, 0x07f80000}, false, al, "al r0 0x07f80000", "al_r0_0x07f80000"},
- {{al, r0, 0x0ff00000}, false, al, "al r0 0x0ff00000", "al_r0_0x0ff00000"},
- {{al, r0, 0x1fe00000}, false, al, "al r0 0x1fe00000", "al_r0_0x1fe00000"},
- {{al, r0, 0x3fc00000}, false, al, "al r0 0x3fc00000", "al_r0_0x3fc00000"},
- {{al, r0, 0x7f800000}, false, al, "al r0 0x7f800000", "al_r0_0x7f800000"},
- {{al, r0, 0xff000000}, false, al, "al r0 0xff000000", "al_r0_0xff000000"},
- {{al, r0, 0x000000ff}, false, al, "al r0 0x000000ff", "al_r0_0x000000ff"},
- {{al, r0, 0x00ff00ff}, false, al, "al r0 0x00ff00ff", "al_r0_0x00ff00ff"},
- {{al, r0, 0xff00ff00}, false, al, "al r0 0xff00ff00", "al_r0_0xff00ff00"},
- {{al, r0, 0xffffffff}, false, al, "al r0 0xffffffff", "al_r0_0xffffffff"},
- {{al, r0, 0x00000156}, false, al, "al r0 0x00000156", "al_r0_0x00000156"},
- {{al, r0, 0x000002ac}, false, al, "al r0 0x000002ac", "al_r0_0x000002ac"},
- {{al, r0, 0x00000558}, false, al, "al r0 0x00000558", "al_r0_0x00000558"},
- {{al, r0, 0x00000ab0}, false, al, "al r0 0x00000ab0", "al_r0_0x00000ab0"},
- {{al, r0, 0x00001560}, false, al, "al r0 0x00001560", "al_r0_0x00001560"},
- {{al, r0, 0x00002ac0}, false, al, "al r0 0x00002ac0", "al_r0_0x00002ac0"},
- {{al, r0, 0x00005580}, false, al, "al r0 0x00005580", "al_r0_0x00005580"},
- {{al, r0, 0x0000ab00}, false, al, "al r0 0x0000ab00", "al_r0_0x0000ab00"},
- {{al, r0, 0x00015600}, false, al, "al r0 0x00015600", "al_r0_0x00015600"},
- {{al, r0, 0x0002ac00}, false, al, "al r0 0x0002ac00", "al_r0_0x0002ac00"},
- {{al, r0, 0x00055800}, false, al, "al r0 0x00055800", "al_r0_0x00055800"},
- {{al, r0, 0x000ab000}, false, al, "al r0 0x000ab000", "al_r0_0x000ab000"},
- {{al, r0, 0x00156000}, false, al, "al r0 0x00156000", "al_r0_0x00156000"},
- {{al, r0, 0x002ac000}, false, al, "al r0 0x002ac000", "al_r0_0x002ac000"},
- {{al, r0, 0x00558000}, false, al, "al r0 0x00558000", "al_r0_0x00558000"},
- {{al, r0, 0x00ab0000}, false, al, "al r0 0x00ab0000", "al_r0_0x00ab0000"},
- {{al, r0, 0x01560000}, false, al, "al r0 0x01560000", "al_r0_0x01560000"},
- {{al, r0, 0x02ac0000}, false, al, "al r0 0x02ac0000", "al_r0_0x02ac0000"},
- {{al, r0, 0x05580000}, false, al, "al r0 0x05580000", "al_r0_0x05580000"},
- {{al, r0, 0x0ab00000}, false, al, "al r0 0x0ab00000", "al_r0_0x0ab00000"},
- {{al, r0, 0x15600000}, false, al, "al r0 0x15600000", "al_r0_0x15600000"},
- {{al, r0, 0x2ac00000}, false, al, "al r0 0x2ac00000", "al_r0_0x2ac00000"},
- {{al, r0, 0x55800000}, false, al, "al r0 0x55800000", "al_r0_0x55800000"},
- {{al, r0, 0xab000000}, false, al, "al r0 0xab000000", "al_r0_0xab000000"},
- {{al, r0, 0x000000ab}, false, al, "al r0 0x000000ab", "al_r0_0x000000ab"},
- {{al, r0, 0x00ab00ab}, false, al, "al r0 0x00ab00ab", "al_r0_0x00ab00ab"},
- {{al, r0, 0xab00ab00}, false, al, "al r0 0xab00ab00", "al_r0_0xab00ab00"},
- {{al, r0, 0xabababab}, false, al, "al r0 0xabababab", "al_r0_0xabababab"},
- {{al, r1, 0x000001fe}, false, al, "al r1 0x000001fe", "al_r1_0x000001fe"},
- {{al, r1, 0x000003fc}, false, al, "al r1 0x000003fc", "al_r1_0x000003fc"},
- {{al, r1, 0x000007f8}, false, al, "al r1 0x000007f8", "al_r1_0x000007f8"},
- {{al, r1, 0x00000ff0}, false, al, "al r1 0x00000ff0", "al_r1_0x00000ff0"},
- {{al, r1, 0x00001fe0}, false, al, "al r1 0x00001fe0", "al_r1_0x00001fe0"},
- {{al, r1, 0x00003fc0}, false, al, "al r1 0x00003fc0", "al_r1_0x00003fc0"},
- {{al, r1, 0x00007f80}, false, al, "al r1 0x00007f80", "al_r1_0x00007f80"},
- {{al, r1, 0x0000ff00}, false, al, "al r1 0x0000ff00", "al_r1_0x0000ff00"},
- {{al, r1, 0x0001fe00}, false, al, "al r1 0x0001fe00", "al_r1_0x0001fe00"},
- {{al, r1, 0x0003fc00}, false, al, "al r1 0x0003fc00", "al_r1_0x0003fc00"},
- {{al, r1, 0x0007f800}, false, al, "al r1 0x0007f800", "al_r1_0x0007f800"},
- {{al, r1, 0x000ff000}, false, al, "al r1 0x000ff000", "al_r1_0x000ff000"},
- {{al, r1, 0x001fe000}, false, al, "al r1 0x001fe000", "al_r1_0x001fe000"},
- {{al, r1, 0x003fc000}, false, al, "al r1 0x003fc000", "al_r1_0x003fc000"},
- {{al, r1, 0x007f8000}, false, al, "al r1 0x007f8000", "al_r1_0x007f8000"},
- {{al, r1, 0x00ff0000}, false, al, "al r1 0x00ff0000", "al_r1_0x00ff0000"},
- {{al, r1, 0x01fe0000}, false, al, "al r1 0x01fe0000", "al_r1_0x01fe0000"},
- {{al, r1, 0x03fc0000}, false, al, "al r1 0x03fc0000", "al_r1_0x03fc0000"},
- {{al, r1, 0x07f80000}, false, al, "al r1 0x07f80000", "al_r1_0x07f80000"},
- {{al, r1, 0x0ff00000}, false, al, "al r1 0x0ff00000", "al_r1_0x0ff00000"},
- {{al, r1, 0x1fe00000}, false, al, "al r1 0x1fe00000", "al_r1_0x1fe00000"},
- {{al, r1, 0x3fc00000}, false, al, "al r1 0x3fc00000", "al_r1_0x3fc00000"},
- {{al, r1, 0x7f800000}, false, al, "al r1 0x7f800000", "al_r1_0x7f800000"},
- {{al, r1, 0xff000000}, false, al, "al r1 0xff000000", "al_r1_0xff000000"},
- {{al, r1, 0x000000ff}, false, al, "al r1 0x000000ff", "al_r1_0x000000ff"},
- {{al, r1, 0x00ff00ff}, false, al, "al r1 0x00ff00ff", "al_r1_0x00ff00ff"},
- {{al, r1, 0xff00ff00}, false, al, "al r1 0xff00ff00", "al_r1_0xff00ff00"},
- {{al, r1, 0xffffffff}, false, al, "al r1 0xffffffff", "al_r1_0xffffffff"},
- {{al, r1, 0x00000156}, false, al, "al r1 0x00000156", "al_r1_0x00000156"},
- {{al, r1, 0x000002ac}, false, al, "al r1 0x000002ac", "al_r1_0x000002ac"},
- {{al, r1, 0x00000558}, false, al, "al r1 0x00000558", "al_r1_0x00000558"},
- {{al, r1, 0x00000ab0}, false, al, "al r1 0x00000ab0", "al_r1_0x00000ab0"},
- {{al, r1, 0x00001560}, false, al, "al r1 0x00001560", "al_r1_0x00001560"},
- {{al, r1, 0x00002ac0}, false, al, "al r1 0x00002ac0", "al_r1_0x00002ac0"},
- {{al, r1, 0x00005580}, false, al, "al r1 0x00005580", "al_r1_0x00005580"},
- {{al, r1, 0x0000ab00}, false, al, "al r1 0x0000ab00", "al_r1_0x0000ab00"},
- {{al, r1, 0x00015600}, false, al, "al r1 0x00015600", "al_r1_0x00015600"},
- {{al, r1, 0x0002ac00}, false, al, "al r1 0x0002ac00", "al_r1_0x0002ac00"},
- {{al, r1, 0x00055800}, false, al, "al r1 0x00055800", "al_r1_0x00055800"},
- {{al, r1, 0x000ab000}, false, al, "al r1 0x000ab000", "al_r1_0x000ab000"},
- {{al, r1, 0x00156000}, false, al, "al r1 0x00156000", "al_r1_0x00156000"},
- {{al, r1, 0x002ac000}, false, al, "al r1 0x002ac000", "al_r1_0x002ac000"},
- {{al, r1, 0x00558000}, false, al, "al r1 0x00558000", "al_r1_0x00558000"},
- {{al, r1, 0x00ab0000}, false, al, "al r1 0x00ab0000", "al_r1_0x00ab0000"},
- {{al, r1, 0x01560000}, false, al, "al r1 0x01560000", "al_r1_0x01560000"},
- {{al, r1, 0x02ac0000}, false, al, "al r1 0x02ac0000", "al_r1_0x02ac0000"},
- {{al, r1, 0x05580000}, false, al, "al r1 0x05580000", "al_r1_0x05580000"},
- {{al, r1, 0x0ab00000}, false, al, "al r1 0x0ab00000", "al_r1_0x0ab00000"},
- {{al, r1, 0x15600000}, false, al, "al r1 0x15600000", "al_r1_0x15600000"},
- {{al, r1, 0x2ac00000}, false, al, "al r1 0x2ac00000", "al_r1_0x2ac00000"},
- {{al, r1, 0x55800000}, false, al, "al r1 0x55800000", "al_r1_0x55800000"},
- {{al, r1, 0xab000000}, false, al, "al r1 0xab000000", "al_r1_0xab000000"},
- {{al, r1, 0x000000ab}, false, al, "al r1 0x000000ab", "al_r1_0x000000ab"},
- {{al, r1, 0x00ab00ab}, false, al, "al r1 0x00ab00ab", "al_r1_0x00ab00ab"},
- {{al, r1, 0xab00ab00}, false, al, "al r1 0xab00ab00", "al_r1_0xab00ab00"},
- {{al, r1, 0xabababab}, false, al, "al r1 0xabababab", "al_r1_0xabababab"},
- {{al, r2, 0x000001fe}, false, al, "al r2 0x000001fe", "al_r2_0x000001fe"},
- {{al, r2, 0x000003fc}, false, al, "al r2 0x000003fc", "al_r2_0x000003fc"},
- {{al, r2, 0x000007f8}, false, al, "al r2 0x000007f8", "al_r2_0x000007f8"},
- {{al, r2, 0x00000ff0}, false, al, "al r2 0x00000ff0", "al_r2_0x00000ff0"},
- {{al, r2, 0x00001fe0}, false, al, "al r2 0x00001fe0", "al_r2_0x00001fe0"},
- {{al, r2, 0x00003fc0}, false, al, "al r2 0x00003fc0", "al_r2_0x00003fc0"},
- {{al, r2, 0x00007f80}, false, al, "al r2 0x00007f80", "al_r2_0x00007f80"},
- {{al, r2, 0x0000ff00}, false, al, "al r2 0x0000ff00", "al_r2_0x0000ff00"},
- {{al, r2, 0x0001fe00}, false, al, "al r2 0x0001fe00", "al_r2_0x0001fe00"},
- {{al, r2, 0x0003fc00}, false, al, "al r2 0x0003fc00", "al_r2_0x0003fc00"},
- {{al, r2, 0x0007f800}, false, al, "al r2 0x0007f800", "al_r2_0x0007f800"},
- {{al, r2, 0x000ff000}, false, al, "al r2 0x000ff000", "al_r2_0x000ff000"},
- {{al, r2, 0x001fe000}, false, al, "al r2 0x001fe000", "al_r2_0x001fe000"},
- {{al, r2, 0x003fc000}, false, al, "al r2 0x003fc000", "al_r2_0x003fc000"},
- {{al, r2, 0x007f8000}, false, al, "al r2 0x007f8000", "al_r2_0x007f8000"},
- {{al, r2, 0x00ff0000}, false, al, "al r2 0x00ff0000", "al_r2_0x00ff0000"},
- {{al, r2, 0x01fe0000}, false, al, "al r2 0x01fe0000", "al_r2_0x01fe0000"},
- {{al, r2, 0x03fc0000}, false, al, "al r2 0x03fc0000", "al_r2_0x03fc0000"},
- {{al, r2, 0x07f80000}, false, al, "al r2 0x07f80000", "al_r2_0x07f80000"},
- {{al, r2, 0x0ff00000}, false, al, "al r2 0x0ff00000", "al_r2_0x0ff00000"},
- {{al, r2, 0x1fe00000}, false, al, "al r2 0x1fe00000", "al_r2_0x1fe00000"},
- {{al, r2, 0x3fc00000}, false, al, "al r2 0x3fc00000", "al_r2_0x3fc00000"},
- {{al, r2, 0x7f800000}, false, al, "al r2 0x7f800000", "al_r2_0x7f800000"},
- {{al, r2, 0xff000000}, false, al, "al r2 0xff000000", "al_r2_0xff000000"},
- {{al, r2, 0x000000ff}, false, al, "al r2 0x000000ff", "al_r2_0x000000ff"},
- {{al, r2, 0x00ff00ff}, false, al, "al r2 0x00ff00ff", "al_r2_0x00ff00ff"},
- {{al, r2, 0xff00ff00}, false, al, "al r2 0xff00ff00", "al_r2_0xff00ff00"},
- {{al, r2, 0xffffffff}, false, al, "al r2 0xffffffff", "al_r2_0xffffffff"},
- {{al, r2, 0x00000156}, false, al, "al r2 0x00000156", "al_r2_0x00000156"},
- {{al, r2, 0x000002ac}, false, al, "al r2 0x000002ac", "al_r2_0x000002ac"},
- {{al, r2, 0x00000558}, false, al, "al r2 0x00000558", "al_r2_0x00000558"},
- {{al, r2, 0x00000ab0}, false, al, "al r2 0x00000ab0", "al_r2_0x00000ab0"},
- {{al, r2, 0x00001560}, false, al, "al r2 0x00001560", "al_r2_0x00001560"},
- {{al, r2, 0x00002ac0}, false, al, "al r2 0x00002ac0", "al_r2_0x00002ac0"},
- {{al, r2, 0x00005580}, false, al, "al r2 0x00005580", "al_r2_0x00005580"},
- {{al, r2, 0x0000ab00}, false, al, "al r2 0x0000ab00", "al_r2_0x0000ab00"},
- {{al, r2, 0x00015600}, false, al, "al r2 0x00015600", "al_r2_0x00015600"},
- {{al, r2, 0x0002ac00}, false, al, "al r2 0x0002ac00", "al_r2_0x0002ac00"},
- {{al, r2, 0x00055800}, false, al, "al r2 0x00055800", "al_r2_0x00055800"},
- {{al, r2, 0x000ab000}, false, al, "al r2 0x000ab000", "al_r2_0x000ab000"},
- {{al, r2, 0x00156000}, false, al, "al r2 0x00156000", "al_r2_0x00156000"},
- {{al, r2, 0x002ac000}, false, al, "al r2 0x002ac000", "al_r2_0x002ac000"},
- {{al, r2, 0x00558000}, false, al, "al r2 0x00558000", "al_r2_0x00558000"},
- {{al, r2, 0x00ab0000}, false, al, "al r2 0x00ab0000", "al_r2_0x00ab0000"},
- {{al, r2, 0x01560000}, false, al, "al r2 0x01560000", "al_r2_0x01560000"},
- {{al, r2, 0x02ac0000}, false, al, "al r2 0x02ac0000", "al_r2_0x02ac0000"},
- {{al, r2, 0x05580000}, false, al, "al r2 0x05580000", "al_r2_0x05580000"},
- {{al, r2, 0x0ab00000}, false, al, "al r2 0x0ab00000", "al_r2_0x0ab00000"},
- {{al, r2, 0x15600000}, false, al, "al r2 0x15600000", "al_r2_0x15600000"},
- {{al, r2, 0x2ac00000}, false, al, "al r2 0x2ac00000", "al_r2_0x2ac00000"},
- {{al, r2, 0x55800000}, false, al, "al r2 0x55800000", "al_r2_0x55800000"},
- {{al, r2, 0xab000000}, false, al, "al r2 0xab000000", "al_r2_0xab000000"},
- {{al, r2, 0x000000ab}, false, al, "al r2 0x000000ab", "al_r2_0x000000ab"},
- {{al, r2, 0x00ab00ab}, false, al, "al r2 0x00ab00ab", "al_r2_0x00ab00ab"},
- {{al, r2, 0xab00ab00}, false, al, "al r2 0xab00ab00", "al_r2_0xab00ab00"},
- {{al, r2, 0xabababab}, false, al, "al r2 0xabababab", "al_r2_0xabababab"},
- {{al, r3, 0x000001fe}, false, al, "al r3 0x000001fe", "al_r3_0x000001fe"},
- {{al, r3, 0x000003fc}, false, al, "al r3 0x000003fc", "al_r3_0x000003fc"},
- {{al, r3, 0x000007f8}, false, al, "al r3 0x000007f8", "al_r3_0x000007f8"},
- {{al, r3, 0x00000ff0}, false, al, "al r3 0x00000ff0", "al_r3_0x00000ff0"},
- {{al, r3, 0x00001fe0}, false, al, "al r3 0x00001fe0", "al_r3_0x00001fe0"},
- {{al, r3, 0x00003fc0}, false, al, "al r3 0x00003fc0", "al_r3_0x00003fc0"},
- {{al, r3, 0x00007f80}, false, al, "al r3 0x00007f80", "al_r3_0x00007f80"},
- {{al, r3, 0x0000ff00}, false, al, "al r3 0x0000ff00", "al_r3_0x0000ff00"},
- {{al, r3, 0x0001fe00}, false, al, "al r3 0x0001fe00", "al_r3_0x0001fe00"},
- {{al, r3, 0x0003fc00}, false, al, "al r3 0x0003fc00", "al_r3_0x0003fc00"},
- {{al, r3, 0x0007f800}, false, al, "al r3 0x0007f800", "al_r3_0x0007f800"},
- {{al, r3, 0x000ff000}, false, al, "al r3 0x000ff000", "al_r3_0x000ff000"},
- {{al, r3, 0x001fe000}, false, al, "al r3 0x001fe000", "al_r3_0x001fe000"},
- {{al, r3, 0x003fc000}, false, al, "al r3 0x003fc000", "al_r3_0x003fc000"},
- {{al, r3, 0x007f8000}, false, al, "al r3 0x007f8000", "al_r3_0x007f8000"},
- {{al, r3, 0x00ff0000}, false, al, "al r3 0x00ff0000", "al_r3_0x00ff0000"},
- {{al, r3, 0x01fe0000}, false, al, "al r3 0x01fe0000", "al_r3_0x01fe0000"},
- {{al, r3, 0x03fc0000}, false, al, "al r3 0x03fc0000", "al_r3_0x03fc0000"},
- {{al, r3, 0x07f80000}, false, al, "al r3 0x07f80000", "al_r3_0x07f80000"},
- {{al, r3, 0x0ff00000}, false, al, "al r3 0x0ff00000", "al_r3_0x0ff00000"},
- {{al, r3, 0x1fe00000}, false, al, "al r3 0x1fe00000", "al_r3_0x1fe00000"},
- {{al, r3, 0x3fc00000}, false, al, "al r3 0x3fc00000", "al_r3_0x3fc00000"},
- {{al, r3, 0x7f800000}, false, al, "al r3 0x7f800000", "al_r3_0x7f800000"},
- {{al, r3, 0xff000000}, false, al, "al r3 0xff000000", "al_r3_0xff000000"},
- {{al, r3, 0x000000ff}, false, al, "al r3 0x000000ff", "al_r3_0x000000ff"},
- {{al, r3, 0x00ff00ff}, false, al, "al r3 0x00ff00ff", "al_r3_0x00ff00ff"},
- {{al, r3, 0xff00ff00}, false, al, "al r3 0xff00ff00", "al_r3_0xff00ff00"},
- {{al, r3, 0xffffffff}, false, al, "al r3 0xffffffff", "al_r3_0xffffffff"},
- {{al, r3, 0x00000156}, false, al, "al r3 0x00000156", "al_r3_0x00000156"},
- {{al, r3, 0x000002ac}, false, al, "al r3 0x000002ac", "al_r3_0x000002ac"},
- {{al, r3, 0x00000558}, false, al, "al r3 0x00000558", "al_r3_0x00000558"},
- {{al, r3, 0x00000ab0}, false, al, "al r3 0x00000ab0", "al_r3_0x00000ab0"},
- {{al, r3, 0x00001560}, false, al, "al r3 0x00001560", "al_r3_0x00001560"},
- {{al, r3, 0x00002ac0}, false, al, "al r3 0x00002ac0", "al_r3_0x00002ac0"},
- {{al, r3, 0x00005580}, false, al, "al r3 0x00005580", "al_r3_0x00005580"},
- {{al, r3, 0x0000ab00}, false, al, "al r3 0x0000ab00", "al_r3_0x0000ab00"},
- {{al, r3, 0x00015600}, false, al, "al r3 0x00015600", "al_r3_0x00015600"},
- {{al, r3, 0x0002ac00}, false, al, "al r3 0x0002ac00", "al_r3_0x0002ac00"},
- {{al, r3, 0x00055800}, false, al, "al r3 0x00055800", "al_r3_0x00055800"},
- {{al, r3, 0x000ab000}, false, al, "al r3 0x000ab000", "al_r3_0x000ab000"},
- {{al, r3, 0x00156000}, false, al, "al r3 0x00156000", "al_r3_0x00156000"},
- {{al, r3, 0x002ac000}, false, al, "al r3 0x002ac000", "al_r3_0x002ac000"},
- {{al, r3, 0x00558000}, false, al, "al r3 0x00558000", "al_r3_0x00558000"},
- {{al, r3, 0x00ab0000}, false, al, "al r3 0x00ab0000", "al_r3_0x00ab0000"},
- {{al, r3, 0x01560000}, false, al, "al r3 0x01560000", "al_r3_0x01560000"},
- {{al, r3, 0x02ac0000}, false, al, "al r3 0x02ac0000", "al_r3_0x02ac0000"},
- {{al, r3, 0x05580000}, false, al, "al r3 0x05580000", "al_r3_0x05580000"},
- {{al, r3, 0x0ab00000}, false, al, "al r3 0x0ab00000", "al_r3_0x0ab00000"},
- {{al, r3, 0x15600000}, false, al, "al r3 0x15600000", "al_r3_0x15600000"},
- {{al, r3, 0x2ac00000}, false, al, "al r3 0x2ac00000", "al_r3_0x2ac00000"},
- {{al, r3, 0x55800000}, false, al, "al r3 0x55800000", "al_r3_0x55800000"},
- {{al, r3, 0xab000000}, false, al, "al r3 0xab000000", "al_r3_0xab000000"},
- {{al, r3, 0x000000ab}, false, al, "al r3 0x000000ab", "al_r3_0x000000ab"},
- {{al, r3, 0x00ab00ab}, false, al, "al r3 0x00ab00ab", "al_r3_0x00ab00ab"},
- {{al, r3, 0xab00ab00}, false, al, "al r3 0xab00ab00", "al_r3_0xab00ab00"},
- {{al, r3, 0xabababab}, false, al, "al r3 0xabababab", "al_r3_0xabababab"},
- {{al, r4, 0x000001fe}, false, al, "al r4 0x000001fe", "al_r4_0x000001fe"},
- {{al, r4, 0x000003fc}, false, al, "al r4 0x000003fc", "al_r4_0x000003fc"},
- {{al, r4, 0x000007f8}, false, al, "al r4 0x000007f8", "al_r4_0x000007f8"},
- {{al, r4, 0x00000ff0}, false, al, "al r4 0x00000ff0", "al_r4_0x00000ff0"},
- {{al, r4, 0x00001fe0}, false, al, "al r4 0x00001fe0", "al_r4_0x00001fe0"},
- {{al, r4, 0x00003fc0}, false, al, "al r4 0x00003fc0", "al_r4_0x00003fc0"},
- {{al, r4, 0x00007f80}, false, al, "al r4 0x00007f80", "al_r4_0x00007f80"},
- {{al, r4, 0x0000ff00}, false, al, "al r4 0x0000ff00", "al_r4_0x0000ff00"},
- {{al, r4, 0x0001fe00}, false, al, "al r4 0x0001fe00", "al_r4_0x0001fe00"},
- {{al, r4, 0x0003fc00}, false, al, "al r4 0x0003fc00", "al_r4_0x0003fc00"},
- {{al, r4, 0x0007f800}, false, al, "al r4 0x0007f800", "al_r4_0x0007f800"},
- {{al, r4, 0x000ff000}, false, al, "al r4 0x000ff000", "al_r4_0x000ff000"},
- {{al, r4, 0x001fe000}, false, al, "al r4 0x001fe000", "al_r4_0x001fe000"},
- {{al, r4, 0x003fc000}, false, al, "al r4 0x003fc000", "al_r4_0x003fc000"},
- {{al, r4, 0x007f8000}, false, al, "al r4 0x007f8000", "al_r4_0x007f8000"},
- {{al, r4, 0x00ff0000}, false, al, "al r4 0x00ff0000", "al_r4_0x00ff0000"},
- {{al, r4, 0x01fe0000}, false, al, "al r4 0x01fe0000", "al_r4_0x01fe0000"},
- {{al, r4, 0x03fc0000}, false, al, "al r4 0x03fc0000", "al_r4_0x03fc0000"},
- {{al, r4, 0x07f80000}, false, al, "al r4 0x07f80000", "al_r4_0x07f80000"},
- {{al, r4, 0x0ff00000}, false, al, "al r4 0x0ff00000", "al_r4_0x0ff00000"},
- {{al, r4, 0x1fe00000}, false, al, "al r4 0x1fe00000", "al_r4_0x1fe00000"},
- {{al, r4, 0x3fc00000}, false, al, "al r4 0x3fc00000", "al_r4_0x3fc00000"},
- {{al, r4, 0x7f800000}, false, al, "al r4 0x7f800000", "al_r4_0x7f800000"},
- {{al, r4, 0xff000000}, false, al, "al r4 0xff000000", "al_r4_0xff000000"},
- {{al, r4, 0x000000ff}, false, al, "al r4 0x000000ff", "al_r4_0x000000ff"},
- {{al, r4, 0x00ff00ff}, false, al, "al r4 0x00ff00ff", "al_r4_0x00ff00ff"},
- {{al, r4, 0xff00ff00}, false, al, "al r4 0xff00ff00", "al_r4_0xff00ff00"},
- {{al, r4, 0xffffffff}, false, al, "al r4 0xffffffff", "al_r4_0xffffffff"},
- {{al, r4, 0x00000156}, false, al, "al r4 0x00000156", "al_r4_0x00000156"},
- {{al, r4, 0x000002ac}, false, al, "al r4 0x000002ac", "al_r4_0x000002ac"},
- {{al, r4, 0x00000558}, false, al, "al r4 0x00000558", "al_r4_0x00000558"},
- {{al, r4, 0x00000ab0}, false, al, "al r4 0x00000ab0", "al_r4_0x00000ab0"},
- {{al, r4, 0x00001560}, false, al, "al r4 0x00001560", "al_r4_0x00001560"},
- {{al, r4, 0x00002ac0}, false, al, "al r4 0x00002ac0", "al_r4_0x00002ac0"},
- {{al, r4, 0x00005580}, false, al, "al r4 0x00005580", "al_r4_0x00005580"},
- {{al, r4, 0x0000ab00}, false, al, "al r4 0x0000ab00", "al_r4_0x0000ab00"},
- {{al, r4, 0x00015600}, false, al, "al r4 0x00015600", "al_r4_0x00015600"},
- {{al, r4, 0x0002ac00}, false, al, "al r4 0x0002ac00", "al_r4_0x0002ac00"},
- {{al, r4, 0x00055800}, false, al, "al r4 0x00055800", "al_r4_0x00055800"},
- {{al, r4, 0x000ab000}, false, al, "al r4 0x000ab000", "al_r4_0x000ab000"},
- {{al, r4, 0x00156000}, false, al, "al r4 0x00156000", "al_r4_0x00156000"},
- {{al, r4, 0x002ac000}, false, al, "al r4 0x002ac000", "al_r4_0x002ac000"},
- {{al, r4, 0x00558000}, false, al, "al r4 0x00558000", "al_r4_0x00558000"},
- {{al, r4, 0x00ab0000}, false, al, "al r4 0x00ab0000", "al_r4_0x00ab0000"},
- {{al, r4, 0x01560000}, false, al, "al r4 0x01560000", "al_r4_0x01560000"},
- {{al, r4, 0x02ac0000}, false, al, "al r4 0x02ac0000", "al_r4_0x02ac0000"},
- {{al, r4, 0x05580000}, false, al, "al r4 0x05580000", "al_r4_0x05580000"},
- {{al, r4, 0x0ab00000}, false, al, "al r4 0x0ab00000", "al_r4_0x0ab00000"},
- {{al, r4, 0x15600000}, false, al, "al r4 0x15600000", "al_r4_0x15600000"},
- {{al, r4, 0x2ac00000}, false, al, "al r4 0x2ac00000", "al_r4_0x2ac00000"},
- {{al, r4, 0x55800000}, false, al, "al r4 0x55800000", "al_r4_0x55800000"},
- {{al, r4, 0xab000000}, false, al, "al r4 0xab000000", "al_r4_0xab000000"},
- {{al, r4, 0x000000ab}, false, al, "al r4 0x000000ab", "al_r4_0x000000ab"},
- {{al, r4, 0x00ab00ab}, false, al, "al r4 0x00ab00ab", "al_r4_0x00ab00ab"},
- {{al, r4, 0xab00ab00}, false, al, "al r4 0xab00ab00", "al_r4_0xab00ab00"},
- {{al, r4, 0xabababab}, false, al, "al r4 0xabababab", "al_r4_0xabababab"},
- {{al, r5, 0x000001fe}, false, al, "al r5 0x000001fe", "al_r5_0x000001fe"},
- {{al, r5, 0x000003fc}, false, al, "al r5 0x000003fc", "al_r5_0x000003fc"},
- {{al, r5, 0x000007f8}, false, al, "al r5 0x000007f8", "al_r5_0x000007f8"},
- {{al, r5, 0x00000ff0}, false, al, "al r5 0x00000ff0", "al_r5_0x00000ff0"},
- {{al, r5, 0x00001fe0}, false, al, "al r5 0x00001fe0", "al_r5_0x00001fe0"},
- {{al, r5, 0x00003fc0}, false, al, "al r5 0x00003fc0", "al_r5_0x00003fc0"},
- {{al, r5, 0x00007f80}, false, al, "al r5 0x00007f80", "al_r5_0x00007f80"},
- {{al, r5, 0x0000ff00}, false, al, "al r5 0x0000ff00", "al_r5_0x0000ff00"},
- {{al, r5, 0x0001fe00}, false, al, "al r5 0x0001fe00", "al_r5_0x0001fe00"},
- {{al, r5, 0x0003fc00}, false, al, "al r5 0x0003fc00", "al_r5_0x0003fc00"},
- {{al, r5, 0x0007f800}, false, al, "al r5 0x0007f800", "al_r5_0x0007f800"},
- {{al, r5, 0x000ff000}, false, al, "al r5 0x000ff000", "al_r5_0x000ff000"},
- {{al, r5, 0x001fe000}, false, al, "al r5 0x001fe000", "al_r5_0x001fe000"},
- {{al, r5, 0x003fc000}, false, al, "al r5 0x003fc000", "al_r5_0x003fc000"},
- {{al, r5, 0x007f8000}, false, al, "al r5 0x007f8000", "al_r5_0x007f8000"},
- {{al, r5, 0x00ff0000}, false, al, "al r5 0x00ff0000", "al_r5_0x00ff0000"},
- {{al, r5, 0x01fe0000}, false, al, "al r5 0x01fe0000", "al_r5_0x01fe0000"},
- {{al, r5, 0x03fc0000}, false, al, "al r5 0x03fc0000", "al_r5_0x03fc0000"},
- {{al, r5, 0x07f80000}, false, al, "al r5 0x07f80000", "al_r5_0x07f80000"},
- {{al, r5, 0x0ff00000}, false, al, "al r5 0x0ff00000", "al_r5_0x0ff00000"},
- {{al, r5, 0x1fe00000}, false, al, "al r5 0x1fe00000", "al_r5_0x1fe00000"},
- {{al, r5, 0x3fc00000}, false, al, "al r5 0x3fc00000", "al_r5_0x3fc00000"},
- {{al, r5, 0x7f800000}, false, al, "al r5 0x7f800000", "al_r5_0x7f800000"},
- {{al, r5, 0xff000000}, false, al, "al r5 0xff000000", "al_r5_0xff000000"},
- {{al, r5, 0x000000ff}, false, al, "al r5 0x000000ff", "al_r5_0x000000ff"},
- {{al, r5, 0x00ff00ff}, false, al, "al r5 0x00ff00ff", "al_r5_0x00ff00ff"},
- {{al, r5, 0xff00ff00}, false, al, "al r5 0xff00ff00", "al_r5_0xff00ff00"},
- {{al, r5, 0xffffffff}, false, al, "al r5 0xffffffff", "al_r5_0xffffffff"},
- {{al, r5, 0x00000156}, false, al, "al r5 0x00000156", "al_r5_0x00000156"},
- {{al, r5, 0x000002ac}, false, al, "al r5 0x000002ac", "al_r5_0x000002ac"},
- {{al, r5, 0x00000558}, false, al, "al r5 0x00000558", "al_r5_0x00000558"},
- {{al, r5, 0x00000ab0}, false, al, "al r5 0x00000ab0", "al_r5_0x00000ab0"},
- {{al, r5, 0x00001560}, false, al, "al r5 0x00001560", "al_r5_0x00001560"},
- {{al, r5, 0x00002ac0}, false, al, "al r5 0x00002ac0", "al_r5_0x00002ac0"},
- {{al, r5, 0x00005580}, false, al, "al r5 0x00005580", "al_r5_0x00005580"},
- {{al, r5, 0x0000ab00}, false, al, "al r5 0x0000ab00", "al_r5_0x0000ab00"},
- {{al, r5, 0x00015600}, false, al, "al r5 0x00015600", "al_r5_0x00015600"},
- {{al, r5, 0x0002ac00}, false, al, "al r5 0x0002ac00", "al_r5_0x0002ac00"},
- {{al, r5, 0x00055800}, false, al, "al r5 0x00055800", "al_r5_0x00055800"},
- {{al, r5, 0x000ab000}, false, al, "al r5 0x000ab000", "al_r5_0x000ab000"},
- {{al, r5, 0x00156000}, false, al, "al r5 0x00156000", "al_r5_0x00156000"},
- {{al, r5, 0x002ac000}, false, al, "al r5 0x002ac000", "al_r5_0x002ac000"},
- {{al, r5, 0x00558000}, false, al, "al r5 0x00558000", "al_r5_0x00558000"},
- {{al, r5, 0x00ab0000}, false, al, "al r5 0x00ab0000", "al_r5_0x00ab0000"},
- {{al, r5, 0x01560000}, false, al, "al r5 0x01560000", "al_r5_0x01560000"},
- {{al, r5, 0x02ac0000}, false, al, "al r5 0x02ac0000", "al_r5_0x02ac0000"},
- {{al, r5, 0x05580000}, false, al, "al r5 0x05580000", "al_r5_0x05580000"},
- {{al, r5, 0x0ab00000}, false, al, "al r5 0x0ab00000", "al_r5_0x0ab00000"},
- {{al, r5, 0x15600000}, false, al, "al r5 0x15600000", "al_r5_0x15600000"},
- {{al, r5, 0x2ac00000}, false, al, "al r5 0x2ac00000", "al_r5_0x2ac00000"},
- {{al, r5, 0x55800000}, false, al, "al r5 0x55800000", "al_r5_0x55800000"},
- {{al, r5, 0xab000000}, false, al, "al r5 0xab000000", "al_r5_0xab000000"},
- {{al, r5, 0x000000ab}, false, al, "al r5 0x000000ab", "al_r5_0x000000ab"},
- {{al, r5, 0x00ab00ab}, false, al, "al r5 0x00ab00ab", "al_r5_0x00ab00ab"},
- {{al, r5, 0xab00ab00}, false, al, "al r5 0xab00ab00", "al_r5_0xab00ab00"},
- {{al, r5, 0xabababab}, false, al, "al r5 0xabababab", "al_r5_0xabababab"},
- {{al, r6, 0x000001fe}, false, al, "al r6 0x000001fe", "al_r6_0x000001fe"},
- {{al, r6, 0x000003fc}, false, al, "al r6 0x000003fc", "al_r6_0x000003fc"},
- {{al, r6, 0x000007f8}, false, al, "al r6 0x000007f8", "al_r6_0x000007f8"},
- {{al, r6, 0x00000ff0}, false, al, "al r6 0x00000ff0", "al_r6_0x00000ff0"},
- {{al, r6, 0x00001fe0}, false, al, "al r6 0x00001fe0", "al_r6_0x00001fe0"},
- {{al, r6, 0x00003fc0}, false, al, "al r6 0x00003fc0", "al_r6_0x00003fc0"},
- {{al, r6, 0x00007f80}, false, al, "al r6 0x00007f80", "al_r6_0x00007f80"},
- {{al, r6, 0x0000ff00}, false, al, "al r6 0x0000ff00", "al_r6_0x0000ff00"},
- {{al, r6, 0x0001fe00}, false, al, "al r6 0x0001fe00", "al_r6_0x0001fe00"},
- {{al, r6, 0x0003fc00}, false, al, "al r6 0x0003fc00", "al_r6_0x0003fc00"},
- {{al, r6, 0x0007f800}, false, al, "al r6 0x0007f800", "al_r6_0x0007f800"},
- {{al, r6, 0x000ff000}, false, al, "al r6 0x000ff000", "al_r6_0x000ff000"},
- {{al, r6, 0x001fe000}, false, al, "al r6 0x001fe000", "al_r6_0x001fe000"},
- {{al, r6, 0x003fc000}, false, al, "al r6 0x003fc000", "al_r6_0x003fc000"},
- {{al, r6, 0x007f8000}, false, al, "al r6 0x007f8000", "al_r6_0x007f8000"},
- {{al, r6, 0x00ff0000}, false, al, "al r6 0x00ff0000", "al_r6_0x00ff0000"},
- {{al, r6, 0x01fe0000}, false, al, "al r6 0x01fe0000", "al_r6_0x01fe0000"},
- {{al, r6, 0x03fc0000}, false, al, "al r6 0x03fc0000", "al_r6_0x03fc0000"},
- {{al, r6, 0x07f80000}, false, al, "al r6 0x07f80000", "al_r6_0x07f80000"},
- {{al, r6, 0x0ff00000}, false, al, "al r6 0x0ff00000", "al_r6_0x0ff00000"},
- {{al, r6, 0x1fe00000}, false, al, "al r6 0x1fe00000", "al_r6_0x1fe00000"},
- {{al, r6, 0x3fc00000}, false, al, "al r6 0x3fc00000", "al_r6_0x3fc00000"},
- {{al, r6, 0x7f800000}, false, al, "al r6 0x7f800000", "al_r6_0x7f800000"},
- {{al, r6, 0xff000000}, false, al, "al r6 0xff000000", "al_r6_0xff000000"},
- {{al, r6, 0x000000ff}, false, al, "al r6 0x000000ff", "al_r6_0x000000ff"},
- {{al, r6, 0x00ff00ff}, false, al, "al r6 0x00ff00ff", "al_r6_0x00ff00ff"},
- {{al, r6, 0xff00ff00}, false, al, "al r6 0xff00ff00", "al_r6_0xff00ff00"},
- {{al, r6, 0xffffffff}, false, al, "al r6 0xffffffff", "al_r6_0xffffffff"},
- {{al, r6, 0x00000156}, false, al, "al r6 0x00000156", "al_r6_0x00000156"},
- {{al, r6, 0x000002ac}, false, al, "al r6 0x000002ac", "al_r6_0x000002ac"},
- {{al, r6, 0x00000558}, false, al, "al r6 0x00000558", "al_r6_0x00000558"},
- {{al, r6, 0x00000ab0}, false, al, "al r6 0x00000ab0", "al_r6_0x00000ab0"},
- {{al, r6, 0x00001560}, false, al, "al r6 0x00001560", "al_r6_0x00001560"},
- {{al, r6, 0x00002ac0}, false, al, "al r6 0x00002ac0", "al_r6_0x00002ac0"},
- {{al, r6, 0x00005580}, false, al, "al r6 0x00005580", "al_r6_0x00005580"},
- {{al, r6, 0x0000ab00}, false, al, "al r6 0x0000ab00", "al_r6_0x0000ab00"},
- {{al, r6, 0x00015600}, false, al, "al r6 0x00015600", "al_r6_0x00015600"},
- {{al, r6, 0x0002ac00}, false, al, "al r6 0x0002ac00", "al_r6_0x0002ac00"},
- {{al, r6, 0x00055800}, false, al, "al r6 0x00055800", "al_r6_0x00055800"},
- {{al, r6, 0x000ab000}, false, al, "al r6 0x000ab000", "al_r6_0x000ab000"},
- {{al, r6, 0x00156000}, false, al, "al r6 0x00156000", "al_r6_0x00156000"},
- {{al, r6, 0x002ac000}, false, al, "al r6 0x002ac000", "al_r6_0x002ac000"},
- {{al, r6, 0x00558000}, false, al, "al r6 0x00558000", "al_r6_0x00558000"},
- {{al, r6, 0x00ab0000}, false, al, "al r6 0x00ab0000", "al_r6_0x00ab0000"},
- {{al, r6, 0x01560000}, false, al, "al r6 0x01560000", "al_r6_0x01560000"},
- {{al, r6, 0x02ac0000}, false, al, "al r6 0x02ac0000", "al_r6_0x02ac0000"},
- {{al, r6, 0x05580000}, false, al, "al r6 0x05580000", "al_r6_0x05580000"},
- {{al, r6, 0x0ab00000}, false, al, "al r6 0x0ab00000", "al_r6_0x0ab00000"},
- {{al, r6, 0x15600000}, false, al, "al r6 0x15600000", "al_r6_0x15600000"},
- {{al, r6, 0x2ac00000}, false, al, "al r6 0x2ac00000", "al_r6_0x2ac00000"},
- {{al, r6, 0x55800000}, false, al, "al r6 0x55800000", "al_r6_0x55800000"},
- {{al, r6, 0xab000000}, false, al, "al r6 0xab000000", "al_r6_0xab000000"},
- {{al, r6, 0x000000ab}, false, al, "al r6 0x000000ab", "al_r6_0x000000ab"},
- {{al, r6, 0x00ab00ab}, false, al, "al r6 0x00ab00ab", "al_r6_0x00ab00ab"},
- {{al, r6, 0xab00ab00}, false, al, "al r6 0xab00ab00", "al_r6_0xab00ab00"},
- {{al, r6, 0xabababab}, false, al, "al r6 0xabababab", "al_r6_0xabababab"},
- {{al, r7, 0x000001fe}, false, al, "al r7 0x000001fe", "al_r7_0x000001fe"},
- {{al, r7, 0x000003fc}, false, al, "al r7 0x000003fc", "al_r7_0x000003fc"},
- {{al, r7, 0x000007f8}, false, al, "al r7 0x000007f8", "al_r7_0x000007f8"},
- {{al, r7, 0x00000ff0}, false, al, "al r7 0x00000ff0", "al_r7_0x00000ff0"},
- {{al, r7, 0x00001fe0}, false, al, "al r7 0x00001fe0", "al_r7_0x00001fe0"},
- {{al, r7, 0x00003fc0}, false, al, "al r7 0x00003fc0", "al_r7_0x00003fc0"},
- {{al, r7, 0x00007f80}, false, al, "al r7 0x00007f80", "al_r7_0x00007f80"},
- {{al, r7, 0x0000ff00}, false, al, "al r7 0x0000ff00", "al_r7_0x0000ff00"},
- {{al, r7, 0x0001fe00}, false, al, "al r7 0x0001fe00", "al_r7_0x0001fe00"},
- {{al, r7, 0x0003fc00}, false, al, "al r7 0x0003fc00", "al_r7_0x0003fc00"},
- {{al, r7, 0x0007f800}, false, al, "al r7 0x0007f800", "al_r7_0x0007f800"},
- {{al, r7, 0x000ff000}, false, al, "al r7 0x000ff000", "al_r7_0x000ff000"},
- {{al, r7, 0x001fe000}, false, al, "al r7 0x001fe000", "al_r7_0x001fe000"},
- {{al, r7, 0x003fc000}, false, al, "al r7 0x003fc000", "al_r7_0x003fc000"},
- {{al, r7, 0x007f8000}, false, al, "al r7 0x007f8000", "al_r7_0x007f8000"},
- {{al, r7, 0x00ff0000}, false, al, "al r7 0x00ff0000", "al_r7_0x00ff0000"},
- {{al, r7, 0x01fe0000}, false, al, "al r7 0x01fe0000", "al_r7_0x01fe0000"},
- {{al, r7, 0x03fc0000}, false, al, "al r7 0x03fc0000", "al_r7_0x03fc0000"},
- {{al, r7, 0x07f80000}, false, al, "al r7 0x07f80000", "al_r7_0x07f80000"},
- {{al, r7, 0x0ff00000}, false, al, "al r7 0x0ff00000", "al_r7_0x0ff00000"},
- {{al, r7, 0x1fe00000}, false, al, "al r7 0x1fe00000", "al_r7_0x1fe00000"},
- {{al, r7, 0x3fc00000}, false, al, "al r7 0x3fc00000", "al_r7_0x3fc00000"},
- {{al, r7, 0x7f800000}, false, al, "al r7 0x7f800000", "al_r7_0x7f800000"},
- {{al, r7, 0xff000000}, false, al, "al r7 0xff000000", "al_r7_0xff000000"},
- {{al, r7, 0x000000ff}, false, al, "al r7 0x000000ff", "al_r7_0x000000ff"},
- {{al, r7, 0x00ff00ff}, false, al, "al r7 0x00ff00ff", "al_r7_0x00ff00ff"},
- {{al, r7, 0xff00ff00}, false, al, "al r7 0xff00ff00", "al_r7_0xff00ff00"},
- {{al, r7, 0xffffffff}, false, al, "al r7 0xffffffff", "al_r7_0xffffffff"},
- {{al, r7, 0x00000156}, false, al, "al r7 0x00000156", "al_r7_0x00000156"},
- {{al, r7, 0x000002ac}, false, al, "al r7 0x000002ac", "al_r7_0x000002ac"},
- {{al, r7, 0x00000558}, false, al, "al r7 0x00000558", "al_r7_0x00000558"},
- {{al, r7, 0x00000ab0}, false, al, "al r7 0x00000ab0", "al_r7_0x00000ab0"},
- {{al, r7, 0x00001560}, false, al, "al r7 0x00001560", "al_r7_0x00001560"},
- {{al, r7, 0x00002ac0}, false, al, "al r7 0x00002ac0", "al_r7_0x00002ac0"},
- {{al, r7, 0x00005580}, false, al, "al r7 0x00005580", "al_r7_0x00005580"},
- {{al, r7, 0x0000ab00}, false, al, "al r7 0x0000ab00", "al_r7_0x0000ab00"},
- {{al, r7, 0x00015600}, false, al, "al r7 0x00015600", "al_r7_0x00015600"},
- {{al, r7, 0x0002ac00}, false, al, "al r7 0x0002ac00", "al_r7_0x0002ac00"},
- {{al, r7, 0x00055800}, false, al, "al r7 0x00055800", "al_r7_0x00055800"},
- {{al, r7, 0x000ab000}, false, al, "al r7 0x000ab000", "al_r7_0x000ab000"},
- {{al, r7, 0x00156000}, false, al, "al r7 0x00156000", "al_r7_0x00156000"},
- {{al, r7, 0x002ac000}, false, al, "al r7 0x002ac000", "al_r7_0x002ac000"},
- {{al, r7, 0x00558000}, false, al, "al r7 0x00558000", "al_r7_0x00558000"},
- {{al, r7, 0x00ab0000}, false, al, "al r7 0x00ab0000", "al_r7_0x00ab0000"},
- {{al, r7, 0x01560000}, false, al, "al r7 0x01560000", "al_r7_0x01560000"},
- {{al, r7, 0x02ac0000}, false, al, "al r7 0x02ac0000", "al_r7_0x02ac0000"},
- {{al, r7, 0x05580000}, false, al, "al r7 0x05580000", "al_r7_0x05580000"},
- {{al, r7, 0x0ab00000}, false, al, "al r7 0x0ab00000", "al_r7_0x0ab00000"},
- {{al, r7, 0x15600000}, false, al, "al r7 0x15600000", "al_r7_0x15600000"},
- {{al, r7, 0x2ac00000}, false, al, "al r7 0x2ac00000", "al_r7_0x2ac00000"},
- {{al, r7, 0x55800000}, false, al, "al r7 0x55800000", "al_r7_0x55800000"},
- {{al, r7, 0xab000000}, false, al, "al r7 0xab000000", "al_r7_0xab000000"},
- {{al, r7, 0x000000ab}, false, al, "al r7 0x000000ab", "al_r7_0x000000ab"},
- {{al, r7, 0x00ab00ab}, false, al, "al r7 0x00ab00ab", "al_r7_0x00ab00ab"},
- {{al, r7, 0xab00ab00}, false, al, "al r7 0xab00ab00", "al_r7_0xab00ab00"},
- {{al, r7, 0xabababab}, false, al, "al r7 0xabababab", "al_r7_0xabababab"},
- {{al, r8, 0x000001fe}, false, al, "al r8 0x000001fe", "al_r8_0x000001fe"},
- {{al, r8, 0x000003fc}, false, al, "al r8 0x000003fc", "al_r8_0x000003fc"},
- {{al, r8, 0x000007f8}, false, al, "al r8 0x000007f8", "al_r8_0x000007f8"},
- {{al, r8, 0x00000ff0}, false, al, "al r8 0x00000ff0", "al_r8_0x00000ff0"},
- {{al, r8, 0x00001fe0}, false, al, "al r8 0x00001fe0", "al_r8_0x00001fe0"},
- {{al, r8, 0x00003fc0}, false, al, "al r8 0x00003fc0", "al_r8_0x00003fc0"},
- {{al, r8, 0x00007f80}, false, al, "al r8 0x00007f80", "al_r8_0x00007f80"},
- {{al, r8, 0x0000ff00}, false, al, "al r8 0x0000ff00", "al_r8_0x0000ff00"},
- {{al, r8, 0x0001fe00}, false, al, "al r8 0x0001fe00", "al_r8_0x0001fe00"},
- {{al, r8, 0x0003fc00}, false, al, "al r8 0x0003fc00", "al_r8_0x0003fc00"},
- {{al, r8, 0x0007f800}, false, al, "al r8 0x0007f800", "al_r8_0x0007f800"},
- {{al, r8, 0x000ff000}, false, al, "al r8 0x000ff000", "al_r8_0x000ff000"},
- {{al, r8, 0x001fe000}, false, al, "al r8 0x001fe000", "al_r8_0x001fe000"},
- {{al, r8, 0x003fc000}, false, al, "al r8 0x003fc000", "al_r8_0x003fc000"},
- {{al, r8, 0x007f8000}, false, al, "al r8 0x007f8000", "al_r8_0x007f8000"},
- {{al, r8, 0x00ff0000}, false, al, "al r8 0x00ff0000", "al_r8_0x00ff0000"},
- {{al, r8, 0x01fe0000}, false, al, "al r8 0x01fe0000", "al_r8_0x01fe0000"},
- {{al, r8, 0x03fc0000}, false, al, "al r8 0x03fc0000", "al_r8_0x03fc0000"},
- {{al, r8, 0x07f80000}, false, al, "al r8 0x07f80000", "al_r8_0x07f80000"},
- {{al, r8, 0x0ff00000}, false, al, "al r8 0x0ff00000", "al_r8_0x0ff00000"},
- {{al, r8, 0x1fe00000}, false, al, "al r8 0x1fe00000", "al_r8_0x1fe00000"},
- {{al, r8, 0x3fc00000}, false, al, "al r8 0x3fc00000", "al_r8_0x3fc00000"},
- {{al, r8, 0x7f800000}, false, al, "al r8 0x7f800000", "al_r8_0x7f800000"},
- {{al, r8, 0xff000000}, false, al, "al r8 0xff000000", "al_r8_0xff000000"},
- {{al, r8, 0x000000ff}, false, al, "al r8 0x000000ff", "al_r8_0x000000ff"},
- {{al, r8, 0x00ff00ff}, false, al, "al r8 0x00ff00ff", "al_r8_0x00ff00ff"},
- {{al, r8, 0xff00ff00}, false, al, "al r8 0xff00ff00", "al_r8_0xff00ff00"},
- {{al, r8, 0xffffffff}, false, al, "al r8 0xffffffff", "al_r8_0xffffffff"},
- {{al, r8, 0x00000156}, false, al, "al r8 0x00000156", "al_r8_0x00000156"},
- {{al, r8, 0x000002ac}, false, al, "al r8 0x000002ac", "al_r8_0x000002ac"},
- {{al, r8, 0x00000558}, false, al, "al r8 0x00000558", "al_r8_0x00000558"},
- {{al, r8, 0x00000ab0}, false, al, "al r8 0x00000ab0", "al_r8_0x00000ab0"},
- {{al, r8, 0x00001560}, false, al, "al r8 0x00001560", "al_r8_0x00001560"},
- {{al, r8, 0x00002ac0}, false, al, "al r8 0x00002ac0", "al_r8_0x00002ac0"},
- {{al, r8, 0x00005580}, false, al, "al r8 0x00005580", "al_r8_0x00005580"},
- {{al, r8, 0x0000ab00}, false, al, "al r8 0x0000ab00", "al_r8_0x0000ab00"},
- {{al, r8, 0x00015600}, false, al, "al r8 0x00015600", "al_r8_0x00015600"},
- {{al, r8, 0x0002ac00}, false, al, "al r8 0x0002ac00", "al_r8_0x0002ac00"},
- {{al, r8, 0x00055800}, false, al, "al r8 0x00055800", "al_r8_0x00055800"},
- {{al, r8, 0x000ab000}, false, al, "al r8 0x000ab000", "al_r8_0x000ab000"},
- {{al, r8, 0x00156000}, false, al, "al r8 0x00156000", "al_r8_0x00156000"},
- {{al, r8, 0x002ac000}, false, al, "al r8 0x002ac000", "al_r8_0x002ac000"},
- {{al, r8, 0x00558000}, false, al, "al r8 0x00558000", "al_r8_0x00558000"},
- {{al, r8, 0x00ab0000}, false, al, "al r8 0x00ab0000", "al_r8_0x00ab0000"},
- {{al, r8, 0x01560000}, false, al, "al r8 0x01560000", "al_r8_0x01560000"},
- {{al, r8, 0x02ac0000}, false, al, "al r8 0x02ac0000", "al_r8_0x02ac0000"},
- {{al, r8, 0x05580000}, false, al, "al r8 0x05580000", "al_r8_0x05580000"},
- {{al, r8, 0x0ab00000}, false, al, "al r8 0x0ab00000", "al_r8_0x0ab00000"},
- {{al, r8, 0x15600000}, false, al, "al r8 0x15600000", "al_r8_0x15600000"},
- {{al, r8, 0x2ac00000}, false, al, "al r8 0x2ac00000", "al_r8_0x2ac00000"},
- {{al, r8, 0x55800000}, false, al, "al r8 0x55800000", "al_r8_0x55800000"},
- {{al, r8, 0xab000000}, false, al, "al r8 0xab000000", "al_r8_0xab000000"},
- {{al, r8, 0x000000ab}, false, al, "al r8 0x000000ab", "al_r8_0x000000ab"},
- {{al, r8, 0x00ab00ab}, false, al, "al r8 0x00ab00ab", "al_r8_0x00ab00ab"},
- {{al, r8, 0xab00ab00}, false, al, "al r8 0xab00ab00", "al_r8_0xab00ab00"},
- {{al, r8, 0xabababab}, false, al, "al r8 0xabababab", "al_r8_0xabababab"},
- {{al, r9, 0x000001fe}, false, al, "al r9 0x000001fe", "al_r9_0x000001fe"},
- {{al, r9, 0x000003fc}, false, al, "al r9 0x000003fc", "al_r9_0x000003fc"},
- {{al, r9, 0x000007f8}, false, al, "al r9 0x000007f8", "al_r9_0x000007f8"},
- {{al, r9, 0x00000ff0}, false, al, "al r9 0x00000ff0", "al_r9_0x00000ff0"},
- {{al, r9, 0x00001fe0}, false, al, "al r9 0x00001fe0", "al_r9_0x00001fe0"},
- {{al, r9, 0x00003fc0}, false, al, "al r9 0x00003fc0", "al_r9_0x00003fc0"},
- {{al, r9, 0x00007f80}, false, al, "al r9 0x00007f80", "al_r9_0x00007f80"},
- {{al, r9, 0x0000ff00}, false, al, "al r9 0x0000ff00", "al_r9_0x0000ff00"},
- {{al, r9, 0x0001fe00}, false, al, "al r9 0x0001fe00", "al_r9_0x0001fe00"},
- {{al, r9, 0x0003fc00}, false, al, "al r9 0x0003fc00", "al_r9_0x0003fc00"},
- {{al, r9, 0x0007f800}, false, al, "al r9 0x0007f800", "al_r9_0x0007f800"},
- {{al, r9, 0x000ff000}, false, al, "al r9 0x000ff000", "al_r9_0x000ff000"},
- {{al, r9, 0x001fe000}, false, al, "al r9 0x001fe000", "al_r9_0x001fe000"},
- {{al, r9, 0x003fc000}, false, al, "al r9 0x003fc000", "al_r9_0x003fc000"},
- {{al, r9, 0x007f8000}, false, al, "al r9 0x007f8000", "al_r9_0x007f8000"},
- {{al, r9, 0x00ff0000}, false, al, "al r9 0x00ff0000", "al_r9_0x00ff0000"},
- {{al, r9, 0x01fe0000}, false, al, "al r9 0x01fe0000", "al_r9_0x01fe0000"},
- {{al, r9, 0x03fc0000}, false, al, "al r9 0x03fc0000", "al_r9_0x03fc0000"},
- {{al, r9, 0x07f80000}, false, al, "al r9 0x07f80000", "al_r9_0x07f80000"},
- {{al, r9, 0x0ff00000}, false, al, "al r9 0x0ff00000", "al_r9_0x0ff00000"},
- {{al, r9, 0x1fe00000}, false, al, "al r9 0x1fe00000", "al_r9_0x1fe00000"},
- {{al, r9, 0x3fc00000}, false, al, "al r9 0x3fc00000", "al_r9_0x3fc00000"},
- {{al, r9, 0x7f800000}, false, al, "al r9 0x7f800000", "al_r9_0x7f800000"},
- {{al, r9, 0xff000000}, false, al, "al r9 0xff000000", "al_r9_0xff000000"},
- {{al, r9, 0x000000ff}, false, al, "al r9 0x000000ff", "al_r9_0x000000ff"},
- {{al, r9, 0x00ff00ff}, false, al, "al r9 0x00ff00ff", "al_r9_0x00ff00ff"},
- {{al, r9, 0xff00ff00}, false, al, "al r9 0xff00ff00", "al_r9_0xff00ff00"},
- {{al, r9, 0xffffffff}, false, al, "al r9 0xffffffff", "al_r9_0xffffffff"},
- {{al, r9, 0x00000156}, false, al, "al r9 0x00000156", "al_r9_0x00000156"},
- {{al, r9, 0x000002ac}, false, al, "al r9 0x000002ac", "al_r9_0x000002ac"},
- {{al, r9, 0x00000558}, false, al, "al r9 0x00000558", "al_r9_0x00000558"},
- {{al, r9, 0x00000ab0}, false, al, "al r9 0x00000ab0", "al_r9_0x00000ab0"},
- {{al, r9, 0x00001560}, false, al, "al r9 0x00001560", "al_r9_0x00001560"},
- {{al, r9, 0x00002ac0}, false, al, "al r9 0x00002ac0", "al_r9_0x00002ac0"},
- {{al, r9, 0x00005580}, false, al, "al r9 0x00005580", "al_r9_0x00005580"},
- {{al, r9, 0x0000ab00}, false, al, "al r9 0x0000ab00", "al_r9_0x0000ab00"},
- {{al, r9, 0x00015600}, false, al, "al r9 0x00015600", "al_r9_0x00015600"},
- {{al, r9, 0x0002ac00}, false, al, "al r9 0x0002ac00", "al_r9_0x0002ac00"},
- {{al, r9, 0x00055800}, false, al, "al r9 0x00055800", "al_r9_0x00055800"},
- {{al, r9, 0x000ab000}, false, al, "al r9 0x000ab000", "al_r9_0x000ab000"},
- {{al, r9, 0x00156000}, false, al, "al r9 0x00156000", "al_r9_0x00156000"},
- {{al, r9, 0x002ac000}, false, al, "al r9 0x002ac000", "al_r9_0x002ac000"},
- {{al, r9, 0x00558000}, false, al, "al r9 0x00558000", "al_r9_0x00558000"},
- {{al, r9, 0x00ab0000}, false, al, "al r9 0x00ab0000", "al_r9_0x00ab0000"},
- {{al, r9, 0x01560000}, false, al, "al r9 0x01560000", "al_r9_0x01560000"},
- {{al, r9, 0x02ac0000}, false, al, "al r9 0x02ac0000", "al_r9_0x02ac0000"},
- {{al, r9, 0x05580000}, false, al, "al r9 0x05580000", "al_r9_0x05580000"},
- {{al, r9, 0x0ab00000}, false, al, "al r9 0x0ab00000", "al_r9_0x0ab00000"},
- {{al, r9, 0x15600000}, false, al, "al r9 0x15600000", "al_r9_0x15600000"},
- {{al, r9, 0x2ac00000}, false, al, "al r9 0x2ac00000", "al_r9_0x2ac00000"},
- {{al, r9, 0x55800000}, false, al, "al r9 0x55800000", "al_r9_0x55800000"},
- {{al, r9, 0xab000000}, false, al, "al r9 0xab000000", "al_r9_0xab000000"},
- {{al, r9, 0x000000ab}, false, al, "al r9 0x000000ab", "al_r9_0x000000ab"},
- {{al, r9, 0x00ab00ab}, false, al, "al r9 0x00ab00ab", "al_r9_0x00ab00ab"},
- {{al, r9, 0xab00ab00}, false, al, "al r9 0xab00ab00", "al_r9_0xab00ab00"},
- {{al, r9, 0xabababab}, false, al, "al r9 0xabababab", "al_r9_0xabababab"},
- {{al, r10, 0x000001fe}, false, al, "al r10 0x000001fe", "al_r10_"
- "0x000001fe"},
- {{al, r10, 0x000003fc}, false, al, "al r10 0x000003fc", "al_r10_"
- "0x000003fc"},
- {{al, r10, 0x000007f8}, false, al, "al r10 0x000007f8", "al_r10_"
- "0x000007f8"},
- {{al, r10, 0x00000ff0}, false, al, "al r10 0x00000ff0", "al_r10_"
- "0x00000ff0"},
- {{al, r10, 0x00001fe0}, false, al, "al r10 0x00001fe0", "al_r10_"
- "0x00001fe0"},
- {{al, r10, 0x00003fc0}, false, al, "al r10 0x00003fc0", "al_r10_"
- "0x00003fc0"},
- {{al, r10, 0x00007f80}, false, al, "al r10 0x00007f80", "al_r10_"
- "0x00007f80"},
- {{al, r10, 0x0000ff00}, false, al, "al r10 0x0000ff00", "al_r10_"
- "0x0000ff00"},
- {{al, r10, 0x0001fe00}, false, al, "al r10 0x0001fe00", "al_r10_"
- "0x0001fe00"},
- {{al, r10, 0x0003fc00}, false, al, "al r10 0x0003fc00", "al_r10_"
- "0x0003fc00"},
- {{al, r10, 0x0007f800}, false, al, "al r10 0x0007f800", "al_r10_"
- "0x0007f800"},
- {{al, r10, 0x000ff000}, false, al, "al r10 0x000ff000", "al_r10_"
- "0x000ff000"},
- {{al, r10, 0x001fe000}, false, al, "al r10 0x001fe000", "al_r10_"
- "0x001fe000"},
- {{al, r10, 0x003fc000}, false, al, "al r10 0x003fc000", "al_r10_"
- "0x003fc000"},
- {{al, r10, 0x007f8000}, false, al, "al r10 0x007f8000", "al_r10_"
- "0x007f8000"},
- {{al, r10, 0x00ff0000}, false, al, "al r10 0x00ff0000", "al_r10_"
- "0x00ff0000"},
- {{al, r10, 0x01fe0000}, false, al, "al r10 0x01fe0000", "al_r10_"
- "0x01fe0000"},
- {{al, r10, 0x03fc0000}, false, al, "al r10 0x03fc0000", "al_r10_"
- "0x03fc0000"},
- {{al, r10, 0x07f80000}, false, al, "al r10 0x07f80000", "al_r10_"
- "0x07f80000"},
- {{al, r10, 0x0ff00000}, false, al, "al r10 0x0ff00000", "al_r10_"
- "0x0ff00000"},
- {{al, r10, 0x1fe00000}, false, al, "al r10 0x1fe00000", "al_r10_"
- "0x1fe00000"},
- {{al, r10, 0x3fc00000}, false, al, "al r10 0x3fc00000", "al_r10_"
- "0x3fc00000"},
- {{al, r10, 0x7f800000}, false, al, "al r10 0x7f800000", "al_r10_"
- "0x7f800000"},
- {{al, r10, 0xff000000}, false, al, "al r10 0xff000000", "al_r10_"
- "0xff000000"},
- {{al, r10, 0x000000ff}, false, al, "al r10 0x000000ff", "al_r10_"
- "0x000000ff"},
- {{al, r10, 0x00ff00ff}, false, al, "al r10 0x00ff00ff", "al_r10_"
- "0x00ff00ff"},
- {{al, r10, 0xff00ff00}, false, al, "al r10 0xff00ff00", "al_r10_"
- "0xff00ff00"},
- {{al, r10, 0xffffffff}, false, al, "al r10 0xffffffff", "al_r10_"
- "0xffffffff"},
- {{al, r10, 0x00000156}, false, al, "al r10 0x00000156", "al_r10_"
- "0x00000156"},
- {{al, r10, 0x000002ac}, false, al, "al r10 0x000002ac", "al_r10_"
- "0x000002ac"},
- {{al, r10, 0x00000558}, false, al, "al r10 0x00000558", "al_r10_"
- "0x00000558"},
- {{al, r10, 0x00000ab0}, false, al, "al r10 0x00000ab0", "al_r10_"
- "0x00000ab0"},
- {{al, r10, 0x00001560}, false, al, "al r10 0x00001560", "al_r10_"
- "0x00001560"},
- {{al, r10, 0x00002ac0}, false, al, "al r10 0x00002ac0", "al_r10_"
- "0x00002ac0"},
- {{al, r10, 0x00005580}, false, al, "al r10 0x00005580", "al_r10_"
- "0x00005580"},
- {{al, r10, 0x0000ab00}, false, al, "al r10 0x0000ab00", "al_r10_"
- "0x0000ab00"},
- {{al, r10, 0x00015600}, false, al, "al r10 0x00015600", "al_r10_"
- "0x00015600"},
- {{al, r10, 0x0002ac00}, false, al, "al r10 0x0002ac00", "al_r10_"
- "0x0002ac00"},
- {{al, r10, 0x00055800}, false, al, "al r10 0x00055800", "al_r10_"
- "0x00055800"},
- {{al, r10, 0x000ab000}, false, al, "al r10 0x000ab000", "al_r10_"
- "0x000ab000"},
- {{al, r10, 0x00156000}, false, al, "al r10 0x00156000", "al_r10_"
- "0x00156000"},
- {{al, r10, 0x002ac000}, false, al, "al r10 0x002ac000", "al_r10_"
- "0x002ac000"},
- {{al, r10, 0x00558000}, false, al, "al r10 0x00558000", "al_r10_"
- "0x00558000"},
- {{al, r10, 0x00ab0000}, false, al, "al r10 0x00ab0000", "al_r10_"
- "0x00ab0000"},
- {{al, r10, 0x01560000}, false, al, "al r10 0x01560000", "al_r10_"
- "0x01560000"},
- {{al, r10, 0x02ac0000}, false, al, "al r10 0x02ac0000", "al_r10_"
- "0x02ac0000"},
- {{al, r10, 0x05580000}, false, al, "al r10 0x05580000", "al_r10_"
- "0x05580000"},
- {{al, r10, 0x0ab00000}, false, al, "al r10 0x0ab00000", "al_r10_"
- "0x0ab00000"},
- {{al, r10, 0x15600000}, false, al, "al r10 0x15600000", "al_r10_"
- "0x15600000"},
- {{al, r10, 0x2ac00000}, false, al, "al r10 0x2ac00000", "al_r10_"
- "0x2ac00000"},
- {{al, r10, 0x55800000}, false, al, "al r10 0x55800000", "al_r10_"
- "0x55800000"},
- {{al, r10, 0xab000000}, false, al, "al r10 0xab000000", "al_r10_"
- "0xab000000"},
- {{al, r10, 0x000000ab}, false, al, "al r10 0x000000ab", "al_r10_"
- "0x000000ab"},
- {{al, r10, 0x00ab00ab}, false, al, "al r10 0x00ab00ab", "al_r10_"
- "0x00ab00ab"},
- {{al, r10, 0xab00ab00}, false, al, "al r10 0xab00ab00", "al_r10_"
- "0xab00ab00"},
- {{al, r10, 0xabababab}, false, al, "al r10 0xabababab", "al_r10_"
- "0xabababab"},
- {{al, r11, 0x000001fe}, false, al, "al r11 0x000001fe", "al_r11_"
- "0x000001fe"},
- {{al, r11, 0x000003fc}, false, al, "al r11 0x000003fc", "al_r11_"
- "0x000003fc"},
- {{al, r11, 0x000007f8}, false, al, "al r11 0x000007f8", "al_r11_"
- "0x000007f8"},
- {{al, r11, 0x00000ff0}, false, al, "al r11 0x00000ff0", "al_r11_"
- "0x00000ff0"},
- {{al, r11, 0x00001fe0}, false, al, "al r11 0x00001fe0", "al_r11_"
- "0x00001fe0"},
- {{al, r11, 0x00003fc0}, false, al, "al r11 0x00003fc0", "al_r11_"
- "0x00003fc0"},
- {{al, r11, 0x00007f80}, false, al, "al r11 0x00007f80", "al_r11_"
- "0x00007f80"},
- {{al, r11, 0x0000ff00}, false, al, "al r11 0x0000ff00", "al_r11_"
- "0x0000ff00"},
- {{al, r11, 0x0001fe00}, false, al, "al r11 0x0001fe00", "al_r11_"
- "0x0001fe00"},
- {{al, r11, 0x0003fc00}, false, al, "al r11 0x0003fc00", "al_r11_"
- "0x0003fc00"},
- {{al, r11, 0x0007f800}, false, al, "al r11 0x0007f800", "al_r11_"
- "0x0007f800"},
- {{al, r11, 0x000ff000}, false, al, "al r11 0x000ff000", "al_r11_"
- "0x000ff000"},
- {{al, r11, 0x001fe000}, false, al, "al r11 0x001fe000", "al_r11_"
- "0x001fe000"},
- {{al, r11, 0x003fc000}, false, al, "al r11 0x003fc000", "al_r11_"
- "0x003fc000"},
- {{al, r11, 0x007f8000}, false, al, "al r11 0x007f8000", "al_r11_"
- "0x007f8000"},
- {{al, r11, 0x00ff0000}, false, al, "al r11 0x00ff0000", "al_r11_"
- "0x00ff0000"},
- {{al, r11, 0x01fe0000}, false, al, "al r11 0x01fe0000", "al_r11_"
- "0x01fe0000"},
- {{al, r11, 0x03fc0000}, false, al, "al r11 0x03fc0000", "al_r11_"
- "0x03fc0000"},
- {{al, r11, 0x07f80000}, false, al, "al r11 0x07f80000", "al_r11_"
- "0x07f80000"},
- {{al, r11, 0x0ff00000}, false, al, "al r11 0x0ff00000", "al_r11_"
- "0x0ff00000"},
- {{al, r11, 0x1fe00000}, false, al, "al r11 0x1fe00000", "al_r11_"
- "0x1fe00000"},
- {{al, r11, 0x3fc00000}, false, al, "al r11 0x3fc00000", "al_r11_"
- "0x3fc00000"},
- {{al, r11, 0x7f800000}, false, al, "al r11 0x7f800000", "al_r11_"
- "0x7f800000"},
- {{al, r11, 0xff000000}, false, al, "al r11 0xff000000", "al_r11_"
- "0xff000000"},
- {{al, r11, 0x000000ff}, false, al, "al r11 0x000000ff", "al_r11_"
- "0x000000ff"},
- {{al, r11, 0x00ff00ff}, false, al, "al r11 0x00ff00ff", "al_r11_"
- "0x00ff00ff"},
- {{al, r11, 0xff00ff00}, false, al, "al r11 0xff00ff00", "al_r11_"
- "0xff00ff00"},
- {{al, r11, 0xffffffff}, false, al, "al r11 0xffffffff", "al_r11_"
- "0xffffffff"},
- {{al, r11, 0x00000156}, false, al, "al r11 0x00000156", "al_r11_"
- "0x00000156"},
- {{al, r11, 0x000002ac}, false, al, "al r11 0x000002ac", "al_r11_"
- "0x000002ac"},
- {{al, r11, 0x00000558}, false, al, "al r11 0x00000558", "al_r11_"
- "0x00000558"},
- {{al, r11, 0x00000ab0}, false, al, "al r11 0x00000ab0", "al_r11_"
- "0x00000ab0"},
- {{al, r11, 0x00001560}, false, al, "al r11 0x00001560", "al_r11_"
- "0x00001560"},
- {{al, r11, 0x00002ac0}, false, al, "al r11 0x00002ac0", "al_r11_"
- "0x00002ac0"},
- {{al, r11, 0x00005580}, false, al, "al r11 0x00005580", "al_r11_"
- "0x00005580"},
- {{al, r11, 0x0000ab00}, false, al, "al r11 0x0000ab00", "al_r11_"
- "0x0000ab00"},
- {{al, r11, 0x00015600}, false, al, "al r11 0x00015600", "al_r11_"
- "0x00015600"},
- {{al, r11, 0x0002ac00}, false, al, "al r11 0x0002ac00", "al_r11_"
- "0x0002ac00"},
- {{al, r11, 0x00055800}, false, al, "al r11 0x00055800", "al_r11_"
- "0x00055800"},
- {{al, r11, 0x000ab000}, false, al, "al r11 0x000ab000", "al_r11_"
- "0x000ab000"},
- {{al, r11, 0x00156000}, false, al, "al r11 0x00156000", "al_r11_"
- "0x00156000"},
- {{al, r11, 0x002ac000}, false, al, "al r11 0x002ac000", "al_r11_"
- "0x002ac000"},
- {{al, r11, 0x00558000}, false, al, "al r11 0x00558000", "al_r11_"
- "0x00558000"},
- {{al, r11, 0x00ab0000}, false, al, "al r11 0x00ab0000", "al_r11_"
- "0x00ab0000"},
- {{al, r11, 0x01560000}, false, al, "al r11 0x01560000", "al_r11_"
- "0x01560000"},
- {{al, r11, 0x02ac0000}, false, al, "al r11 0x02ac0000", "al_r11_"
- "0x02ac0000"},
- {{al, r11, 0x05580000}, false, al, "al r11 0x05580000", "al_r11_"
- "0x05580000"},
- {{al, r11, 0x0ab00000}, false, al, "al r11 0x0ab00000", "al_r11_"
- "0x0ab00000"},
- {{al, r11, 0x15600000}, false, al, "al r11 0x15600000", "al_r11_"
- "0x15600000"},
- {{al, r11, 0x2ac00000}, false, al, "al r11 0x2ac00000", "al_r11_"
- "0x2ac00000"},
- {{al, r11, 0x55800000}, false, al, "al r11 0x55800000", "al_r11_"
- "0x55800000"},
- {{al, r11, 0xab000000}, false, al, "al r11 0xab000000", "al_r11_"
- "0xab000000"},
- {{al, r11, 0x000000ab}, false, al, "al r11 0x000000ab", "al_r11_"
- "0x000000ab"},
- {{al, r11, 0x00ab00ab}, false, al, "al r11 0x00ab00ab", "al_r11_"
- "0x00ab00ab"},
- {{al, r11, 0xab00ab00}, false, al, "al r11 0xab00ab00", "al_r11_"
- "0xab00ab00"},
- {{al, r11, 0xabababab}, false, al, "al r11 0xabababab", "al_r11_"
- "0xabababab"},
- {{al, r12, 0x000001fe}, false, al, "al r12 0x000001fe", "al_r12_"
- "0x000001fe"},
- {{al, r12, 0x000003fc}, false, al, "al r12 0x000003fc", "al_r12_"
- "0x000003fc"},
- {{al, r12, 0x000007f8}, false, al, "al r12 0x000007f8", "al_r12_"
- "0x000007f8"},
- {{al, r12, 0x00000ff0}, false, al, "al r12 0x00000ff0", "al_r12_"
- "0x00000ff0"},
- {{al, r12, 0x00001fe0}, false, al, "al r12 0x00001fe0", "al_r12_"
- "0x00001fe0"},
- {{al, r12, 0x00003fc0}, false, al, "al r12 0x00003fc0", "al_r12_"
- "0x00003fc0"},
- {{al, r12, 0x00007f80}, false, al, "al r12 0x00007f80", "al_r12_"
- "0x00007f80"},
- {{al, r12, 0x0000ff00}, false, al, "al r12 0x0000ff00", "al_r12_"
- "0x0000ff00"},
- {{al, r12, 0x0001fe00}, false, al, "al r12 0x0001fe00", "al_r12_"
- "0x0001fe00"},
- {{al, r12, 0x0003fc00}, false, al, "al r12 0x0003fc00", "al_r12_"
- "0x0003fc00"},
- {{al, r12, 0x0007f800}, false, al, "al r12 0x0007f800", "al_r12_"
- "0x0007f800"},
- {{al, r12, 0x000ff000}, false, al, "al r12 0x000ff000", "al_r12_"
- "0x000ff000"},
- {{al, r12, 0x001fe000}, false, al, "al r12 0x001fe000", "al_r12_"
- "0x001fe000"},
- {{al, r12, 0x003fc000}, false, al, "al r12 0x003fc000", "al_r12_"
- "0x003fc000"},
- {{al, r12, 0x007f8000}, false, al, "al r12 0x007f8000", "al_r12_"
- "0x007f8000"},
- {{al, r12, 0x00ff0000}, false, al, "al r12 0x00ff0000", "al_r12_"
- "0x00ff0000"},
- {{al, r12, 0x01fe0000}, false, al, "al r12 0x01fe0000", "al_r12_"
- "0x01fe0000"},
- {{al, r12, 0x03fc0000}, false, al, "al r12 0x03fc0000", "al_r12_"
- "0x03fc0000"},
- {{al, r12, 0x07f80000}, false, al, "al r12 0x07f80000", "al_r12_"
- "0x07f80000"},
- {{al, r12, 0x0ff00000}, false, al, "al r12 0x0ff00000", "al_r12_"
- "0x0ff00000"},
- {{al, r12, 0x1fe00000}, false, al, "al r12 0x1fe00000", "al_r12_"
- "0x1fe00000"},
- {{al, r12, 0x3fc00000}, false, al, "al r12 0x3fc00000", "al_r12_"
- "0x3fc00000"},
- {{al, r12, 0x7f800000}, false, al, "al r12 0x7f800000", "al_r12_"
- "0x7f800000"},
- {{al, r12, 0xff000000}, false, al, "al r12 0xff000000", "al_r12_"
- "0xff000000"},
- {{al, r12, 0x000000ff}, false, al, "al r12 0x000000ff", "al_r12_"
- "0x000000ff"},
- {{al, r12, 0x00ff00ff}, false, al, "al r12 0x00ff00ff", "al_r12_"
- "0x00ff00ff"},
- {{al, r12, 0xff00ff00}, false, al, "al r12 0xff00ff00", "al_r12_"
- "0xff00ff00"},
- {{al, r12, 0xffffffff}, false, al, "al r12 0xffffffff", "al_r12_"
- "0xffffffff"},
- {{al, r12, 0x00000156}, false, al, "al r12 0x00000156", "al_r12_"
- "0x00000156"},
- {{al, r12, 0x000002ac}, false, al, "al r12 0x000002ac", "al_r12_"
- "0x000002ac"},
- {{al, r12, 0x00000558}, false, al, "al r12 0x00000558", "al_r12_"
- "0x00000558"},
- {{al, r12, 0x00000ab0}, false, al, "al r12 0x00000ab0", "al_r12_"
- "0x00000ab0"},
- {{al, r12, 0x00001560}, false, al, "al r12 0x00001560", "al_r12_"
- "0x00001560"},
- {{al, r12, 0x00002ac0}, false, al, "al r12 0x00002ac0", "al_r12_"
- "0x00002ac0"},
- {{al, r12, 0x00005580}, false, al, "al r12 0x00005580", "al_r12_"
- "0x00005580"},
- {{al, r12, 0x0000ab00}, false, al, "al r12 0x0000ab00", "al_r12_"
- "0x0000ab00"},
- {{al, r12, 0x00015600}, false, al, "al r12 0x00015600", "al_r12_"
- "0x00015600"},
- {{al, r12, 0x0002ac00}, false, al, "al r12 0x0002ac00", "al_r12_"
- "0x0002ac00"},
- {{al, r12, 0x00055800}, false, al, "al r12 0x00055800", "al_r12_"
- "0x00055800"},
- {{al, r12, 0x000ab000}, false, al, "al r12 0x000ab000", "al_r12_"
- "0x000ab000"},
- {{al, r12, 0x00156000}, false, al, "al r12 0x00156000", "al_r12_"
- "0x00156000"},
- {{al, r12, 0x002ac000}, false, al, "al r12 0x002ac000", "al_r12_"
- "0x002ac000"},
- {{al, r12, 0x00558000}, false, al, "al r12 0x00558000", "al_r12_"
- "0x00558000"},
- {{al, r12, 0x00ab0000}, false, al, "al r12 0x00ab0000", "al_r12_"
- "0x00ab0000"},
- {{al, r12, 0x01560000}, false, al, "al r12 0x01560000", "al_r12_"
- "0x01560000"},
- {{al, r12, 0x02ac0000}, false, al, "al r12 0x02ac0000", "al_r12_"
- "0x02ac0000"},
- {{al, r12, 0x05580000}, false, al, "al r12 0x05580000", "al_r12_"
- "0x05580000"},
- {{al, r12, 0x0ab00000}, false, al, "al r12 0x0ab00000", "al_r12_"
- "0x0ab00000"},
- {{al, r12, 0x15600000}, false, al, "al r12 0x15600000", "al_r12_"
- "0x15600000"},
- {{al, r12, 0x2ac00000}, false, al, "al r12 0x2ac00000", "al_r12_"
- "0x2ac00000"},
- {{al, r12, 0x55800000}, false, al, "al r12 0x55800000", "al_r12_"
- "0x55800000"},
- {{al, r12, 0xab000000}, false, al, "al r12 0xab000000", "al_r12_"
- "0xab000000"},
- {{al, r12, 0x000000ab}, false, al, "al r12 0x000000ab", "al_r12_"
- "0x000000ab"},
- {{al, r12, 0x00ab00ab}, false, al, "al r12 0x00ab00ab", "al_r12_"
- "0x00ab00ab"},
- {{al, r12, 0xab00ab00}, false, al, "al r12 0xab00ab00", "al_r12_"
- "0xab00ab00"},
- {{al, r12, 0xabababab}, false, al, "al r12 0xabababab", "al_r12_"
- "0xabababab"},
- {{al, r13, 0x000001fe}, false, al, "al r13 0x000001fe", "al_r13_"
- "0x000001fe"},
- {{al, r13, 0x000003fc}, false, al, "al r13 0x000003fc", "al_r13_"
- "0x000003fc"},
- {{al, r13, 0x000007f8}, false, al, "al r13 0x000007f8", "al_r13_"
- "0x000007f8"},
- {{al, r13, 0x00000ff0}, false, al, "al r13 0x00000ff0", "al_r13_"
- "0x00000ff0"},
- {{al, r13, 0x00001fe0}, false, al, "al r13 0x00001fe0", "al_r13_"
- "0x00001fe0"},
- {{al, r13, 0x00003fc0}, false, al, "al r13 0x00003fc0", "al_r13_"
- "0x00003fc0"},
- {{al, r13, 0x00007f80}, false, al, "al r13 0x00007f80", "al_r13_"
- "0x00007f80"},
- {{al, r13, 0x0000ff00}, false, al, "al r13 0x0000ff00", "al_r13_"
- "0x0000ff00"},
- {{al, r13, 0x0001fe00}, false, al, "al r13 0x0001fe00", "al_r13_"
- "0x0001fe00"},
- {{al, r13, 0x0003fc00}, false, al, "al r13 0x0003fc00", "al_r13_"
- "0x0003fc00"},
- {{al, r13, 0x0007f800}, false, al, "al r13 0x0007f800", "al_r13_"
- "0x0007f800"},
- {{al, r13, 0x000ff000}, false, al, "al r13 0x000ff000", "al_r13_"
- "0x000ff000"},
- {{al, r13, 0x001fe000}, false, al, "al r13 0x001fe000", "al_r13_"
- "0x001fe000"},
- {{al, r13, 0x003fc000}, false, al, "al r13 0x003fc000", "al_r13_"
- "0x003fc000"},
- {{al, r13, 0x007f8000}, false, al, "al r13 0x007f8000", "al_r13_"
- "0x007f8000"},
- {{al, r13, 0x00ff0000}, false, al, "al r13 0x00ff0000", "al_r13_"
- "0x00ff0000"},
- {{al, r13, 0x01fe0000}, false, al, "al r13 0x01fe0000", "al_r13_"
- "0x01fe0000"},
- {{al, r13, 0x03fc0000}, false, al, "al r13 0x03fc0000", "al_r13_"
- "0x03fc0000"},
- {{al, r13, 0x07f80000}, false, al, "al r13 0x07f80000", "al_r13_"
- "0x07f80000"},
- {{al, r13, 0x0ff00000}, false, al, "al r13 0x0ff00000", "al_r13_"
- "0x0ff00000"},
- {{al, r13, 0x1fe00000}, false, al, "al r13 0x1fe00000", "al_r13_"
- "0x1fe00000"},
- {{al, r13, 0x3fc00000}, false, al, "al r13 0x3fc00000", "al_r13_"
- "0x3fc00000"},
- {{al, r13, 0x7f800000}, false, al, "al r13 0x7f800000", "al_r13_"
- "0x7f800000"},
- {{al, r13, 0xff000000}, false, al, "al r13 0xff000000", "al_r13_"
- "0xff000000"},
- {{al, r13, 0x000000ff}, false, al, "al r13 0x000000ff", "al_r13_"
- "0x000000ff"},
- {{al, r13, 0x00ff00ff}, false, al, "al r13 0x00ff00ff", "al_r13_"
- "0x00ff00ff"},
- {{al, r13, 0xff00ff00}, false, al, "al r13 0xff00ff00", "al_r13_"
- "0xff00ff00"},
- {{al, r13, 0xffffffff}, false, al, "al r13 0xffffffff", "al_r13_"
- "0xffffffff"},
- {{al, r13, 0x00000156}, false, al, "al r13 0x00000156", "al_r13_"
- "0x00000156"},
- {{al, r13, 0x000002ac}, false, al, "al r13 0x000002ac", "al_r13_"
- "0x000002ac"},
- {{al, r13, 0x00000558}, false, al, "al r13 0x00000558", "al_r13_"
- "0x00000558"},
- {{al, r13, 0x00000ab0}, false, al, "al r13 0x00000ab0", "al_r13_"
- "0x00000ab0"},
- {{al, r13, 0x00001560}, false, al, "al r13 0x00001560", "al_r13_"
- "0x00001560"},
- {{al, r13, 0x00002ac0}, false, al, "al r13 0x00002ac0", "al_r13_"
- "0x00002ac0"},
- {{al, r13, 0x00005580}, false, al, "al r13 0x00005580", "al_r13_"
- "0x00005580"},
- {{al, r13, 0x0000ab00}, false, al, "al r13 0x0000ab00", "al_r13_"
- "0x0000ab00"},
- {{al, r13, 0x00015600}, false, al, "al r13 0x00015600", "al_r13_"
- "0x00015600"},
- {{al, r13, 0x0002ac00}, false, al, "al r13 0x0002ac00", "al_r13_"
- "0x0002ac00"},
- {{al, r13, 0x00055800}, false, al, "al r13 0x00055800", "al_r13_"
- "0x00055800"},
- {{al, r13, 0x000ab000}, false, al, "al r13 0x000ab000", "al_r13_"
- "0x000ab000"},
- {{al, r13, 0x00156000}, false, al, "al r13 0x00156000", "al_r13_"
- "0x00156000"},
- {{al, r13, 0x002ac000}, false, al, "al r13 0x002ac000", "al_r13_"
- "0x002ac000"},
- {{al, r13, 0x00558000}, false, al, "al r13 0x00558000", "al_r13_"
- "0x00558000"},
- {{al, r13, 0x00ab0000}, false, al, "al r13 0x00ab0000", "al_r13_"
- "0x00ab0000"},
- {{al, r13, 0x01560000}, false, al, "al r13 0x01560000", "al_r13_"
- "0x01560000"},
- {{al, r13, 0x02ac0000}, false, al, "al r13 0x02ac0000", "al_r13_"
- "0x02ac0000"},
- {{al, r13, 0x05580000}, false, al, "al r13 0x05580000", "al_r13_"
- "0x05580000"},
- {{al, r13, 0x0ab00000}, false, al, "al r13 0x0ab00000", "al_r13_"
- "0x0ab00000"},
- {{al, r13, 0x15600000}, false, al, "al r13 0x15600000", "al_r13_"
- "0x15600000"},
- {{al, r13, 0x2ac00000}, false, al, "al r13 0x2ac00000", "al_r13_"
- "0x2ac00000"},
- {{al, r13, 0x55800000}, false, al, "al r13 0x55800000", "al_r13_"
- "0x55800000"},
- {{al, r13, 0xab000000}, false, al, "al r13 0xab000000", "al_r13_"
- "0xab000000"},
- {{al, r13, 0x000000ab}, false, al, "al r13 0x000000ab", "al_r13_"
- "0x000000ab"},
- {{al, r13, 0x00ab00ab}, false, al, "al r13 0x00ab00ab", "al_r13_"
- "0x00ab00ab"},
- {{al, r13, 0xab00ab00}, false, al, "al r13 0xab00ab00", "al_r13_"
- "0xab00ab00"},
- {{al, r13, 0xabababab}, false, al, "al r13 0xabababab", "al_r13_"
- "0xabababab"},
- {{al, r14, 0x000001fe}, false, al, "al r14 0x000001fe", "al_r14_"
- "0x000001fe"},
- {{al, r14, 0x000003fc}, false, al, "al r14 0x000003fc", "al_r14_"
- "0x000003fc"},
- {{al, r14, 0x000007f8}, false, al, "al r14 0x000007f8", "al_r14_"
- "0x000007f8"},
- {{al, r14, 0x00000ff0}, false, al, "al r14 0x00000ff0", "al_r14_"
- "0x00000ff0"},
- {{al, r14, 0x00001fe0}, false, al, "al r14 0x00001fe0", "al_r14_"
- "0x00001fe0"},
- {{al, r14, 0x00003fc0}, false, al, "al r14 0x00003fc0", "al_r14_"
- "0x00003fc0"},
- {{al, r14, 0x00007f80}, false, al, "al r14 0x00007f80", "al_r14_"
- "0x00007f80"},
- {{al, r14, 0x0000ff00}, false, al, "al r14 0x0000ff00", "al_r14_"
- "0x0000ff00"},
- {{al, r14, 0x0001fe00}, false, al, "al r14 0x0001fe00", "al_r14_"
- "0x0001fe00"},
- {{al, r14, 0x0003fc00}, false, al, "al r14 0x0003fc00", "al_r14_"
- "0x0003fc00"},
- {{al, r14, 0x0007f800}, false, al, "al r14 0x0007f800", "al_r14_"
- "0x0007f800"},
- {{al, r14, 0x000ff000}, false, al, "al r14 0x000ff000", "al_r14_"
- "0x000ff000"},
- {{al, r14, 0x001fe000}, false, al, "al r14 0x001fe000", "al_r14_"
- "0x001fe000"},
- {{al, r14, 0x003fc000}, false, al, "al r14 0x003fc000", "al_r14_"
- "0x003fc000"},
- {{al, r14, 0x007f8000}, false, al, "al r14 0x007f8000", "al_r14_"
- "0x007f8000"},
- {{al, r14, 0x00ff0000}, false, al, "al r14 0x00ff0000", "al_r14_"
- "0x00ff0000"},
- {{al, r14, 0x01fe0000}, false, al, "al r14 0x01fe0000", "al_r14_"
- "0x01fe0000"},
- {{al, r14, 0x03fc0000}, false, al, "al r14 0x03fc0000", "al_r14_"
- "0x03fc0000"},
- {{al, r14, 0x07f80000}, false, al, "al r14 0x07f80000", "al_r14_"
- "0x07f80000"},
- {{al, r14, 0x0ff00000}, false, al, "al r14 0x0ff00000", "al_r14_"
- "0x0ff00000"},
- {{al, r14, 0x1fe00000}, false, al, "al r14 0x1fe00000", "al_r14_"
- "0x1fe00000"},
- {{al, r14, 0x3fc00000}, false, al, "al r14 0x3fc00000", "al_r14_"
- "0x3fc00000"},
- {{al, r14, 0x7f800000}, false, al, "al r14 0x7f800000", "al_r14_"
- "0x7f800000"},
- {{al, r14, 0xff000000}, false, al, "al r14 0xff000000", "al_r14_"
- "0xff000000"},
- {{al, r14, 0x000000ff}, false, al, "al r14 0x000000ff", "al_r14_"
- "0x000000ff"},
- {{al, r14, 0x00ff00ff}, false, al, "al r14 0x00ff00ff", "al_r14_"
- "0x00ff00ff"},
- {{al, r14, 0xff00ff00}, false, al, "al r14 0xff00ff00", "al_r14_"
- "0xff00ff00"},
- {{al, r14, 0xffffffff}, false, al, "al r14 0xffffffff", "al_r14_"
- "0xffffffff"},
- {{al, r14, 0x00000156}, false, al, "al r14 0x00000156", "al_r14_"
- "0x00000156"},
- {{al, r14, 0x000002ac}, false, al, "al r14 0x000002ac", "al_r14_"
- "0x000002ac"},
- {{al, r14, 0x00000558}, false, al, "al r14 0x00000558", "al_r14_"
- "0x00000558"},
- {{al, r14, 0x00000ab0}, false, al, "al r14 0x00000ab0", "al_r14_"
- "0x00000ab0"},
- {{al, r14, 0x00001560}, false, al, "al r14 0x00001560", "al_r14_"
- "0x00001560"},
- {{al, r14, 0x00002ac0}, false, al, "al r14 0x00002ac0", "al_r14_"
- "0x00002ac0"},
- {{al, r14, 0x00005580}, false, al, "al r14 0x00005580", "al_r14_"
- "0x00005580"},
- {{al, r14, 0x0000ab00}, false, al, "al r14 0x0000ab00", "al_r14_"
- "0x0000ab00"},
- {{al, r14, 0x00015600}, false, al, "al r14 0x00015600", "al_r14_"
- "0x00015600"},
- {{al, r14, 0x0002ac00}, false, al, "al r14 0x0002ac00", "al_r14_"
- "0x0002ac00"},
- {{al, r14, 0x00055800}, false, al, "al r14 0x00055800", "al_r14_"
- "0x00055800"},
- {{al, r14, 0x000ab000}, false, al, "al r14 0x000ab000", "al_r14_"
- "0x000ab000"},
- {{al, r14, 0x00156000}, false, al, "al r14 0x00156000", "al_r14_"
- "0x00156000"},
- {{al, r14, 0x002ac000}, false, al, "al r14 0x002ac000", "al_r14_"
- "0x002ac000"},
- {{al, r14, 0x00558000}, false, al, "al r14 0x00558000", "al_r14_"
- "0x00558000"},
- {{al, r14, 0x00ab0000}, false, al, "al r14 0x00ab0000", "al_r14_"
- "0x00ab0000"},
- {{al, r14, 0x01560000}, false, al, "al r14 0x01560000", "al_r14_"
- "0x01560000"},
- {{al, r14, 0x02ac0000}, false, al, "al r14 0x02ac0000", "al_r14_"
- "0x02ac0000"},
- {{al, r14, 0x05580000}, false, al, "al r14 0x05580000", "al_r14_"
- "0x05580000"},
- {{al, r14, 0x0ab00000}, false, al, "al r14 0x0ab00000", "al_r14_"
- "0x0ab00000"},
- {{al, r14, 0x15600000}, false, al, "al r14 0x15600000", "al_r14_"
- "0x15600000"},
- {{al, r14, 0x2ac00000}, false, al, "al r14 0x2ac00000", "al_r14_"
- "0x2ac00000"},
- {{al, r14, 0x55800000}, false, al, "al r14 0x55800000", "al_r14_"
- "0x55800000"},
- {{al, r14, 0xab000000}, false, al, "al r14 0xab000000", "al_r14_"
- "0xab000000"},
- {{al, r14, 0x000000ab}, false, al, "al r14 0x000000ab", "al_r14_"
- "0x000000ab"},
- {{al, r14, 0x00ab00ab}, false, al, "al r14 0x00ab00ab", "al_r14_"
- "0x00ab00ab"},
- {{al, r14, 0xab00ab00}, false, al, "al r14 0xab00ab00", "al_r14_"
- "0xab00ab00"},
- {{al, r14, 0xabababab},
- false,
- al,
- "al r14 0xabababab",
- "al_r14_0xabababab"}};
+const TestData kTests[] =
+ {{{al, r0, 0x000001fe}, false, al, "al r0 0x000001fe", "al_r0_0x000001fe"},
+ {{al, r0, 0x000003fc}, false, al, "al r0 0x000003fc", "al_r0_0x000003fc"},
+ {{al, r0, 0x000007f8}, false, al, "al r0 0x000007f8", "al_r0_0x000007f8"},
+ {{al, r0, 0x00000ff0}, false, al, "al r0 0x00000ff0", "al_r0_0x00000ff0"},
+ {{al, r0, 0x00001fe0}, false, al, "al r0 0x00001fe0", "al_r0_0x00001fe0"},
+ {{al, r0, 0x00003fc0}, false, al, "al r0 0x00003fc0", "al_r0_0x00003fc0"},
+ {{al, r0, 0x00007f80}, false, al, "al r0 0x00007f80", "al_r0_0x00007f80"},
+ {{al, r0, 0x0000ff00}, false, al, "al r0 0x0000ff00", "al_r0_0x0000ff00"},
+ {{al, r0, 0x0001fe00}, false, al, "al r0 0x0001fe00", "al_r0_0x0001fe00"},
+ {{al, r0, 0x0003fc00}, false, al, "al r0 0x0003fc00", "al_r0_0x0003fc00"},
+ {{al, r0, 0x0007f800}, false, al, "al r0 0x0007f800", "al_r0_0x0007f800"},
+ {{al, r0, 0x000ff000}, false, al, "al r0 0x000ff000", "al_r0_0x000ff000"},
+ {{al, r0, 0x001fe000}, false, al, "al r0 0x001fe000", "al_r0_0x001fe000"},
+ {{al, r0, 0x003fc000}, false, al, "al r0 0x003fc000", "al_r0_0x003fc000"},
+ {{al, r0, 0x007f8000}, false, al, "al r0 0x007f8000", "al_r0_0x007f8000"},
+ {{al, r0, 0x00ff0000}, false, al, "al r0 0x00ff0000", "al_r0_0x00ff0000"},
+ {{al, r0, 0x01fe0000}, false, al, "al r0 0x01fe0000", "al_r0_0x01fe0000"},
+ {{al, r0, 0x03fc0000}, false, al, "al r0 0x03fc0000", "al_r0_0x03fc0000"},
+ {{al, r0, 0x07f80000}, false, al, "al r0 0x07f80000", "al_r0_0x07f80000"},
+ {{al, r0, 0x0ff00000}, false, al, "al r0 0x0ff00000", "al_r0_0x0ff00000"},
+ {{al, r0, 0x1fe00000}, false, al, "al r0 0x1fe00000", "al_r0_0x1fe00000"},
+ {{al, r0, 0x3fc00000}, false, al, "al r0 0x3fc00000", "al_r0_0x3fc00000"},
+ {{al, r0, 0x7f800000}, false, al, "al r0 0x7f800000", "al_r0_0x7f800000"},
+ {{al, r0, 0xff000000}, false, al, "al r0 0xff000000", "al_r0_0xff000000"},
+ {{al, r0, 0x000000ff}, false, al, "al r0 0x000000ff", "al_r0_0x000000ff"},
+ {{al, r0, 0x00ff00ff}, false, al, "al r0 0x00ff00ff", "al_r0_0x00ff00ff"},
+ {{al, r0, 0xff00ff00}, false, al, "al r0 0xff00ff00", "al_r0_0xff00ff00"},
+ {{al, r0, 0xffffffff}, false, al, "al r0 0xffffffff", "al_r0_0xffffffff"},
+ {{al, r0, 0x00000156}, false, al, "al r0 0x00000156", "al_r0_0x00000156"},
+ {{al, r0, 0x000002ac}, false, al, "al r0 0x000002ac", "al_r0_0x000002ac"},
+ {{al, r0, 0x00000558}, false, al, "al r0 0x00000558", "al_r0_0x00000558"},
+ {{al, r0, 0x00000ab0}, false, al, "al r0 0x00000ab0", "al_r0_0x00000ab0"},
+ {{al, r0, 0x00001560}, false, al, "al r0 0x00001560", "al_r0_0x00001560"},
+ {{al, r0, 0x00002ac0}, false, al, "al r0 0x00002ac0", "al_r0_0x00002ac0"},
+ {{al, r0, 0x00005580}, false, al, "al r0 0x00005580", "al_r0_0x00005580"},
+ {{al, r0, 0x0000ab00}, false, al, "al r0 0x0000ab00", "al_r0_0x0000ab00"},
+ {{al, r0, 0x00015600}, false, al, "al r0 0x00015600", "al_r0_0x00015600"},
+ {{al, r0, 0x0002ac00}, false, al, "al r0 0x0002ac00", "al_r0_0x0002ac00"},
+ {{al, r0, 0x00055800}, false, al, "al r0 0x00055800", "al_r0_0x00055800"},
+ {{al, r0, 0x000ab000}, false, al, "al r0 0x000ab000", "al_r0_0x000ab000"},
+ {{al, r0, 0x00156000}, false, al, "al r0 0x00156000", "al_r0_0x00156000"},
+ {{al, r0, 0x002ac000}, false, al, "al r0 0x002ac000", "al_r0_0x002ac000"},
+ {{al, r0, 0x00558000}, false, al, "al r0 0x00558000", "al_r0_0x00558000"},
+ {{al, r0, 0x00ab0000}, false, al, "al r0 0x00ab0000", "al_r0_0x00ab0000"},
+ {{al, r0, 0x01560000}, false, al, "al r0 0x01560000", "al_r0_0x01560000"},
+ {{al, r0, 0x02ac0000}, false, al, "al r0 0x02ac0000", "al_r0_0x02ac0000"},
+ {{al, r0, 0x05580000}, false, al, "al r0 0x05580000", "al_r0_0x05580000"},
+ {{al, r0, 0x0ab00000}, false, al, "al r0 0x0ab00000", "al_r0_0x0ab00000"},
+ {{al, r0, 0x15600000}, false, al, "al r0 0x15600000", "al_r0_0x15600000"},
+ {{al, r0, 0x2ac00000}, false, al, "al r0 0x2ac00000", "al_r0_0x2ac00000"},
+ {{al, r0, 0x55800000}, false, al, "al r0 0x55800000", "al_r0_0x55800000"},
+ {{al, r0, 0xab000000}, false, al, "al r0 0xab000000", "al_r0_0xab000000"},
+ {{al, r0, 0x000000ab}, false, al, "al r0 0x000000ab", "al_r0_0x000000ab"},
+ {{al, r0, 0x00ab00ab}, false, al, "al r0 0x00ab00ab", "al_r0_0x00ab00ab"},
+ {{al, r0, 0xab00ab00}, false, al, "al r0 0xab00ab00", "al_r0_0xab00ab00"},
+ {{al, r0, 0xabababab}, false, al, "al r0 0xabababab", "al_r0_0xabababab"},
+ {{al, r1, 0x000001fe}, false, al, "al r1 0x000001fe", "al_r1_0x000001fe"},
+ {{al, r1, 0x000003fc}, false, al, "al r1 0x000003fc", "al_r1_0x000003fc"},
+ {{al, r1, 0x000007f8}, false, al, "al r1 0x000007f8", "al_r1_0x000007f8"},
+ {{al, r1, 0x00000ff0}, false, al, "al r1 0x00000ff0", "al_r1_0x00000ff0"},
+ {{al, r1, 0x00001fe0}, false, al, "al r1 0x00001fe0", "al_r1_0x00001fe0"},
+ {{al, r1, 0x00003fc0}, false, al, "al r1 0x00003fc0", "al_r1_0x00003fc0"},
+ {{al, r1, 0x00007f80}, false, al, "al r1 0x00007f80", "al_r1_0x00007f80"},
+ {{al, r1, 0x0000ff00}, false, al, "al r1 0x0000ff00", "al_r1_0x0000ff00"},
+ {{al, r1, 0x0001fe00}, false, al, "al r1 0x0001fe00", "al_r1_0x0001fe00"},
+ {{al, r1, 0x0003fc00}, false, al, "al r1 0x0003fc00", "al_r1_0x0003fc00"},
+ {{al, r1, 0x0007f800}, false, al, "al r1 0x0007f800", "al_r1_0x0007f800"},
+ {{al, r1, 0x000ff000}, false, al, "al r1 0x000ff000", "al_r1_0x000ff000"},
+ {{al, r1, 0x001fe000}, false, al, "al r1 0x001fe000", "al_r1_0x001fe000"},
+ {{al, r1, 0x003fc000}, false, al, "al r1 0x003fc000", "al_r1_0x003fc000"},
+ {{al, r1, 0x007f8000}, false, al, "al r1 0x007f8000", "al_r1_0x007f8000"},
+ {{al, r1, 0x00ff0000}, false, al, "al r1 0x00ff0000", "al_r1_0x00ff0000"},
+ {{al, r1, 0x01fe0000}, false, al, "al r1 0x01fe0000", "al_r1_0x01fe0000"},
+ {{al, r1, 0x03fc0000}, false, al, "al r1 0x03fc0000", "al_r1_0x03fc0000"},
+ {{al, r1, 0x07f80000}, false, al, "al r1 0x07f80000", "al_r1_0x07f80000"},
+ {{al, r1, 0x0ff00000}, false, al, "al r1 0x0ff00000", "al_r1_0x0ff00000"},
+ {{al, r1, 0x1fe00000}, false, al, "al r1 0x1fe00000", "al_r1_0x1fe00000"},
+ {{al, r1, 0x3fc00000}, false, al, "al r1 0x3fc00000", "al_r1_0x3fc00000"},
+ {{al, r1, 0x7f800000}, false, al, "al r1 0x7f800000", "al_r1_0x7f800000"},
+ {{al, r1, 0xff000000}, false, al, "al r1 0xff000000", "al_r1_0xff000000"},
+ {{al, r1, 0x000000ff}, false, al, "al r1 0x000000ff", "al_r1_0x000000ff"},
+ {{al, r1, 0x00ff00ff}, false, al, "al r1 0x00ff00ff", "al_r1_0x00ff00ff"},
+ {{al, r1, 0xff00ff00}, false, al, "al r1 0xff00ff00", "al_r1_0xff00ff00"},
+ {{al, r1, 0xffffffff}, false, al, "al r1 0xffffffff", "al_r1_0xffffffff"},
+ {{al, r1, 0x00000156}, false, al, "al r1 0x00000156", "al_r1_0x00000156"},
+ {{al, r1, 0x000002ac}, false, al, "al r1 0x000002ac", "al_r1_0x000002ac"},
+ {{al, r1, 0x00000558}, false, al, "al r1 0x00000558", "al_r1_0x00000558"},
+ {{al, r1, 0x00000ab0}, false, al, "al r1 0x00000ab0", "al_r1_0x00000ab0"},
+ {{al, r1, 0x00001560}, false, al, "al r1 0x00001560", "al_r1_0x00001560"},
+ {{al, r1, 0x00002ac0}, false, al, "al r1 0x00002ac0", "al_r1_0x00002ac0"},
+ {{al, r1, 0x00005580}, false, al, "al r1 0x00005580", "al_r1_0x00005580"},
+ {{al, r1, 0x0000ab00}, false, al, "al r1 0x0000ab00", "al_r1_0x0000ab00"},
+ {{al, r1, 0x00015600}, false, al, "al r1 0x00015600", "al_r1_0x00015600"},
+ {{al, r1, 0x0002ac00}, false, al, "al r1 0x0002ac00", "al_r1_0x0002ac00"},
+ {{al, r1, 0x00055800}, false, al, "al r1 0x00055800", "al_r1_0x00055800"},
+ {{al, r1, 0x000ab000}, false, al, "al r1 0x000ab000", "al_r1_0x000ab000"},
+ {{al, r1, 0x00156000}, false, al, "al r1 0x00156000", "al_r1_0x00156000"},
+ {{al, r1, 0x002ac000}, false, al, "al r1 0x002ac000", "al_r1_0x002ac000"},
+ {{al, r1, 0x00558000}, false, al, "al r1 0x00558000", "al_r1_0x00558000"},
+ {{al, r1, 0x00ab0000}, false, al, "al r1 0x00ab0000", "al_r1_0x00ab0000"},
+ {{al, r1, 0x01560000}, false, al, "al r1 0x01560000", "al_r1_0x01560000"},
+ {{al, r1, 0x02ac0000}, false, al, "al r1 0x02ac0000", "al_r1_0x02ac0000"},
+ {{al, r1, 0x05580000}, false, al, "al r1 0x05580000", "al_r1_0x05580000"},
+ {{al, r1, 0x0ab00000}, false, al, "al r1 0x0ab00000", "al_r1_0x0ab00000"},
+ {{al, r1, 0x15600000}, false, al, "al r1 0x15600000", "al_r1_0x15600000"},
+ {{al, r1, 0x2ac00000}, false, al, "al r1 0x2ac00000", "al_r1_0x2ac00000"},
+ {{al, r1, 0x55800000}, false, al, "al r1 0x55800000", "al_r1_0x55800000"},
+ {{al, r1, 0xab000000}, false, al, "al r1 0xab000000", "al_r1_0xab000000"},
+ {{al, r1, 0x000000ab}, false, al, "al r1 0x000000ab", "al_r1_0x000000ab"},
+ {{al, r1, 0x00ab00ab}, false, al, "al r1 0x00ab00ab", "al_r1_0x00ab00ab"},
+ {{al, r1, 0xab00ab00}, false, al, "al r1 0xab00ab00", "al_r1_0xab00ab00"},
+ {{al, r1, 0xabababab}, false, al, "al r1 0xabababab", "al_r1_0xabababab"},
+ {{al, r2, 0x000001fe}, false, al, "al r2 0x000001fe", "al_r2_0x000001fe"},
+ {{al, r2, 0x000003fc}, false, al, "al r2 0x000003fc", "al_r2_0x000003fc"},
+ {{al, r2, 0x000007f8}, false, al, "al r2 0x000007f8", "al_r2_0x000007f8"},
+ {{al, r2, 0x00000ff0}, false, al, "al r2 0x00000ff0", "al_r2_0x00000ff0"},
+ {{al, r2, 0x00001fe0}, false, al, "al r2 0x00001fe0", "al_r2_0x00001fe0"},
+ {{al, r2, 0x00003fc0}, false, al, "al r2 0x00003fc0", "al_r2_0x00003fc0"},
+ {{al, r2, 0x00007f80}, false, al, "al r2 0x00007f80", "al_r2_0x00007f80"},
+ {{al, r2, 0x0000ff00}, false, al, "al r2 0x0000ff00", "al_r2_0x0000ff00"},
+ {{al, r2, 0x0001fe00}, false, al, "al r2 0x0001fe00", "al_r2_0x0001fe00"},
+ {{al, r2, 0x0003fc00}, false, al, "al r2 0x0003fc00", "al_r2_0x0003fc00"},
+ {{al, r2, 0x0007f800}, false, al, "al r2 0x0007f800", "al_r2_0x0007f800"},
+ {{al, r2, 0x000ff000}, false, al, "al r2 0x000ff000", "al_r2_0x000ff000"},
+ {{al, r2, 0x001fe000}, false, al, "al r2 0x001fe000", "al_r2_0x001fe000"},
+ {{al, r2, 0x003fc000}, false, al, "al r2 0x003fc000", "al_r2_0x003fc000"},
+ {{al, r2, 0x007f8000}, false, al, "al r2 0x007f8000", "al_r2_0x007f8000"},
+ {{al, r2, 0x00ff0000}, false, al, "al r2 0x00ff0000", "al_r2_0x00ff0000"},
+ {{al, r2, 0x01fe0000}, false, al, "al r2 0x01fe0000", "al_r2_0x01fe0000"},
+ {{al, r2, 0x03fc0000}, false, al, "al r2 0x03fc0000", "al_r2_0x03fc0000"},
+ {{al, r2, 0x07f80000}, false, al, "al r2 0x07f80000", "al_r2_0x07f80000"},
+ {{al, r2, 0x0ff00000}, false, al, "al r2 0x0ff00000", "al_r2_0x0ff00000"},
+ {{al, r2, 0x1fe00000}, false, al, "al r2 0x1fe00000", "al_r2_0x1fe00000"},
+ {{al, r2, 0x3fc00000}, false, al, "al r2 0x3fc00000", "al_r2_0x3fc00000"},
+ {{al, r2, 0x7f800000}, false, al, "al r2 0x7f800000", "al_r2_0x7f800000"},
+ {{al, r2, 0xff000000}, false, al, "al r2 0xff000000", "al_r2_0xff000000"},
+ {{al, r2, 0x000000ff}, false, al, "al r2 0x000000ff", "al_r2_0x000000ff"},
+ {{al, r2, 0x00ff00ff}, false, al, "al r2 0x00ff00ff", "al_r2_0x00ff00ff"},
+ {{al, r2, 0xff00ff00}, false, al, "al r2 0xff00ff00", "al_r2_0xff00ff00"},
+ {{al, r2, 0xffffffff}, false, al, "al r2 0xffffffff", "al_r2_0xffffffff"},
+ {{al, r2, 0x00000156}, false, al, "al r2 0x00000156", "al_r2_0x00000156"},
+ {{al, r2, 0x000002ac}, false, al, "al r2 0x000002ac", "al_r2_0x000002ac"},
+ {{al, r2, 0x00000558}, false, al, "al r2 0x00000558", "al_r2_0x00000558"},
+ {{al, r2, 0x00000ab0}, false, al, "al r2 0x00000ab0", "al_r2_0x00000ab0"},
+ {{al, r2, 0x00001560}, false, al, "al r2 0x00001560", "al_r2_0x00001560"},
+ {{al, r2, 0x00002ac0}, false, al, "al r2 0x00002ac0", "al_r2_0x00002ac0"},
+ {{al, r2, 0x00005580}, false, al, "al r2 0x00005580", "al_r2_0x00005580"},
+ {{al, r2, 0x0000ab00}, false, al, "al r2 0x0000ab00", "al_r2_0x0000ab00"},
+ {{al, r2, 0x00015600}, false, al, "al r2 0x00015600", "al_r2_0x00015600"},
+ {{al, r2, 0x0002ac00}, false, al, "al r2 0x0002ac00", "al_r2_0x0002ac00"},
+ {{al, r2, 0x00055800}, false, al, "al r2 0x00055800", "al_r2_0x00055800"},
+ {{al, r2, 0x000ab000}, false, al, "al r2 0x000ab000", "al_r2_0x000ab000"},
+ {{al, r2, 0x00156000}, false, al, "al r2 0x00156000", "al_r2_0x00156000"},
+ {{al, r2, 0x002ac000}, false, al, "al r2 0x002ac000", "al_r2_0x002ac000"},
+ {{al, r2, 0x00558000}, false, al, "al r2 0x00558000", "al_r2_0x00558000"},
+ {{al, r2, 0x00ab0000}, false, al, "al r2 0x00ab0000", "al_r2_0x00ab0000"},
+ {{al, r2, 0x01560000}, false, al, "al r2 0x01560000", "al_r2_0x01560000"},
+ {{al, r2, 0x02ac0000}, false, al, "al r2 0x02ac0000", "al_r2_0x02ac0000"},
+ {{al, r2, 0x05580000}, false, al, "al r2 0x05580000", "al_r2_0x05580000"},
+ {{al, r2, 0x0ab00000}, false, al, "al r2 0x0ab00000", "al_r2_0x0ab00000"},
+ {{al, r2, 0x15600000}, false, al, "al r2 0x15600000", "al_r2_0x15600000"},
+ {{al, r2, 0x2ac00000}, false, al, "al r2 0x2ac00000", "al_r2_0x2ac00000"},
+ {{al, r2, 0x55800000}, false, al, "al r2 0x55800000", "al_r2_0x55800000"},
+ {{al, r2, 0xab000000}, false, al, "al r2 0xab000000", "al_r2_0xab000000"},
+ {{al, r2, 0x000000ab}, false, al, "al r2 0x000000ab", "al_r2_0x000000ab"},
+ {{al, r2, 0x00ab00ab}, false, al, "al r2 0x00ab00ab", "al_r2_0x00ab00ab"},
+ {{al, r2, 0xab00ab00}, false, al, "al r2 0xab00ab00", "al_r2_0xab00ab00"},
+ {{al, r2, 0xabababab}, false, al, "al r2 0xabababab", "al_r2_0xabababab"},
+ {{al, r3, 0x000001fe}, false, al, "al r3 0x000001fe", "al_r3_0x000001fe"},
+ {{al, r3, 0x000003fc}, false, al, "al r3 0x000003fc", "al_r3_0x000003fc"},
+ {{al, r3, 0x000007f8}, false, al, "al r3 0x000007f8", "al_r3_0x000007f8"},
+ {{al, r3, 0x00000ff0}, false, al, "al r3 0x00000ff0", "al_r3_0x00000ff0"},
+ {{al, r3, 0x00001fe0}, false, al, "al r3 0x00001fe0", "al_r3_0x00001fe0"},
+ {{al, r3, 0x00003fc0}, false, al, "al r3 0x00003fc0", "al_r3_0x00003fc0"},
+ {{al, r3, 0x00007f80}, false, al, "al r3 0x00007f80", "al_r3_0x00007f80"},
+ {{al, r3, 0x0000ff00}, false, al, "al r3 0x0000ff00", "al_r3_0x0000ff00"},
+ {{al, r3, 0x0001fe00}, false, al, "al r3 0x0001fe00", "al_r3_0x0001fe00"},
+ {{al, r3, 0x0003fc00}, false, al, "al r3 0x0003fc00", "al_r3_0x0003fc00"},
+ {{al, r3, 0x0007f800}, false, al, "al r3 0x0007f800", "al_r3_0x0007f800"},
+ {{al, r3, 0x000ff000}, false, al, "al r3 0x000ff000", "al_r3_0x000ff000"},
+ {{al, r3, 0x001fe000}, false, al, "al r3 0x001fe000", "al_r3_0x001fe000"},
+ {{al, r3, 0x003fc000}, false, al, "al r3 0x003fc000", "al_r3_0x003fc000"},
+ {{al, r3, 0x007f8000}, false, al, "al r3 0x007f8000", "al_r3_0x007f8000"},
+ {{al, r3, 0x00ff0000}, false, al, "al r3 0x00ff0000", "al_r3_0x00ff0000"},
+ {{al, r3, 0x01fe0000}, false, al, "al r3 0x01fe0000", "al_r3_0x01fe0000"},
+ {{al, r3, 0x03fc0000}, false, al, "al r3 0x03fc0000", "al_r3_0x03fc0000"},
+ {{al, r3, 0x07f80000}, false, al, "al r3 0x07f80000", "al_r3_0x07f80000"},
+ {{al, r3, 0x0ff00000}, false, al, "al r3 0x0ff00000", "al_r3_0x0ff00000"},
+ {{al, r3, 0x1fe00000}, false, al, "al r3 0x1fe00000", "al_r3_0x1fe00000"},
+ {{al, r3, 0x3fc00000}, false, al, "al r3 0x3fc00000", "al_r3_0x3fc00000"},
+ {{al, r3, 0x7f800000}, false, al, "al r3 0x7f800000", "al_r3_0x7f800000"},
+ {{al, r3, 0xff000000}, false, al, "al r3 0xff000000", "al_r3_0xff000000"},
+ {{al, r3, 0x000000ff}, false, al, "al r3 0x000000ff", "al_r3_0x000000ff"},
+ {{al, r3, 0x00ff00ff}, false, al, "al r3 0x00ff00ff", "al_r3_0x00ff00ff"},
+ {{al, r3, 0xff00ff00}, false, al, "al r3 0xff00ff00", "al_r3_0xff00ff00"},
+ {{al, r3, 0xffffffff}, false, al, "al r3 0xffffffff", "al_r3_0xffffffff"},
+ {{al, r3, 0x00000156}, false, al, "al r3 0x00000156", "al_r3_0x00000156"},
+ {{al, r3, 0x000002ac}, false, al, "al r3 0x000002ac", "al_r3_0x000002ac"},
+ {{al, r3, 0x00000558}, false, al, "al r3 0x00000558", "al_r3_0x00000558"},
+ {{al, r3, 0x00000ab0}, false, al, "al r3 0x00000ab0", "al_r3_0x00000ab0"},
+ {{al, r3, 0x00001560}, false, al, "al r3 0x00001560", "al_r3_0x00001560"},
+ {{al, r3, 0x00002ac0}, false, al, "al r3 0x00002ac0", "al_r3_0x00002ac0"},
+ {{al, r3, 0x00005580}, false, al, "al r3 0x00005580", "al_r3_0x00005580"},
+ {{al, r3, 0x0000ab00}, false, al, "al r3 0x0000ab00", "al_r3_0x0000ab00"},
+ {{al, r3, 0x00015600}, false, al, "al r3 0x00015600", "al_r3_0x00015600"},
+ {{al, r3, 0x0002ac00}, false, al, "al r3 0x0002ac00", "al_r3_0x0002ac00"},
+ {{al, r3, 0x00055800}, false, al, "al r3 0x00055800", "al_r3_0x00055800"},
+ {{al, r3, 0x000ab000}, false, al, "al r3 0x000ab000", "al_r3_0x000ab000"},
+ {{al, r3, 0x00156000}, false, al, "al r3 0x00156000", "al_r3_0x00156000"},
+ {{al, r3, 0x002ac000}, false, al, "al r3 0x002ac000", "al_r3_0x002ac000"},
+ {{al, r3, 0x00558000}, false, al, "al r3 0x00558000", "al_r3_0x00558000"},
+ {{al, r3, 0x00ab0000}, false, al, "al r3 0x00ab0000", "al_r3_0x00ab0000"},
+ {{al, r3, 0x01560000}, false, al, "al r3 0x01560000", "al_r3_0x01560000"},
+ {{al, r3, 0x02ac0000}, false, al, "al r3 0x02ac0000", "al_r3_0x02ac0000"},
+ {{al, r3, 0x05580000}, false, al, "al r3 0x05580000", "al_r3_0x05580000"},
+ {{al, r3, 0x0ab00000}, false, al, "al r3 0x0ab00000", "al_r3_0x0ab00000"},
+ {{al, r3, 0x15600000}, false, al, "al r3 0x15600000", "al_r3_0x15600000"},
+ {{al, r3, 0x2ac00000}, false, al, "al r3 0x2ac00000", "al_r3_0x2ac00000"},
+ {{al, r3, 0x55800000}, false, al, "al r3 0x55800000", "al_r3_0x55800000"},
+ {{al, r3, 0xab000000}, false, al, "al r3 0xab000000", "al_r3_0xab000000"},
+ {{al, r3, 0x000000ab}, false, al, "al r3 0x000000ab", "al_r3_0x000000ab"},
+ {{al, r3, 0x00ab00ab}, false, al, "al r3 0x00ab00ab", "al_r3_0x00ab00ab"},
+ {{al, r3, 0xab00ab00}, false, al, "al r3 0xab00ab00", "al_r3_0xab00ab00"},
+ {{al, r3, 0xabababab}, false, al, "al r3 0xabababab", "al_r3_0xabababab"},
+ {{al, r4, 0x000001fe}, false, al, "al r4 0x000001fe", "al_r4_0x000001fe"},
+ {{al, r4, 0x000003fc}, false, al, "al r4 0x000003fc", "al_r4_0x000003fc"},
+ {{al, r4, 0x000007f8}, false, al, "al r4 0x000007f8", "al_r4_0x000007f8"},
+ {{al, r4, 0x00000ff0}, false, al, "al r4 0x00000ff0", "al_r4_0x00000ff0"},
+ {{al, r4, 0x00001fe0}, false, al, "al r4 0x00001fe0", "al_r4_0x00001fe0"},
+ {{al, r4, 0x00003fc0}, false, al, "al r4 0x00003fc0", "al_r4_0x00003fc0"},
+ {{al, r4, 0x00007f80}, false, al, "al r4 0x00007f80", "al_r4_0x00007f80"},
+ {{al, r4, 0x0000ff00}, false, al, "al r4 0x0000ff00", "al_r4_0x0000ff00"},
+ {{al, r4, 0x0001fe00}, false, al, "al r4 0x0001fe00", "al_r4_0x0001fe00"},
+ {{al, r4, 0x0003fc00}, false, al, "al r4 0x0003fc00", "al_r4_0x0003fc00"},
+ {{al, r4, 0x0007f800}, false, al, "al r4 0x0007f800", "al_r4_0x0007f800"},
+ {{al, r4, 0x000ff000}, false, al, "al r4 0x000ff000", "al_r4_0x000ff000"},
+ {{al, r4, 0x001fe000}, false, al, "al r4 0x001fe000", "al_r4_0x001fe000"},
+ {{al, r4, 0x003fc000}, false, al, "al r4 0x003fc000", "al_r4_0x003fc000"},
+ {{al, r4, 0x007f8000}, false, al, "al r4 0x007f8000", "al_r4_0x007f8000"},
+ {{al, r4, 0x00ff0000}, false, al, "al r4 0x00ff0000", "al_r4_0x00ff0000"},
+ {{al, r4, 0x01fe0000}, false, al, "al r4 0x01fe0000", "al_r4_0x01fe0000"},
+ {{al, r4, 0x03fc0000}, false, al, "al r4 0x03fc0000", "al_r4_0x03fc0000"},
+ {{al, r4, 0x07f80000}, false, al, "al r4 0x07f80000", "al_r4_0x07f80000"},
+ {{al, r4, 0x0ff00000}, false, al, "al r4 0x0ff00000", "al_r4_0x0ff00000"},
+ {{al, r4, 0x1fe00000}, false, al, "al r4 0x1fe00000", "al_r4_0x1fe00000"},
+ {{al, r4, 0x3fc00000}, false, al, "al r4 0x3fc00000", "al_r4_0x3fc00000"},
+ {{al, r4, 0x7f800000}, false, al, "al r4 0x7f800000", "al_r4_0x7f800000"},
+ {{al, r4, 0xff000000}, false, al, "al r4 0xff000000", "al_r4_0xff000000"},
+ {{al, r4, 0x000000ff}, false, al, "al r4 0x000000ff", "al_r4_0x000000ff"},
+ {{al, r4, 0x00ff00ff}, false, al, "al r4 0x00ff00ff", "al_r4_0x00ff00ff"},
+ {{al, r4, 0xff00ff00}, false, al, "al r4 0xff00ff00", "al_r4_0xff00ff00"},
+ {{al, r4, 0xffffffff}, false, al, "al r4 0xffffffff", "al_r4_0xffffffff"},
+ {{al, r4, 0x00000156}, false, al, "al r4 0x00000156", "al_r4_0x00000156"},
+ {{al, r4, 0x000002ac}, false, al, "al r4 0x000002ac", "al_r4_0x000002ac"},
+ {{al, r4, 0x00000558}, false, al, "al r4 0x00000558", "al_r4_0x00000558"},
+ {{al, r4, 0x00000ab0}, false, al, "al r4 0x00000ab0", "al_r4_0x00000ab0"},
+ {{al, r4, 0x00001560}, false, al, "al r4 0x00001560", "al_r4_0x00001560"},
+ {{al, r4, 0x00002ac0}, false, al, "al r4 0x00002ac0", "al_r4_0x00002ac0"},
+ {{al, r4, 0x00005580}, false, al, "al r4 0x00005580", "al_r4_0x00005580"},
+ {{al, r4, 0x0000ab00}, false, al, "al r4 0x0000ab00", "al_r4_0x0000ab00"},
+ {{al, r4, 0x00015600}, false, al, "al r4 0x00015600", "al_r4_0x00015600"},
+ {{al, r4, 0x0002ac00}, false, al, "al r4 0x0002ac00", "al_r4_0x0002ac00"},
+ {{al, r4, 0x00055800}, false, al, "al r4 0x00055800", "al_r4_0x00055800"},
+ {{al, r4, 0x000ab000}, false, al, "al r4 0x000ab000", "al_r4_0x000ab000"},
+ {{al, r4, 0x00156000}, false, al, "al r4 0x00156000", "al_r4_0x00156000"},
+ {{al, r4, 0x002ac000}, false, al, "al r4 0x002ac000", "al_r4_0x002ac000"},
+ {{al, r4, 0x00558000}, false, al, "al r4 0x00558000", "al_r4_0x00558000"},
+ {{al, r4, 0x00ab0000}, false, al, "al r4 0x00ab0000", "al_r4_0x00ab0000"},
+ {{al, r4, 0x01560000}, false, al, "al r4 0x01560000", "al_r4_0x01560000"},
+ {{al, r4, 0x02ac0000}, false, al, "al r4 0x02ac0000", "al_r4_0x02ac0000"},
+ {{al, r4, 0x05580000}, false, al, "al r4 0x05580000", "al_r4_0x05580000"},
+ {{al, r4, 0x0ab00000}, false, al, "al r4 0x0ab00000", "al_r4_0x0ab00000"},
+ {{al, r4, 0x15600000}, false, al, "al r4 0x15600000", "al_r4_0x15600000"},
+ {{al, r4, 0x2ac00000}, false, al, "al r4 0x2ac00000", "al_r4_0x2ac00000"},
+ {{al, r4, 0x55800000}, false, al, "al r4 0x55800000", "al_r4_0x55800000"},
+ {{al, r4, 0xab000000}, false, al, "al r4 0xab000000", "al_r4_0xab000000"},
+ {{al, r4, 0x000000ab}, false, al, "al r4 0x000000ab", "al_r4_0x000000ab"},
+ {{al, r4, 0x00ab00ab}, false, al, "al r4 0x00ab00ab", "al_r4_0x00ab00ab"},
+ {{al, r4, 0xab00ab00}, false, al, "al r4 0xab00ab00", "al_r4_0xab00ab00"},
+ {{al, r4, 0xabababab}, false, al, "al r4 0xabababab", "al_r4_0xabababab"},
+ {{al, r5, 0x000001fe}, false, al, "al r5 0x000001fe", "al_r5_0x000001fe"},
+ {{al, r5, 0x000003fc}, false, al, "al r5 0x000003fc", "al_r5_0x000003fc"},
+ {{al, r5, 0x000007f8}, false, al, "al r5 0x000007f8", "al_r5_0x000007f8"},
+ {{al, r5, 0x00000ff0}, false, al, "al r5 0x00000ff0", "al_r5_0x00000ff0"},
+ {{al, r5, 0x00001fe0}, false, al, "al r5 0x00001fe0", "al_r5_0x00001fe0"},
+ {{al, r5, 0x00003fc0}, false, al, "al r5 0x00003fc0", "al_r5_0x00003fc0"},
+ {{al, r5, 0x00007f80}, false, al, "al r5 0x00007f80", "al_r5_0x00007f80"},
+ {{al, r5, 0x0000ff00}, false, al, "al r5 0x0000ff00", "al_r5_0x0000ff00"},
+ {{al, r5, 0x0001fe00}, false, al, "al r5 0x0001fe00", "al_r5_0x0001fe00"},
+ {{al, r5, 0x0003fc00}, false, al, "al r5 0x0003fc00", "al_r5_0x0003fc00"},
+ {{al, r5, 0x0007f800}, false, al, "al r5 0x0007f800", "al_r5_0x0007f800"},
+ {{al, r5, 0x000ff000}, false, al, "al r5 0x000ff000", "al_r5_0x000ff000"},
+ {{al, r5, 0x001fe000}, false, al, "al r5 0x001fe000", "al_r5_0x001fe000"},
+ {{al, r5, 0x003fc000}, false, al, "al r5 0x003fc000", "al_r5_0x003fc000"},
+ {{al, r5, 0x007f8000}, false, al, "al r5 0x007f8000", "al_r5_0x007f8000"},
+ {{al, r5, 0x00ff0000}, false, al, "al r5 0x00ff0000", "al_r5_0x00ff0000"},
+ {{al, r5, 0x01fe0000}, false, al, "al r5 0x01fe0000", "al_r5_0x01fe0000"},
+ {{al, r5, 0x03fc0000}, false, al, "al r5 0x03fc0000", "al_r5_0x03fc0000"},
+ {{al, r5, 0x07f80000}, false, al, "al r5 0x07f80000", "al_r5_0x07f80000"},
+ {{al, r5, 0x0ff00000}, false, al, "al r5 0x0ff00000", "al_r5_0x0ff00000"},
+ {{al, r5, 0x1fe00000}, false, al, "al r5 0x1fe00000", "al_r5_0x1fe00000"},
+ {{al, r5, 0x3fc00000}, false, al, "al r5 0x3fc00000", "al_r5_0x3fc00000"},
+ {{al, r5, 0x7f800000}, false, al, "al r5 0x7f800000", "al_r5_0x7f800000"},
+ {{al, r5, 0xff000000}, false, al, "al r5 0xff000000", "al_r5_0xff000000"},
+ {{al, r5, 0x000000ff}, false, al, "al r5 0x000000ff", "al_r5_0x000000ff"},
+ {{al, r5, 0x00ff00ff}, false, al, "al r5 0x00ff00ff", "al_r5_0x00ff00ff"},
+ {{al, r5, 0xff00ff00}, false, al, "al r5 0xff00ff00", "al_r5_0xff00ff00"},
+ {{al, r5, 0xffffffff}, false, al, "al r5 0xffffffff", "al_r5_0xffffffff"},
+ {{al, r5, 0x00000156}, false, al, "al r5 0x00000156", "al_r5_0x00000156"},
+ {{al, r5, 0x000002ac}, false, al, "al r5 0x000002ac", "al_r5_0x000002ac"},
+ {{al, r5, 0x00000558}, false, al, "al r5 0x00000558", "al_r5_0x00000558"},
+ {{al, r5, 0x00000ab0}, false, al, "al r5 0x00000ab0", "al_r5_0x00000ab0"},
+ {{al, r5, 0x00001560}, false, al, "al r5 0x00001560", "al_r5_0x00001560"},
+ {{al, r5, 0x00002ac0}, false, al, "al r5 0x00002ac0", "al_r5_0x00002ac0"},
+ {{al, r5, 0x00005580}, false, al, "al r5 0x00005580", "al_r5_0x00005580"},
+ {{al, r5, 0x0000ab00}, false, al, "al r5 0x0000ab00", "al_r5_0x0000ab00"},
+ {{al, r5, 0x00015600}, false, al, "al r5 0x00015600", "al_r5_0x00015600"},
+ {{al, r5, 0x0002ac00}, false, al, "al r5 0x0002ac00", "al_r5_0x0002ac00"},
+ {{al, r5, 0x00055800}, false, al, "al r5 0x00055800", "al_r5_0x00055800"},
+ {{al, r5, 0x000ab000}, false, al, "al r5 0x000ab000", "al_r5_0x000ab000"},
+ {{al, r5, 0x00156000}, false, al, "al r5 0x00156000", "al_r5_0x00156000"},
+ {{al, r5, 0x002ac000}, false, al, "al r5 0x002ac000", "al_r5_0x002ac000"},
+ {{al, r5, 0x00558000}, false, al, "al r5 0x00558000", "al_r5_0x00558000"},
+ {{al, r5, 0x00ab0000}, false, al, "al r5 0x00ab0000", "al_r5_0x00ab0000"},
+ {{al, r5, 0x01560000}, false, al, "al r5 0x01560000", "al_r5_0x01560000"},
+ {{al, r5, 0x02ac0000}, false, al, "al r5 0x02ac0000", "al_r5_0x02ac0000"},
+ {{al, r5, 0x05580000}, false, al, "al r5 0x05580000", "al_r5_0x05580000"},
+ {{al, r5, 0x0ab00000}, false, al, "al r5 0x0ab00000", "al_r5_0x0ab00000"},
+ {{al, r5, 0x15600000}, false, al, "al r5 0x15600000", "al_r5_0x15600000"},
+ {{al, r5, 0x2ac00000}, false, al, "al r5 0x2ac00000", "al_r5_0x2ac00000"},
+ {{al, r5, 0x55800000}, false, al, "al r5 0x55800000", "al_r5_0x55800000"},
+ {{al, r5, 0xab000000}, false, al, "al r5 0xab000000", "al_r5_0xab000000"},
+ {{al, r5, 0x000000ab}, false, al, "al r5 0x000000ab", "al_r5_0x000000ab"},
+ {{al, r5, 0x00ab00ab}, false, al, "al r5 0x00ab00ab", "al_r5_0x00ab00ab"},
+ {{al, r5, 0xab00ab00}, false, al, "al r5 0xab00ab00", "al_r5_0xab00ab00"},
+ {{al, r5, 0xabababab}, false, al, "al r5 0xabababab", "al_r5_0xabababab"},
+ {{al, r6, 0x000001fe}, false, al, "al r6 0x000001fe", "al_r6_0x000001fe"},
+ {{al, r6, 0x000003fc}, false, al, "al r6 0x000003fc", "al_r6_0x000003fc"},
+ {{al, r6, 0x000007f8}, false, al, "al r6 0x000007f8", "al_r6_0x000007f8"},
+ {{al, r6, 0x00000ff0}, false, al, "al r6 0x00000ff0", "al_r6_0x00000ff0"},
+ {{al, r6, 0x00001fe0}, false, al, "al r6 0x00001fe0", "al_r6_0x00001fe0"},
+ {{al, r6, 0x00003fc0}, false, al, "al r6 0x00003fc0", "al_r6_0x00003fc0"},
+ {{al, r6, 0x00007f80}, false, al, "al r6 0x00007f80", "al_r6_0x00007f80"},
+ {{al, r6, 0x0000ff00}, false, al, "al r6 0x0000ff00", "al_r6_0x0000ff00"},
+ {{al, r6, 0x0001fe00}, false, al, "al r6 0x0001fe00", "al_r6_0x0001fe00"},
+ {{al, r6, 0x0003fc00}, false, al, "al r6 0x0003fc00", "al_r6_0x0003fc00"},
+ {{al, r6, 0x0007f800}, false, al, "al r6 0x0007f800", "al_r6_0x0007f800"},
+ {{al, r6, 0x000ff000}, false, al, "al r6 0x000ff000", "al_r6_0x000ff000"},
+ {{al, r6, 0x001fe000}, false, al, "al r6 0x001fe000", "al_r6_0x001fe000"},
+ {{al, r6, 0x003fc000}, false, al, "al r6 0x003fc000", "al_r6_0x003fc000"},
+ {{al, r6, 0x007f8000}, false, al, "al r6 0x007f8000", "al_r6_0x007f8000"},
+ {{al, r6, 0x00ff0000}, false, al, "al r6 0x00ff0000", "al_r6_0x00ff0000"},
+ {{al, r6, 0x01fe0000}, false, al, "al r6 0x01fe0000", "al_r6_0x01fe0000"},
+ {{al, r6, 0x03fc0000}, false, al, "al r6 0x03fc0000", "al_r6_0x03fc0000"},
+ {{al, r6, 0x07f80000}, false, al, "al r6 0x07f80000", "al_r6_0x07f80000"},
+ {{al, r6, 0x0ff00000}, false, al, "al r6 0x0ff00000", "al_r6_0x0ff00000"},
+ {{al, r6, 0x1fe00000}, false, al, "al r6 0x1fe00000", "al_r6_0x1fe00000"},
+ {{al, r6, 0x3fc00000}, false, al, "al r6 0x3fc00000", "al_r6_0x3fc00000"},
+ {{al, r6, 0x7f800000}, false, al, "al r6 0x7f800000", "al_r6_0x7f800000"},
+ {{al, r6, 0xff000000}, false, al, "al r6 0xff000000", "al_r6_0xff000000"},
+ {{al, r6, 0x000000ff}, false, al, "al r6 0x000000ff", "al_r6_0x000000ff"},
+ {{al, r6, 0x00ff00ff}, false, al, "al r6 0x00ff00ff", "al_r6_0x00ff00ff"},
+ {{al, r6, 0xff00ff00}, false, al, "al r6 0xff00ff00", "al_r6_0xff00ff00"},
+ {{al, r6, 0xffffffff}, false, al, "al r6 0xffffffff", "al_r6_0xffffffff"},
+ {{al, r6, 0x00000156}, false, al, "al r6 0x00000156", "al_r6_0x00000156"},
+ {{al, r6, 0x000002ac}, false, al, "al r6 0x000002ac", "al_r6_0x000002ac"},
+ {{al, r6, 0x00000558}, false, al, "al r6 0x00000558", "al_r6_0x00000558"},
+ {{al, r6, 0x00000ab0}, false, al, "al r6 0x00000ab0", "al_r6_0x00000ab0"},
+ {{al, r6, 0x00001560}, false, al, "al r6 0x00001560", "al_r6_0x00001560"},
+ {{al, r6, 0x00002ac0}, false, al, "al r6 0x00002ac0", "al_r6_0x00002ac0"},
+ {{al, r6, 0x00005580}, false, al, "al r6 0x00005580", "al_r6_0x00005580"},
+ {{al, r6, 0x0000ab00}, false, al, "al r6 0x0000ab00", "al_r6_0x0000ab00"},
+ {{al, r6, 0x00015600}, false, al, "al r6 0x00015600", "al_r6_0x00015600"},
+ {{al, r6, 0x0002ac00}, false, al, "al r6 0x0002ac00", "al_r6_0x0002ac00"},
+ {{al, r6, 0x00055800}, false, al, "al r6 0x00055800", "al_r6_0x00055800"},
+ {{al, r6, 0x000ab000}, false, al, "al r6 0x000ab000", "al_r6_0x000ab000"},
+ {{al, r6, 0x00156000}, false, al, "al r6 0x00156000", "al_r6_0x00156000"},
+ {{al, r6, 0x002ac000}, false, al, "al r6 0x002ac000", "al_r6_0x002ac000"},
+ {{al, r6, 0x00558000}, false, al, "al r6 0x00558000", "al_r6_0x00558000"},
+ {{al, r6, 0x00ab0000}, false, al, "al r6 0x00ab0000", "al_r6_0x00ab0000"},
+ {{al, r6, 0x01560000}, false, al, "al r6 0x01560000", "al_r6_0x01560000"},
+ {{al, r6, 0x02ac0000}, false, al, "al r6 0x02ac0000", "al_r6_0x02ac0000"},
+ {{al, r6, 0x05580000}, false, al, "al r6 0x05580000", "al_r6_0x05580000"},
+ {{al, r6, 0x0ab00000}, false, al, "al r6 0x0ab00000", "al_r6_0x0ab00000"},
+ {{al, r6, 0x15600000}, false, al, "al r6 0x15600000", "al_r6_0x15600000"},
+ {{al, r6, 0x2ac00000}, false, al, "al r6 0x2ac00000", "al_r6_0x2ac00000"},
+ {{al, r6, 0x55800000}, false, al, "al r6 0x55800000", "al_r6_0x55800000"},
+ {{al, r6, 0xab000000}, false, al, "al r6 0xab000000", "al_r6_0xab000000"},
+ {{al, r6, 0x000000ab}, false, al, "al r6 0x000000ab", "al_r6_0x000000ab"},
+ {{al, r6, 0x00ab00ab}, false, al, "al r6 0x00ab00ab", "al_r6_0x00ab00ab"},
+ {{al, r6, 0xab00ab00}, false, al, "al r6 0xab00ab00", "al_r6_0xab00ab00"},
+ {{al, r6, 0xabababab}, false, al, "al r6 0xabababab", "al_r6_0xabababab"},
+ {{al, r7, 0x000001fe}, false, al, "al r7 0x000001fe", "al_r7_0x000001fe"},
+ {{al, r7, 0x000003fc}, false, al, "al r7 0x000003fc", "al_r7_0x000003fc"},
+ {{al, r7, 0x000007f8}, false, al, "al r7 0x000007f8", "al_r7_0x000007f8"},
+ {{al, r7, 0x00000ff0}, false, al, "al r7 0x00000ff0", "al_r7_0x00000ff0"},
+ {{al, r7, 0x00001fe0}, false, al, "al r7 0x00001fe0", "al_r7_0x00001fe0"},
+ {{al, r7, 0x00003fc0}, false, al, "al r7 0x00003fc0", "al_r7_0x00003fc0"},
+ {{al, r7, 0x00007f80}, false, al, "al r7 0x00007f80", "al_r7_0x00007f80"},
+ {{al, r7, 0x0000ff00}, false, al, "al r7 0x0000ff00", "al_r7_0x0000ff00"},
+ {{al, r7, 0x0001fe00}, false, al, "al r7 0x0001fe00", "al_r7_0x0001fe00"},
+ {{al, r7, 0x0003fc00}, false, al, "al r7 0x0003fc00", "al_r7_0x0003fc00"},
+ {{al, r7, 0x0007f800}, false, al, "al r7 0x0007f800", "al_r7_0x0007f800"},
+ {{al, r7, 0x000ff000}, false, al, "al r7 0x000ff000", "al_r7_0x000ff000"},
+ {{al, r7, 0x001fe000}, false, al, "al r7 0x001fe000", "al_r7_0x001fe000"},
+ {{al, r7, 0x003fc000}, false, al, "al r7 0x003fc000", "al_r7_0x003fc000"},
+ {{al, r7, 0x007f8000}, false, al, "al r7 0x007f8000", "al_r7_0x007f8000"},
+ {{al, r7, 0x00ff0000}, false, al, "al r7 0x00ff0000", "al_r7_0x00ff0000"},
+ {{al, r7, 0x01fe0000}, false, al, "al r7 0x01fe0000", "al_r7_0x01fe0000"},
+ {{al, r7, 0x03fc0000}, false, al, "al r7 0x03fc0000", "al_r7_0x03fc0000"},
+ {{al, r7, 0x07f80000}, false, al, "al r7 0x07f80000", "al_r7_0x07f80000"},
+ {{al, r7, 0x0ff00000}, false, al, "al r7 0x0ff00000", "al_r7_0x0ff00000"},
+ {{al, r7, 0x1fe00000}, false, al, "al r7 0x1fe00000", "al_r7_0x1fe00000"},
+ {{al, r7, 0x3fc00000}, false, al, "al r7 0x3fc00000", "al_r7_0x3fc00000"},
+ {{al, r7, 0x7f800000}, false, al, "al r7 0x7f800000", "al_r7_0x7f800000"},
+ {{al, r7, 0xff000000}, false, al, "al r7 0xff000000", "al_r7_0xff000000"},
+ {{al, r7, 0x000000ff}, false, al, "al r7 0x000000ff", "al_r7_0x000000ff"},
+ {{al, r7, 0x00ff00ff}, false, al, "al r7 0x00ff00ff", "al_r7_0x00ff00ff"},
+ {{al, r7, 0xff00ff00}, false, al, "al r7 0xff00ff00", "al_r7_0xff00ff00"},
+ {{al, r7, 0xffffffff}, false, al, "al r7 0xffffffff", "al_r7_0xffffffff"},
+ {{al, r7, 0x00000156}, false, al, "al r7 0x00000156", "al_r7_0x00000156"},
+ {{al, r7, 0x000002ac}, false, al, "al r7 0x000002ac", "al_r7_0x000002ac"},
+ {{al, r7, 0x00000558}, false, al, "al r7 0x00000558", "al_r7_0x00000558"},
+ {{al, r7, 0x00000ab0}, false, al, "al r7 0x00000ab0", "al_r7_0x00000ab0"},
+ {{al, r7, 0x00001560}, false, al, "al r7 0x00001560", "al_r7_0x00001560"},
+ {{al, r7, 0x00002ac0}, false, al, "al r7 0x00002ac0", "al_r7_0x00002ac0"},
+ {{al, r7, 0x00005580}, false, al, "al r7 0x00005580", "al_r7_0x00005580"},
+ {{al, r7, 0x0000ab00}, false, al, "al r7 0x0000ab00", "al_r7_0x0000ab00"},
+ {{al, r7, 0x00015600}, false, al, "al r7 0x00015600", "al_r7_0x00015600"},
+ {{al, r7, 0x0002ac00}, false, al, "al r7 0x0002ac00", "al_r7_0x0002ac00"},
+ {{al, r7, 0x00055800}, false, al, "al r7 0x00055800", "al_r7_0x00055800"},
+ {{al, r7, 0x000ab000}, false, al, "al r7 0x000ab000", "al_r7_0x000ab000"},
+ {{al, r7, 0x00156000}, false, al, "al r7 0x00156000", "al_r7_0x00156000"},
+ {{al, r7, 0x002ac000}, false, al, "al r7 0x002ac000", "al_r7_0x002ac000"},
+ {{al, r7, 0x00558000}, false, al, "al r7 0x00558000", "al_r7_0x00558000"},
+ {{al, r7, 0x00ab0000}, false, al, "al r7 0x00ab0000", "al_r7_0x00ab0000"},
+ {{al, r7, 0x01560000}, false, al, "al r7 0x01560000", "al_r7_0x01560000"},
+ {{al, r7, 0x02ac0000}, false, al, "al r7 0x02ac0000", "al_r7_0x02ac0000"},
+ {{al, r7, 0x05580000}, false, al, "al r7 0x05580000", "al_r7_0x05580000"},
+ {{al, r7, 0x0ab00000}, false, al, "al r7 0x0ab00000", "al_r7_0x0ab00000"},
+ {{al, r7, 0x15600000}, false, al, "al r7 0x15600000", "al_r7_0x15600000"},
+ {{al, r7, 0x2ac00000}, false, al, "al r7 0x2ac00000", "al_r7_0x2ac00000"},
+ {{al, r7, 0x55800000}, false, al, "al r7 0x55800000", "al_r7_0x55800000"},
+ {{al, r7, 0xab000000}, false, al, "al r7 0xab000000", "al_r7_0xab000000"},
+ {{al, r7, 0x000000ab}, false, al, "al r7 0x000000ab", "al_r7_0x000000ab"},
+ {{al, r7, 0x00ab00ab}, false, al, "al r7 0x00ab00ab", "al_r7_0x00ab00ab"},
+ {{al, r7, 0xab00ab00}, false, al, "al r7 0xab00ab00", "al_r7_0xab00ab00"},
+ {{al, r7, 0xabababab}, false, al, "al r7 0xabababab", "al_r7_0xabababab"},
+ {{al, r8, 0x000001fe}, false, al, "al r8 0x000001fe", "al_r8_0x000001fe"},
+ {{al, r8, 0x000003fc}, false, al, "al r8 0x000003fc", "al_r8_0x000003fc"},
+ {{al, r8, 0x000007f8}, false, al, "al r8 0x000007f8", "al_r8_0x000007f8"},
+ {{al, r8, 0x00000ff0}, false, al, "al r8 0x00000ff0", "al_r8_0x00000ff0"},
+ {{al, r8, 0x00001fe0}, false, al, "al r8 0x00001fe0", "al_r8_0x00001fe0"},
+ {{al, r8, 0x00003fc0}, false, al, "al r8 0x00003fc0", "al_r8_0x00003fc0"},
+ {{al, r8, 0x00007f80}, false, al, "al r8 0x00007f80", "al_r8_0x00007f80"},
+ {{al, r8, 0x0000ff00}, false, al, "al r8 0x0000ff00", "al_r8_0x0000ff00"},
+ {{al, r8, 0x0001fe00}, false, al, "al r8 0x0001fe00", "al_r8_0x0001fe00"},
+ {{al, r8, 0x0003fc00}, false, al, "al r8 0x0003fc00", "al_r8_0x0003fc00"},
+ {{al, r8, 0x0007f800}, false, al, "al r8 0x0007f800", "al_r8_0x0007f800"},
+ {{al, r8, 0x000ff000}, false, al, "al r8 0x000ff000", "al_r8_0x000ff000"},
+ {{al, r8, 0x001fe000}, false, al, "al r8 0x001fe000", "al_r8_0x001fe000"},
+ {{al, r8, 0x003fc000}, false, al, "al r8 0x003fc000", "al_r8_0x003fc000"},
+ {{al, r8, 0x007f8000}, false, al, "al r8 0x007f8000", "al_r8_0x007f8000"},
+ {{al, r8, 0x00ff0000}, false, al, "al r8 0x00ff0000", "al_r8_0x00ff0000"},
+ {{al, r8, 0x01fe0000}, false, al, "al r8 0x01fe0000", "al_r8_0x01fe0000"},
+ {{al, r8, 0x03fc0000}, false, al, "al r8 0x03fc0000", "al_r8_0x03fc0000"},
+ {{al, r8, 0x07f80000}, false, al, "al r8 0x07f80000", "al_r8_0x07f80000"},
+ {{al, r8, 0x0ff00000}, false, al, "al r8 0x0ff00000", "al_r8_0x0ff00000"},
+ {{al, r8, 0x1fe00000}, false, al, "al r8 0x1fe00000", "al_r8_0x1fe00000"},
+ {{al, r8, 0x3fc00000}, false, al, "al r8 0x3fc00000", "al_r8_0x3fc00000"},
+ {{al, r8, 0x7f800000}, false, al, "al r8 0x7f800000", "al_r8_0x7f800000"},
+ {{al, r8, 0xff000000}, false, al, "al r8 0xff000000", "al_r8_0xff000000"},
+ {{al, r8, 0x000000ff}, false, al, "al r8 0x000000ff", "al_r8_0x000000ff"},
+ {{al, r8, 0x00ff00ff}, false, al, "al r8 0x00ff00ff", "al_r8_0x00ff00ff"},
+ {{al, r8, 0xff00ff00}, false, al, "al r8 0xff00ff00", "al_r8_0xff00ff00"},
+ {{al, r8, 0xffffffff}, false, al, "al r8 0xffffffff", "al_r8_0xffffffff"},
+ {{al, r8, 0x00000156}, false, al, "al r8 0x00000156", "al_r8_0x00000156"},
+ {{al, r8, 0x000002ac}, false, al, "al r8 0x000002ac", "al_r8_0x000002ac"},
+ {{al, r8, 0x00000558}, false, al, "al r8 0x00000558", "al_r8_0x00000558"},
+ {{al, r8, 0x00000ab0}, false, al, "al r8 0x00000ab0", "al_r8_0x00000ab0"},
+ {{al, r8, 0x00001560}, false, al, "al r8 0x00001560", "al_r8_0x00001560"},
+ {{al, r8, 0x00002ac0}, false, al, "al r8 0x00002ac0", "al_r8_0x00002ac0"},
+ {{al, r8, 0x00005580}, false, al, "al r8 0x00005580", "al_r8_0x00005580"},
+ {{al, r8, 0x0000ab00}, false, al, "al r8 0x0000ab00", "al_r8_0x0000ab00"},
+ {{al, r8, 0x00015600}, false, al, "al r8 0x00015600", "al_r8_0x00015600"},
+ {{al, r8, 0x0002ac00}, false, al, "al r8 0x0002ac00", "al_r8_0x0002ac00"},
+ {{al, r8, 0x00055800}, false, al, "al r8 0x00055800", "al_r8_0x00055800"},
+ {{al, r8, 0x000ab000}, false, al, "al r8 0x000ab000", "al_r8_0x000ab000"},
+ {{al, r8, 0x00156000}, false, al, "al r8 0x00156000", "al_r8_0x00156000"},
+ {{al, r8, 0x002ac000}, false, al, "al r8 0x002ac000", "al_r8_0x002ac000"},
+ {{al, r8, 0x00558000}, false, al, "al r8 0x00558000", "al_r8_0x00558000"},
+ {{al, r8, 0x00ab0000}, false, al, "al r8 0x00ab0000", "al_r8_0x00ab0000"},
+ {{al, r8, 0x01560000}, false, al, "al r8 0x01560000", "al_r8_0x01560000"},
+ {{al, r8, 0x02ac0000}, false, al, "al r8 0x02ac0000", "al_r8_0x02ac0000"},
+ {{al, r8, 0x05580000}, false, al, "al r8 0x05580000", "al_r8_0x05580000"},
+ {{al, r8, 0x0ab00000}, false, al, "al r8 0x0ab00000", "al_r8_0x0ab00000"},
+ {{al, r8, 0x15600000}, false, al, "al r8 0x15600000", "al_r8_0x15600000"},
+ {{al, r8, 0x2ac00000}, false, al, "al r8 0x2ac00000", "al_r8_0x2ac00000"},
+ {{al, r8, 0x55800000}, false, al, "al r8 0x55800000", "al_r8_0x55800000"},
+ {{al, r8, 0xab000000}, false, al, "al r8 0xab000000", "al_r8_0xab000000"},
+ {{al, r8, 0x000000ab}, false, al, "al r8 0x000000ab", "al_r8_0x000000ab"},
+ {{al, r8, 0x00ab00ab}, false, al, "al r8 0x00ab00ab", "al_r8_0x00ab00ab"},
+ {{al, r8, 0xab00ab00}, false, al, "al r8 0xab00ab00", "al_r8_0xab00ab00"},
+ {{al, r8, 0xabababab}, false, al, "al r8 0xabababab", "al_r8_0xabababab"},
+ {{al, r9, 0x000001fe}, false, al, "al r9 0x000001fe", "al_r9_0x000001fe"},
+ {{al, r9, 0x000003fc}, false, al, "al r9 0x000003fc", "al_r9_0x000003fc"},
+ {{al, r9, 0x000007f8}, false, al, "al r9 0x000007f8", "al_r9_0x000007f8"},
+ {{al, r9, 0x00000ff0}, false, al, "al r9 0x00000ff0", "al_r9_0x00000ff0"},
+ {{al, r9, 0x00001fe0}, false, al, "al r9 0x00001fe0", "al_r9_0x00001fe0"},
+ {{al, r9, 0x00003fc0}, false, al, "al r9 0x00003fc0", "al_r9_0x00003fc0"},
+ {{al, r9, 0x00007f80}, false, al, "al r9 0x00007f80", "al_r9_0x00007f80"},
+ {{al, r9, 0x0000ff00}, false, al, "al r9 0x0000ff00", "al_r9_0x0000ff00"},
+ {{al, r9, 0x0001fe00}, false, al, "al r9 0x0001fe00", "al_r9_0x0001fe00"},
+ {{al, r9, 0x0003fc00}, false, al, "al r9 0x0003fc00", "al_r9_0x0003fc00"},
+ {{al, r9, 0x0007f800}, false, al, "al r9 0x0007f800", "al_r9_0x0007f800"},
+ {{al, r9, 0x000ff000}, false, al, "al r9 0x000ff000", "al_r9_0x000ff000"},
+ {{al, r9, 0x001fe000}, false, al, "al r9 0x001fe000", "al_r9_0x001fe000"},
+ {{al, r9, 0x003fc000}, false, al, "al r9 0x003fc000", "al_r9_0x003fc000"},
+ {{al, r9, 0x007f8000}, false, al, "al r9 0x007f8000", "al_r9_0x007f8000"},
+ {{al, r9, 0x00ff0000}, false, al, "al r9 0x00ff0000", "al_r9_0x00ff0000"},
+ {{al, r9, 0x01fe0000}, false, al, "al r9 0x01fe0000", "al_r9_0x01fe0000"},
+ {{al, r9, 0x03fc0000}, false, al, "al r9 0x03fc0000", "al_r9_0x03fc0000"},
+ {{al, r9, 0x07f80000}, false, al, "al r9 0x07f80000", "al_r9_0x07f80000"},
+ {{al, r9, 0x0ff00000}, false, al, "al r9 0x0ff00000", "al_r9_0x0ff00000"},
+ {{al, r9, 0x1fe00000}, false, al, "al r9 0x1fe00000", "al_r9_0x1fe00000"},
+ {{al, r9, 0x3fc00000}, false, al, "al r9 0x3fc00000", "al_r9_0x3fc00000"},
+ {{al, r9, 0x7f800000}, false, al, "al r9 0x7f800000", "al_r9_0x7f800000"},
+ {{al, r9, 0xff000000}, false, al, "al r9 0xff000000", "al_r9_0xff000000"},
+ {{al, r9, 0x000000ff}, false, al, "al r9 0x000000ff", "al_r9_0x000000ff"},
+ {{al, r9, 0x00ff00ff}, false, al, "al r9 0x00ff00ff", "al_r9_0x00ff00ff"},
+ {{al, r9, 0xff00ff00}, false, al, "al r9 0xff00ff00", "al_r9_0xff00ff00"},
+ {{al, r9, 0xffffffff}, false, al, "al r9 0xffffffff", "al_r9_0xffffffff"},
+ {{al, r9, 0x00000156}, false, al, "al r9 0x00000156", "al_r9_0x00000156"},
+ {{al, r9, 0x000002ac}, false, al, "al r9 0x000002ac", "al_r9_0x000002ac"},
+ {{al, r9, 0x00000558}, false, al, "al r9 0x00000558", "al_r9_0x00000558"},
+ {{al, r9, 0x00000ab0}, false, al, "al r9 0x00000ab0", "al_r9_0x00000ab0"},
+ {{al, r9, 0x00001560}, false, al, "al r9 0x00001560", "al_r9_0x00001560"},
+ {{al, r9, 0x00002ac0}, false, al, "al r9 0x00002ac0", "al_r9_0x00002ac0"},
+ {{al, r9, 0x00005580}, false, al, "al r9 0x00005580", "al_r9_0x00005580"},
+ {{al, r9, 0x0000ab00}, false, al, "al r9 0x0000ab00", "al_r9_0x0000ab00"},
+ {{al, r9, 0x00015600}, false, al, "al r9 0x00015600", "al_r9_0x00015600"},
+ {{al, r9, 0x0002ac00}, false, al, "al r9 0x0002ac00", "al_r9_0x0002ac00"},
+ {{al, r9, 0x00055800}, false, al, "al r9 0x00055800", "al_r9_0x00055800"},
+ {{al, r9, 0x000ab000}, false, al, "al r9 0x000ab000", "al_r9_0x000ab000"},
+ {{al, r9, 0x00156000}, false, al, "al r9 0x00156000", "al_r9_0x00156000"},
+ {{al, r9, 0x002ac000}, false, al, "al r9 0x002ac000", "al_r9_0x002ac000"},
+ {{al, r9, 0x00558000}, false, al, "al r9 0x00558000", "al_r9_0x00558000"},
+ {{al, r9, 0x00ab0000}, false, al, "al r9 0x00ab0000", "al_r9_0x00ab0000"},
+ {{al, r9, 0x01560000}, false, al, "al r9 0x01560000", "al_r9_0x01560000"},
+ {{al, r9, 0x02ac0000}, false, al, "al r9 0x02ac0000", "al_r9_0x02ac0000"},
+ {{al, r9, 0x05580000}, false, al, "al r9 0x05580000", "al_r9_0x05580000"},
+ {{al, r9, 0x0ab00000}, false, al, "al r9 0x0ab00000", "al_r9_0x0ab00000"},
+ {{al, r9, 0x15600000}, false, al, "al r9 0x15600000", "al_r9_0x15600000"},
+ {{al, r9, 0x2ac00000}, false, al, "al r9 0x2ac00000", "al_r9_0x2ac00000"},
+ {{al, r9, 0x55800000}, false, al, "al r9 0x55800000", "al_r9_0x55800000"},
+ {{al, r9, 0xab000000}, false, al, "al r9 0xab000000", "al_r9_0xab000000"},
+ {{al, r9, 0x000000ab}, false, al, "al r9 0x000000ab", "al_r9_0x000000ab"},
+ {{al, r9, 0x00ab00ab}, false, al, "al r9 0x00ab00ab", "al_r9_0x00ab00ab"},
+ {{al, r9, 0xab00ab00}, false, al, "al r9 0xab00ab00", "al_r9_0xab00ab00"},
+ {{al, r9, 0xabababab}, false, al, "al r9 0xabababab", "al_r9_0xabababab"},
+ {{al, r10, 0x000001fe},
+ false,
+ al,
+ "al r10 0x000001fe",
+ "al_r10_"
+ "0x000001fe"},
+ {{al, r10, 0x000003fc},
+ false,
+ al,
+ "al r10 0x000003fc",
+ "al_r10_"
+ "0x000003fc"},
+ {{al, r10, 0x000007f8},
+ false,
+ al,
+ "al r10 0x000007f8",
+ "al_r10_"
+ "0x000007f8"},
+ {{al, r10, 0x00000ff0},
+ false,
+ al,
+ "al r10 0x00000ff0",
+ "al_r10_"
+ "0x00000ff0"},
+ {{al, r10, 0x00001fe0},
+ false,
+ al,
+ "al r10 0x00001fe0",
+ "al_r10_"
+ "0x00001fe0"},
+ {{al, r10, 0x00003fc0},
+ false,
+ al,
+ "al r10 0x00003fc0",
+ "al_r10_"
+ "0x00003fc0"},
+ {{al, r10, 0x00007f80},
+ false,
+ al,
+ "al r10 0x00007f80",
+ "al_r10_"
+ "0x00007f80"},
+ {{al, r10, 0x0000ff00},
+ false,
+ al,
+ "al r10 0x0000ff00",
+ "al_r10_"
+ "0x0000ff00"},
+ {{al, r10, 0x0001fe00},
+ false,
+ al,
+ "al r10 0x0001fe00",
+ "al_r10_"
+ "0x0001fe00"},
+ {{al, r10, 0x0003fc00},
+ false,
+ al,
+ "al r10 0x0003fc00",
+ "al_r10_"
+ "0x0003fc00"},
+ {{al, r10, 0x0007f800},
+ false,
+ al,
+ "al r10 0x0007f800",
+ "al_r10_"
+ "0x0007f800"},
+ {{al, r10, 0x000ff000},
+ false,
+ al,
+ "al r10 0x000ff000",
+ "al_r10_"
+ "0x000ff000"},
+ {{al, r10, 0x001fe000},
+ false,
+ al,
+ "al r10 0x001fe000",
+ "al_r10_"
+ "0x001fe000"},
+ {{al, r10, 0x003fc000},
+ false,
+ al,
+ "al r10 0x003fc000",
+ "al_r10_"
+ "0x003fc000"},
+ {{al, r10, 0x007f8000},
+ false,
+ al,
+ "al r10 0x007f8000",
+ "al_r10_"
+ "0x007f8000"},
+ {{al, r10, 0x00ff0000},
+ false,
+ al,
+ "al r10 0x00ff0000",
+ "al_r10_"
+ "0x00ff0000"},
+ {{al, r10, 0x01fe0000},
+ false,
+ al,
+ "al r10 0x01fe0000",
+ "al_r10_"
+ "0x01fe0000"},
+ {{al, r10, 0x03fc0000},
+ false,
+ al,
+ "al r10 0x03fc0000",
+ "al_r10_"
+ "0x03fc0000"},
+ {{al, r10, 0x07f80000},
+ false,
+ al,
+ "al r10 0x07f80000",
+ "al_r10_"
+ "0x07f80000"},
+ {{al, r10, 0x0ff00000},
+ false,
+ al,
+ "al r10 0x0ff00000",
+ "al_r10_"
+ "0x0ff00000"},
+ {{al, r10, 0x1fe00000},
+ false,
+ al,
+ "al r10 0x1fe00000",
+ "al_r10_"
+ "0x1fe00000"},
+ {{al, r10, 0x3fc00000},
+ false,
+ al,
+ "al r10 0x3fc00000",
+ "al_r10_"
+ "0x3fc00000"},
+ {{al, r10, 0x7f800000},
+ false,
+ al,
+ "al r10 0x7f800000",
+ "al_r10_"
+ "0x7f800000"},
+ {{al, r10, 0xff000000},
+ false,
+ al,
+ "al r10 0xff000000",
+ "al_r10_"
+ "0xff000000"},
+ {{al, r10, 0x000000ff},
+ false,
+ al,
+ "al r10 0x000000ff",
+ "al_r10_"
+ "0x000000ff"},
+ {{al, r10, 0x00ff00ff},
+ false,
+ al,
+ "al r10 0x00ff00ff",
+ "al_r10_"
+ "0x00ff00ff"},
+ {{al, r10, 0xff00ff00},
+ false,
+ al,
+ "al r10 0xff00ff00",
+ "al_r10_"
+ "0xff00ff00"},
+ {{al, r10, 0xffffffff},
+ false,
+ al,
+ "al r10 0xffffffff",
+ "al_r10_"
+ "0xffffffff"},
+ {{al, r10, 0x00000156},
+ false,
+ al,
+ "al r10 0x00000156",
+ "al_r10_"
+ "0x00000156"},
+ {{al, r10, 0x000002ac},
+ false,
+ al,
+ "al r10 0x000002ac",
+ "al_r10_"
+ "0x000002ac"},
+ {{al, r10, 0x00000558},
+ false,
+ al,
+ "al r10 0x00000558",
+ "al_r10_"
+ "0x00000558"},
+ {{al, r10, 0x00000ab0},
+ false,
+ al,
+ "al r10 0x00000ab0",
+ "al_r10_"
+ "0x00000ab0"},
+ {{al, r10, 0x00001560},
+ false,
+ al,
+ "al r10 0x00001560",
+ "al_r10_"
+ "0x00001560"},
+ {{al, r10, 0x00002ac0},
+ false,
+ al,
+ "al r10 0x00002ac0",
+ "al_r10_"
+ "0x00002ac0"},
+ {{al, r10, 0x00005580},
+ false,
+ al,
+ "al r10 0x00005580",
+ "al_r10_"
+ "0x00005580"},
+ {{al, r10, 0x0000ab00},
+ false,
+ al,
+ "al r10 0x0000ab00",
+ "al_r10_"
+ "0x0000ab00"},
+ {{al, r10, 0x00015600},
+ false,
+ al,
+ "al r10 0x00015600",
+ "al_r10_"
+ "0x00015600"},
+ {{al, r10, 0x0002ac00},
+ false,
+ al,
+ "al r10 0x0002ac00",
+ "al_r10_"
+ "0x0002ac00"},
+ {{al, r10, 0x00055800},
+ false,
+ al,
+ "al r10 0x00055800",
+ "al_r10_"
+ "0x00055800"},
+ {{al, r10, 0x000ab000},
+ false,
+ al,
+ "al r10 0x000ab000",
+ "al_r10_"
+ "0x000ab000"},
+ {{al, r10, 0x00156000},
+ false,
+ al,
+ "al r10 0x00156000",
+ "al_r10_"
+ "0x00156000"},
+ {{al, r10, 0x002ac000},
+ false,
+ al,
+ "al r10 0x002ac000",
+ "al_r10_"
+ "0x002ac000"},
+ {{al, r10, 0x00558000},
+ false,
+ al,
+ "al r10 0x00558000",
+ "al_r10_"
+ "0x00558000"},
+ {{al, r10, 0x00ab0000},
+ false,
+ al,
+ "al r10 0x00ab0000",
+ "al_r10_"
+ "0x00ab0000"},
+ {{al, r10, 0x01560000},
+ false,
+ al,
+ "al r10 0x01560000",
+ "al_r10_"
+ "0x01560000"},
+ {{al, r10, 0x02ac0000},
+ false,
+ al,
+ "al r10 0x02ac0000",
+ "al_r10_"
+ "0x02ac0000"},
+ {{al, r10, 0x05580000},
+ false,
+ al,
+ "al r10 0x05580000",
+ "al_r10_"
+ "0x05580000"},
+ {{al, r10, 0x0ab00000},
+ false,
+ al,
+ "al r10 0x0ab00000",
+ "al_r10_"
+ "0x0ab00000"},
+ {{al, r10, 0x15600000},
+ false,
+ al,
+ "al r10 0x15600000",
+ "al_r10_"
+ "0x15600000"},
+ {{al, r10, 0x2ac00000},
+ false,
+ al,
+ "al r10 0x2ac00000",
+ "al_r10_"
+ "0x2ac00000"},
+ {{al, r10, 0x55800000},
+ false,
+ al,
+ "al r10 0x55800000",
+ "al_r10_"
+ "0x55800000"},
+ {{al, r10, 0xab000000},
+ false,
+ al,
+ "al r10 0xab000000",
+ "al_r10_"
+ "0xab000000"},
+ {{al, r10, 0x000000ab},
+ false,
+ al,
+ "al r10 0x000000ab",
+ "al_r10_"
+ "0x000000ab"},
+ {{al, r10, 0x00ab00ab},
+ false,
+ al,
+ "al r10 0x00ab00ab",
+ "al_r10_"
+ "0x00ab00ab"},
+ {{al, r10, 0xab00ab00},
+ false,
+ al,
+ "al r10 0xab00ab00",
+ "al_r10_"
+ "0xab00ab00"},
+ {{al, r10, 0xabababab},
+ false,
+ al,
+ "al r10 0xabababab",
+ "al_r10_"
+ "0xabababab"},
+ {{al, r11, 0x000001fe},
+ false,
+ al,
+ "al r11 0x000001fe",
+ "al_r11_"
+ "0x000001fe"},
+ {{al, r11, 0x000003fc},
+ false,
+ al,
+ "al r11 0x000003fc",
+ "al_r11_"
+ "0x000003fc"},
+ {{al, r11, 0x000007f8},
+ false,
+ al,
+ "al r11 0x000007f8",
+ "al_r11_"
+ "0x000007f8"},
+ {{al, r11, 0x00000ff0},
+ false,
+ al,
+ "al r11 0x00000ff0",
+ "al_r11_"
+ "0x00000ff0"},
+ {{al, r11, 0x00001fe0},
+ false,
+ al,
+ "al r11 0x00001fe0",
+ "al_r11_"
+ "0x00001fe0"},
+ {{al, r11, 0x00003fc0},
+ false,
+ al,
+ "al r11 0x00003fc0",
+ "al_r11_"
+ "0x00003fc0"},
+ {{al, r11, 0x00007f80},
+ false,
+ al,
+ "al r11 0x00007f80",
+ "al_r11_"
+ "0x00007f80"},
+ {{al, r11, 0x0000ff00},
+ false,
+ al,
+ "al r11 0x0000ff00",
+ "al_r11_"
+ "0x0000ff00"},
+ {{al, r11, 0x0001fe00},
+ false,
+ al,
+ "al r11 0x0001fe00",
+ "al_r11_"
+ "0x0001fe00"},
+ {{al, r11, 0x0003fc00},
+ false,
+ al,
+ "al r11 0x0003fc00",
+ "al_r11_"
+ "0x0003fc00"},
+ {{al, r11, 0x0007f800},
+ false,
+ al,
+ "al r11 0x0007f800",
+ "al_r11_"
+ "0x0007f800"},
+ {{al, r11, 0x000ff000},
+ false,
+ al,
+ "al r11 0x000ff000",
+ "al_r11_"
+ "0x000ff000"},
+ {{al, r11, 0x001fe000},
+ false,
+ al,
+ "al r11 0x001fe000",
+ "al_r11_"
+ "0x001fe000"},
+ {{al, r11, 0x003fc000},
+ false,
+ al,
+ "al r11 0x003fc000",
+ "al_r11_"
+ "0x003fc000"},
+ {{al, r11, 0x007f8000},
+ false,
+ al,
+ "al r11 0x007f8000",
+ "al_r11_"
+ "0x007f8000"},
+ {{al, r11, 0x00ff0000},
+ false,
+ al,
+ "al r11 0x00ff0000",
+ "al_r11_"
+ "0x00ff0000"},
+ {{al, r11, 0x01fe0000},
+ false,
+ al,
+ "al r11 0x01fe0000",
+ "al_r11_"
+ "0x01fe0000"},
+ {{al, r11, 0x03fc0000},
+ false,
+ al,
+ "al r11 0x03fc0000",
+ "al_r11_"
+ "0x03fc0000"},
+ {{al, r11, 0x07f80000},
+ false,
+ al,
+ "al r11 0x07f80000",
+ "al_r11_"
+ "0x07f80000"},
+ {{al, r11, 0x0ff00000},
+ false,
+ al,
+ "al r11 0x0ff00000",
+ "al_r11_"
+ "0x0ff00000"},
+ {{al, r11, 0x1fe00000},
+ false,
+ al,
+ "al r11 0x1fe00000",
+ "al_r11_"
+ "0x1fe00000"},
+ {{al, r11, 0x3fc00000},
+ false,
+ al,
+ "al r11 0x3fc00000",
+ "al_r11_"
+ "0x3fc00000"},
+ {{al, r11, 0x7f800000},
+ false,
+ al,
+ "al r11 0x7f800000",
+ "al_r11_"
+ "0x7f800000"},
+ {{al, r11, 0xff000000},
+ false,
+ al,
+ "al r11 0xff000000",
+ "al_r11_"
+ "0xff000000"},
+ {{al, r11, 0x000000ff},
+ false,
+ al,
+ "al r11 0x000000ff",
+ "al_r11_"
+ "0x000000ff"},
+ {{al, r11, 0x00ff00ff},
+ false,
+ al,
+ "al r11 0x00ff00ff",
+ "al_r11_"
+ "0x00ff00ff"},
+ {{al, r11, 0xff00ff00},
+ false,
+ al,
+ "al r11 0xff00ff00",
+ "al_r11_"
+ "0xff00ff00"},
+ {{al, r11, 0xffffffff},
+ false,
+ al,
+ "al r11 0xffffffff",
+ "al_r11_"
+ "0xffffffff"},
+ {{al, r11, 0x00000156},
+ false,
+ al,
+ "al r11 0x00000156",
+ "al_r11_"
+ "0x00000156"},
+ {{al, r11, 0x000002ac},
+ false,
+ al,
+ "al r11 0x000002ac",
+ "al_r11_"
+ "0x000002ac"},
+ {{al, r11, 0x00000558},
+ false,
+ al,
+ "al r11 0x00000558",
+ "al_r11_"
+ "0x00000558"},
+ {{al, r11, 0x00000ab0},
+ false,
+ al,
+ "al r11 0x00000ab0",
+ "al_r11_"
+ "0x00000ab0"},
+ {{al, r11, 0x00001560},
+ false,
+ al,
+ "al r11 0x00001560",
+ "al_r11_"
+ "0x00001560"},
+ {{al, r11, 0x00002ac0},
+ false,
+ al,
+ "al r11 0x00002ac0",
+ "al_r11_"
+ "0x00002ac0"},
+ {{al, r11, 0x00005580},
+ false,
+ al,
+ "al r11 0x00005580",
+ "al_r11_"
+ "0x00005580"},
+ {{al, r11, 0x0000ab00},
+ false,
+ al,
+ "al r11 0x0000ab00",
+ "al_r11_"
+ "0x0000ab00"},
+ {{al, r11, 0x00015600},
+ false,
+ al,
+ "al r11 0x00015600",
+ "al_r11_"
+ "0x00015600"},
+ {{al, r11, 0x0002ac00},
+ false,
+ al,
+ "al r11 0x0002ac00",
+ "al_r11_"
+ "0x0002ac00"},
+ {{al, r11, 0x00055800},
+ false,
+ al,
+ "al r11 0x00055800",
+ "al_r11_"
+ "0x00055800"},
+ {{al, r11, 0x000ab000},
+ false,
+ al,
+ "al r11 0x000ab000",
+ "al_r11_"
+ "0x000ab000"},
+ {{al, r11, 0x00156000},
+ false,
+ al,
+ "al r11 0x00156000",
+ "al_r11_"
+ "0x00156000"},
+ {{al, r11, 0x002ac000},
+ false,
+ al,
+ "al r11 0x002ac000",
+ "al_r11_"
+ "0x002ac000"},
+ {{al, r11, 0x00558000},
+ false,
+ al,
+ "al r11 0x00558000",
+ "al_r11_"
+ "0x00558000"},
+ {{al, r11, 0x00ab0000},
+ false,
+ al,
+ "al r11 0x00ab0000",
+ "al_r11_"
+ "0x00ab0000"},
+ {{al, r11, 0x01560000},
+ false,
+ al,
+ "al r11 0x01560000",
+ "al_r11_"
+ "0x01560000"},
+ {{al, r11, 0x02ac0000},
+ false,
+ al,
+ "al r11 0x02ac0000",
+ "al_r11_"
+ "0x02ac0000"},
+ {{al, r11, 0x05580000},
+ false,
+ al,
+ "al r11 0x05580000",
+ "al_r11_"
+ "0x05580000"},
+ {{al, r11, 0x0ab00000},
+ false,
+ al,
+ "al r11 0x0ab00000",
+ "al_r11_"
+ "0x0ab00000"},
+ {{al, r11, 0x15600000},
+ false,
+ al,
+ "al r11 0x15600000",
+ "al_r11_"
+ "0x15600000"},
+ {{al, r11, 0x2ac00000},
+ false,
+ al,
+ "al r11 0x2ac00000",
+ "al_r11_"
+ "0x2ac00000"},
+ {{al, r11, 0x55800000},
+ false,
+ al,
+ "al r11 0x55800000",
+ "al_r11_"
+ "0x55800000"},
+ {{al, r11, 0xab000000},
+ false,
+ al,
+ "al r11 0xab000000",
+ "al_r11_"
+ "0xab000000"},
+ {{al, r11, 0x000000ab},
+ false,
+ al,
+ "al r11 0x000000ab",
+ "al_r11_"
+ "0x000000ab"},
+ {{al, r11, 0x00ab00ab},
+ false,
+ al,
+ "al r11 0x00ab00ab",
+ "al_r11_"
+ "0x00ab00ab"},
+ {{al, r11, 0xab00ab00},
+ false,
+ al,
+ "al r11 0xab00ab00",
+ "al_r11_"
+ "0xab00ab00"},
+ {{al, r11, 0xabababab},
+ false,
+ al,
+ "al r11 0xabababab",
+ "al_r11_"
+ "0xabababab"},
+ {{al, r12, 0x000001fe},
+ false,
+ al,
+ "al r12 0x000001fe",
+ "al_r12_"
+ "0x000001fe"},
+ {{al, r12, 0x000003fc},
+ false,
+ al,
+ "al r12 0x000003fc",
+ "al_r12_"
+ "0x000003fc"},
+ {{al, r12, 0x000007f8},
+ false,
+ al,
+ "al r12 0x000007f8",
+ "al_r12_"
+ "0x000007f8"},
+ {{al, r12, 0x00000ff0},
+ false,
+ al,
+ "al r12 0x00000ff0",
+ "al_r12_"
+ "0x00000ff0"},
+ {{al, r12, 0x00001fe0},
+ false,
+ al,
+ "al r12 0x00001fe0",
+ "al_r12_"
+ "0x00001fe0"},
+ {{al, r12, 0x00003fc0},
+ false,
+ al,
+ "al r12 0x00003fc0",
+ "al_r12_"
+ "0x00003fc0"},
+ {{al, r12, 0x00007f80},
+ false,
+ al,
+ "al r12 0x00007f80",
+ "al_r12_"
+ "0x00007f80"},
+ {{al, r12, 0x0000ff00},
+ false,
+ al,
+ "al r12 0x0000ff00",
+ "al_r12_"
+ "0x0000ff00"},
+ {{al, r12, 0x0001fe00},
+ false,
+ al,
+ "al r12 0x0001fe00",
+ "al_r12_"
+ "0x0001fe00"},
+ {{al, r12, 0x0003fc00},
+ false,
+ al,
+ "al r12 0x0003fc00",
+ "al_r12_"
+ "0x0003fc00"},
+ {{al, r12, 0x0007f800},
+ false,
+ al,
+ "al r12 0x0007f800",
+ "al_r12_"
+ "0x0007f800"},
+ {{al, r12, 0x000ff000},
+ false,
+ al,
+ "al r12 0x000ff000",
+ "al_r12_"
+ "0x000ff000"},
+ {{al, r12, 0x001fe000},
+ false,
+ al,
+ "al r12 0x001fe000",
+ "al_r12_"
+ "0x001fe000"},
+ {{al, r12, 0x003fc000},
+ false,
+ al,
+ "al r12 0x003fc000",
+ "al_r12_"
+ "0x003fc000"},
+ {{al, r12, 0x007f8000},
+ false,
+ al,
+ "al r12 0x007f8000",
+ "al_r12_"
+ "0x007f8000"},
+ {{al, r12, 0x00ff0000},
+ false,
+ al,
+ "al r12 0x00ff0000",
+ "al_r12_"
+ "0x00ff0000"},
+ {{al, r12, 0x01fe0000},
+ false,
+ al,
+ "al r12 0x01fe0000",
+ "al_r12_"
+ "0x01fe0000"},
+ {{al, r12, 0x03fc0000},
+ false,
+ al,
+ "al r12 0x03fc0000",
+ "al_r12_"
+ "0x03fc0000"},
+ {{al, r12, 0x07f80000},
+ false,
+ al,
+ "al r12 0x07f80000",
+ "al_r12_"
+ "0x07f80000"},
+ {{al, r12, 0x0ff00000},
+ false,
+ al,
+ "al r12 0x0ff00000",
+ "al_r12_"
+ "0x0ff00000"},
+ {{al, r12, 0x1fe00000},
+ false,
+ al,
+ "al r12 0x1fe00000",
+ "al_r12_"
+ "0x1fe00000"},
+ {{al, r12, 0x3fc00000},
+ false,
+ al,
+ "al r12 0x3fc00000",
+ "al_r12_"
+ "0x3fc00000"},
+ {{al, r12, 0x7f800000},
+ false,
+ al,
+ "al r12 0x7f800000",
+ "al_r12_"
+ "0x7f800000"},
+ {{al, r12, 0xff000000},
+ false,
+ al,
+ "al r12 0xff000000",
+ "al_r12_"
+ "0xff000000"},
+ {{al, r12, 0x000000ff},
+ false,
+ al,
+ "al r12 0x000000ff",
+ "al_r12_"
+ "0x000000ff"},
+ {{al, r12, 0x00ff00ff},
+ false,
+ al,
+ "al r12 0x00ff00ff",
+ "al_r12_"
+ "0x00ff00ff"},
+ {{al, r12, 0xff00ff00},
+ false,
+ al,
+ "al r12 0xff00ff00",
+ "al_r12_"
+ "0xff00ff00"},
+ {{al, r12, 0xffffffff},
+ false,
+ al,
+ "al r12 0xffffffff",
+ "al_r12_"
+ "0xffffffff"},
+ {{al, r12, 0x00000156},
+ false,
+ al,
+ "al r12 0x00000156",
+ "al_r12_"
+ "0x00000156"},
+ {{al, r12, 0x000002ac},
+ false,
+ al,
+ "al r12 0x000002ac",
+ "al_r12_"
+ "0x000002ac"},
+ {{al, r12, 0x00000558},
+ false,
+ al,
+ "al r12 0x00000558",
+ "al_r12_"
+ "0x00000558"},
+ {{al, r12, 0x00000ab0},
+ false,
+ al,
+ "al r12 0x00000ab0",
+ "al_r12_"
+ "0x00000ab0"},
+ {{al, r12, 0x00001560},
+ false,
+ al,
+ "al r12 0x00001560",
+ "al_r12_"
+ "0x00001560"},
+ {{al, r12, 0x00002ac0},
+ false,
+ al,
+ "al r12 0x00002ac0",
+ "al_r12_"
+ "0x00002ac0"},
+ {{al, r12, 0x00005580},
+ false,
+ al,
+ "al r12 0x00005580",
+ "al_r12_"
+ "0x00005580"},
+ {{al, r12, 0x0000ab00},
+ false,
+ al,
+ "al r12 0x0000ab00",
+ "al_r12_"
+ "0x0000ab00"},
+ {{al, r12, 0x00015600},
+ false,
+ al,
+ "al r12 0x00015600",
+ "al_r12_"
+ "0x00015600"},
+ {{al, r12, 0x0002ac00},
+ false,
+ al,
+ "al r12 0x0002ac00",
+ "al_r12_"
+ "0x0002ac00"},
+ {{al, r12, 0x00055800},
+ false,
+ al,
+ "al r12 0x00055800",
+ "al_r12_"
+ "0x00055800"},
+ {{al, r12, 0x000ab000},
+ false,
+ al,
+ "al r12 0x000ab000",
+ "al_r12_"
+ "0x000ab000"},
+ {{al, r12, 0x00156000},
+ false,
+ al,
+ "al r12 0x00156000",
+ "al_r12_"
+ "0x00156000"},
+ {{al, r12, 0x002ac000},
+ false,
+ al,
+ "al r12 0x002ac000",
+ "al_r12_"
+ "0x002ac000"},
+ {{al, r12, 0x00558000},
+ false,
+ al,
+ "al r12 0x00558000",
+ "al_r12_"
+ "0x00558000"},
+ {{al, r12, 0x00ab0000},
+ false,
+ al,
+ "al r12 0x00ab0000",
+ "al_r12_"
+ "0x00ab0000"},
+ {{al, r12, 0x01560000},
+ false,
+ al,
+ "al r12 0x01560000",
+ "al_r12_"
+ "0x01560000"},
+ {{al, r12, 0x02ac0000},
+ false,
+ al,
+ "al r12 0x02ac0000",
+ "al_r12_"
+ "0x02ac0000"},
+ {{al, r12, 0x05580000},
+ false,
+ al,
+ "al r12 0x05580000",
+ "al_r12_"
+ "0x05580000"},
+ {{al, r12, 0x0ab00000},
+ false,
+ al,
+ "al r12 0x0ab00000",
+ "al_r12_"
+ "0x0ab00000"},
+ {{al, r12, 0x15600000},
+ false,
+ al,
+ "al r12 0x15600000",
+ "al_r12_"
+ "0x15600000"},
+ {{al, r12, 0x2ac00000},
+ false,
+ al,
+ "al r12 0x2ac00000",
+ "al_r12_"
+ "0x2ac00000"},
+ {{al, r12, 0x55800000},
+ false,
+ al,
+ "al r12 0x55800000",
+ "al_r12_"
+ "0x55800000"},
+ {{al, r12, 0xab000000},
+ false,
+ al,
+ "al r12 0xab000000",
+ "al_r12_"
+ "0xab000000"},
+ {{al, r12, 0x000000ab},
+ false,
+ al,
+ "al r12 0x000000ab",
+ "al_r12_"
+ "0x000000ab"},
+ {{al, r12, 0x00ab00ab},
+ false,
+ al,
+ "al r12 0x00ab00ab",
+ "al_r12_"
+ "0x00ab00ab"},
+ {{al, r12, 0xab00ab00},
+ false,
+ al,
+ "al r12 0xab00ab00",
+ "al_r12_"
+ "0xab00ab00"},
+ {{al, r12, 0xabababab},
+ false,
+ al,
+ "al r12 0xabababab",
+ "al_r12_"
+ "0xabababab"},
+ {{al, r13, 0x000001fe},
+ false,
+ al,
+ "al r13 0x000001fe",
+ "al_r13_"
+ "0x000001fe"},
+ {{al, r13, 0x000003fc},
+ false,
+ al,
+ "al r13 0x000003fc",
+ "al_r13_"
+ "0x000003fc"},
+ {{al, r13, 0x000007f8},
+ false,
+ al,
+ "al r13 0x000007f8",
+ "al_r13_"
+ "0x000007f8"},
+ {{al, r13, 0x00000ff0},
+ false,
+ al,
+ "al r13 0x00000ff0",
+ "al_r13_"
+ "0x00000ff0"},
+ {{al, r13, 0x00001fe0},
+ false,
+ al,
+ "al r13 0x00001fe0",
+ "al_r13_"
+ "0x00001fe0"},
+ {{al, r13, 0x00003fc0},
+ false,
+ al,
+ "al r13 0x00003fc0",
+ "al_r13_"
+ "0x00003fc0"},
+ {{al, r13, 0x00007f80},
+ false,
+ al,
+ "al r13 0x00007f80",
+ "al_r13_"
+ "0x00007f80"},
+ {{al, r13, 0x0000ff00},
+ false,
+ al,
+ "al r13 0x0000ff00",
+ "al_r13_"
+ "0x0000ff00"},
+ {{al, r13, 0x0001fe00},
+ false,
+ al,
+ "al r13 0x0001fe00",
+ "al_r13_"
+ "0x0001fe00"},
+ {{al, r13, 0x0003fc00},
+ false,
+ al,
+ "al r13 0x0003fc00",
+ "al_r13_"
+ "0x0003fc00"},
+ {{al, r13, 0x0007f800},
+ false,
+ al,
+ "al r13 0x0007f800",
+ "al_r13_"
+ "0x0007f800"},
+ {{al, r13, 0x000ff000},
+ false,
+ al,
+ "al r13 0x000ff000",
+ "al_r13_"
+ "0x000ff000"},
+ {{al, r13, 0x001fe000},
+ false,
+ al,
+ "al r13 0x001fe000",
+ "al_r13_"
+ "0x001fe000"},
+ {{al, r13, 0x003fc000},
+ false,
+ al,
+ "al r13 0x003fc000",
+ "al_r13_"
+ "0x003fc000"},
+ {{al, r13, 0x007f8000},
+ false,
+ al,
+ "al r13 0x007f8000",
+ "al_r13_"
+ "0x007f8000"},
+ {{al, r13, 0x00ff0000},
+ false,
+ al,
+ "al r13 0x00ff0000",
+ "al_r13_"
+ "0x00ff0000"},
+ {{al, r13, 0x01fe0000},
+ false,
+ al,
+ "al r13 0x01fe0000",
+ "al_r13_"
+ "0x01fe0000"},
+ {{al, r13, 0x03fc0000},
+ false,
+ al,
+ "al r13 0x03fc0000",
+ "al_r13_"
+ "0x03fc0000"},
+ {{al, r13, 0x07f80000},
+ false,
+ al,
+ "al r13 0x07f80000",
+ "al_r13_"
+ "0x07f80000"},
+ {{al, r13, 0x0ff00000},
+ false,
+ al,
+ "al r13 0x0ff00000",
+ "al_r13_"
+ "0x0ff00000"},
+ {{al, r13, 0x1fe00000},
+ false,
+ al,
+ "al r13 0x1fe00000",
+ "al_r13_"
+ "0x1fe00000"},
+ {{al, r13, 0x3fc00000},
+ false,
+ al,
+ "al r13 0x3fc00000",
+ "al_r13_"
+ "0x3fc00000"},
+ {{al, r13, 0x7f800000},
+ false,
+ al,
+ "al r13 0x7f800000",
+ "al_r13_"
+ "0x7f800000"},
+ {{al, r13, 0xff000000},
+ false,
+ al,
+ "al r13 0xff000000",
+ "al_r13_"
+ "0xff000000"},
+ {{al, r13, 0x000000ff},
+ false,
+ al,
+ "al r13 0x000000ff",
+ "al_r13_"
+ "0x000000ff"},
+ {{al, r13, 0x00ff00ff},
+ false,
+ al,
+ "al r13 0x00ff00ff",
+ "al_r13_"
+ "0x00ff00ff"},
+ {{al, r13, 0xff00ff00},
+ false,
+ al,
+ "al r13 0xff00ff00",
+ "al_r13_"
+ "0xff00ff00"},
+ {{al, r13, 0xffffffff},
+ false,
+ al,
+ "al r13 0xffffffff",
+ "al_r13_"
+ "0xffffffff"},
+ {{al, r13, 0x00000156},
+ false,
+ al,
+ "al r13 0x00000156",
+ "al_r13_"
+ "0x00000156"},
+ {{al, r13, 0x000002ac},
+ false,
+ al,
+ "al r13 0x000002ac",
+ "al_r13_"
+ "0x000002ac"},
+ {{al, r13, 0x00000558},
+ false,
+ al,
+ "al r13 0x00000558",
+ "al_r13_"
+ "0x00000558"},
+ {{al, r13, 0x00000ab0},
+ false,
+ al,
+ "al r13 0x00000ab0",
+ "al_r13_"
+ "0x00000ab0"},
+ {{al, r13, 0x00001560},
+ false,
+ al,
+ "al r13 0x00001560",
+ "al_r13_"
+ "0x00001560"},
+ {{al, r13, 0x00002ac0},
+ false,
+ al,
+ "al r13 0x00002ac0",
+ "al_r13_"
+ "0x00002ac0"},
+ {{al, r13, 0x00005580},
+ false,
+ al,
+ "al r13 0x00005580",
+ "al_r13_"
+ "0x00005580"},
+ {{al, r13, 0x0000ab00},
+ false,
+ al,
+ "al r13 0x0000ab00",
+ "al_r13_"
+ "0x0000ab00"},
+ {{al, r13, 0x00015600},
+ false,
+ al,
+ "al r13 0x00015600",
+ "al_r13_"
+ "0x00015600"},
+ {{al, r13, 0x0002ac00},
+ false,
+ al,
+ "al r13 0x0002ac00",
+ "al_r13_"
+ "0x0002ac00"},
+ {{al, r13, 0x00055800},
+ false,
+ al,
+ "al r13 0x00055800",
+ "al_r13_"
+ "0x00055800"},
+ {{al, r13, 0x000ab000},
+ false,
+ al,
+ "al r13 0x000ab000",
+ "al_r13_"
+ "0x000ab000"},
+ {{al, r13, 0x00156000},
+ false,
+ al,
+ "al r13 0x00156000",
+ "al_r13_"
+ "0x00156000"},
+ {{al, r13, 0x002ac000},
+ false,
+ al,
+ "al r13 0x002ac000",
+ "al_r13_"
+ "0x002ac000"},
+ {{al, r13, 0x00558000},
+ false,
+ al,
+ "al r13 0x00558000",
+ "al_r13_"
+ "0x00558000"},
+ {{al, r13, 0x00ab0000},
+ false,
+ al,
+ "al r13 0x00ab0000",
+ "al_r13_"
+ "0x00ab0000"},
+ {{al, r13, 0x01560000},
+ false,
+ al,
+ "al r13 0x01560000",
+ "al_r13_"
+ "0x01560000"},
+ {{al, r13, 0x02ac0000},
+ false,
+ al,
+ "al r13 0x02ac0000",
+ "al_r13_"
+ "0x02ac0000"},
+ {{al, r13, 0x05580000},
+ false,
+ al,
+ "al r13 0x05580000",
+ "al_r13_"
+ "0x05580000"},
+ {{al, r13, 0x0ab00000},
+ false,
+ al,
+ "al r13 0x0ab00000",
+ "al_r13_"
+ "0x0ab00000"},
+ {{al, r13, 0x15600000},
+ false,
+ al,
+ "al r13 0x15600000",
+ "al_r13_"
+ "0x15600000"},
+ {{al, r13, 0x2ac00000},
+ false,
+ al,
+ "al r13 0x2ac00000",
+ "al_r13_"
+ "0x2ac00000"},
+ {{al, r13, 0x55800000},
+ false,
+ al,
+ "al r13 0x55800000",
+ "al_r13_"
+ "0x55800000"},
+ {{al, r13, 0xab000000},
+ false,
+ al,
+ "al r13 0xab000000",
+ "al_r13_"
+ "0xab000000"},
+ {{al, r13, 0x000000ab},
+ false,
+ al,
+ "al r13 0x000000ab",
+ "al_r13_"
+ "0x000000ab"},
+ {{al, r13, 0x00ab00ab},
+ false,
+ al,
+ "al r13 0x00ab00ab",
+ "al_r13_"
+ "0x00ab00ab"},
+ {{al, r13, 0xab00ab00},
+ false,
+ al,
+ "al r13 0xab00ab00",
+ "al_r13_"
+ "0xab00ab00"},
+ {{al, r13, 0xabababab},
+ false,
+ al,
+ "al r13 0xabababab",
+ "al_r13_"
+ "0xabababab"},
+ {{al, r14, 0x000001fe},
+ false,
+ al,
+ "al r14 0x000001fe",
+ "al_r14_"
+ "0x000001fe"},
+ {{al, r14, 0x000003fc},
+ false,
+ al,
+ "al r14 0x000003fc",
+ "al_r14_"
+ "0x000003fc"},
+ {{al, r14, 0x000007f8},
+ false,
+ al,
+ "al r14 0x000007f8",
+ "al_r14_"
+ "0x000007f8"},
+ {{al, r14, 0x00000ff0},
+ false,
+ al,
+ "al r14 0x00000ff0",
+ "al_r14_"
+ "0x00000ff0"},
+ {{al, r14, 0x00001fe0},
+ false,
+ al,
+ "al r14 0x00001fe0",
+ "al_r14_"
+ "0x00001fe0"},
+ {{al, r14, 0x00003fc0},
+ false,
+ al,
+ "al r14 0x00003fc0",
+ "al_r14_"
+ "0x00003fc0"},
+ {{al, r14, 0x00007f80},
+ false,
+ al,
+ "al r14 0x00007f80",
+ "al_r14_"
+ "0x00007f80"},
+ {{al, r14, 0x0000ff00},
+ false,
+ al,
+ "al r14 0x0000ff00",
+ "al_r14_"
+ "0x0000ff00"},
+ {{al, r14, 0x0001fe00},
+ false,
+ al,
+ "al r14 0x0001fe00",
+ "al_r14_"
+ "0x0001fe00"},
+ {{al, r14, 0x0003fc00},
+ false,
+ al,
+ "al r14 0x0003fc00",
+ "al_r14_"
+ "0x0003fc00"},
+ {{al, r14, 0x0007f800},
+ false,
+ al,
+ "al r14 0x0007f800",
+ "al_r14_"
+ "0x0007f800"},
+ {{al, r14, 0x000ff000},
+ false,
+ al,
+ "al r14 0x000ff000",
+ "al_r14_"
+ "0x000ff000"},
+ {{al, r14, 0x001fe000},
+ false,
+ al,
+ "al r14 0x001fe000",
+ "al_r14_"
+ "0x001fe000"},
+ {{al, r14, 0x003fc000},
+ false,
+ al,
+ "al r14 0x003fc000",
+ "al_r14_"
+ "0x003fc000"},
+ {{al, r14, 0x007f8000},
+ false,
+ al,
+ "al r14 0x007f8000",
+ "al_r14_"
+ "0x007f8000"},
+ {{al, r14, 0x00ff0000},
+ false,
+ al,
+ "al r14 0x00ff0000",
+ "al_r14_"
+ "0x00ff0000"},
+ {{al, r14, 0x01fe0000},
+ false,
+ al,
+ "al r14 0x01fe0000",
+ "al_r14_"
+ "0x01fe0000"},
+ {{al, r14, 0x03fc0000},
+ false,
+ al,
+ "al r14 0x03fc0000",
+ "al_r14_"
+ "0x03fc0000"},
+ {{al, r14, 0x07f80000},
+ false,
+ al,
+ "al r14 0x07f80000",
+ "al_r14_"
+ "0x07f80000"},
+ {{al, r14, 0x0ff00000},
+ false,
+ al,
+ "al r14 0x0ff00000",
+ "al_r14_"
+ "0x0ff00000"},
+ {{al, r14, 0x1fe00000},
+ false,
+ al,
+ "al r14 0x1fe00000",
+ "al_r14_"
+ "0x1fe00000"},
+ {{al, r14, 0x3fc00000},
+ false,
+ al,
+ "al r14 0x3fc00000",
+ "al_r14_"
+ "0x3fc00000"},
+ {{al, r14, 0x7f800000},
+ false,
+ al,
+ "al r14 0x7f800000",
+ "al_r14_"
+ "0x7f800000"},
+ {{al, r14, 0xff000000},
+ false,
+ al,
+ "al r14 0xff000000",
+ "al_r14_"
+ "0xff000000"},
+ {{al, r14, 0x000000ff},
+ false,
+ al,
+ "al r14 0x000000ff",
+ "al_r14_"
+ "0x000000ff"},
+ {{al, r14, 0x00ff00ff},
+ false,
+ al,
+ "al r14 0x00ff00ff",
+ "al_r14_"
+ "0x00ff00ff"},
+ {{al, r14, 0xff00ff00},
+ false,
+ al,
+ "al r14 0xff00ff00",
+ "al_r14_"
+ "0xff00ff00"},
+ {{al, r14, 0xffffffff},
+ false,
+ al,
+ "al r14 0xffffffff",
+ "al_r14_"
+ "0xffffffff"},
+ {{al, r14, 0x00000156},
+ false,
+ al,
+ "al r14 0x00000156",
+ "al_r14_"
+ "0x00000156"},
+ {{al, r14, 0x000002ac},
+ false,
+ al,
+ "al r14 0x000002ac",
+ "al_r14_"
+ "0x000002ac"},
+ {{al, r14, 0x00000558},
+ false,
+ al,
+ "al r14 0x00000558",
+ "al_r14_"
+ "0x00000558"},
+ {{al, r14, 0x00000ab0},
+ false,
+ al,
+ "al r14 0x00000ab0",
+ "al_r14_"
+ "0x00000ab0"},
+ {{al, r14, 0x00001560},
+ false,
+ al,
+ "al r14 0x00001560",
+ "al_r14_"
+ "0x00001560"},
+ {{al, r14, 0x00002ac0},
+ false,
+ al,
+ "al r14 0x00002ac0",
+ "al_r14_"
+ "0x00002ac0"},
+ {{al, r14, 0x00005580},
+ false,
+ al,
+ "al r14 0x00005580",
+ "al_r14_"
+ "0x00005580"},
+ {{al, r14, 0x0000ab00},
+ false,
+ al,
+ "al r14 0x0000ab00",
+ "al_r14_"
+ "0x0000ab00"},
+ {{al, r14, 0x00015600},
+ false,
+ al,
+ "al r14 0x00015600",
+ "al_r14_"
+ "0x00015600"},
+ {{al, r14, 0x0002ac00},
+ false,
+ al,
+ "al r14 0x0002ac00",
+ "al_r14_"
+ "0x0002ac00"},
+ {{al, r14, 0x00055800},
+ false,
+ al,
+ "al r14 0x00055800",
+ "al_r14_"
+ "0x00055800"},
+ {{al, r14, 0x000ab000},
+ false,
+ al,
+ "al r14 0x000ab000",
+ "al_r14_"
+ "0x000ab000"},
+ {{al, r14, 0x00156000},
+ false,
+ al,
+ "al r14 0x00156000",
+ "al_r14_"
+ "0x00156000"},
+ {{al, r14, 0x002ac000},
+ false,
+ al,
+ "al r14 0x002ac000",
+ "al_r14_"
+ "0x002ac000"},
+ {{al, r14, 0x00558000},
+ false,
+ al,
+ "al r14 0x00558000",
+ "al_r14_"
+ "0x00558000"},
+ {{al, r14, 0x00ab0000},
+ false,
+ al,
+ "al r14 0x00ab0000",
+ "al_r14_"
+ "0x00ab0000"},
+ {{al, r14, 0x01560000},
+ false,
+ al,
+ "al r14 0x01560000",
+ "al_r14_"
+ "0x01560000"},
+ {{al, r14, 0x02ac0000},
+ false,
+ al,
+ "al r14 0x02ac0000",
+ "al_r14_"
+ "0x02ac0000"},
+ {{al, r14, 0x05580000},
+ false,
+ al,
+ "al r14 0x05580000",
+ "al_r14_"
+ "0x05580000"},
+ {{al, r14, 0x0ab00000},
+ false,
+ al,
+ "al r14 0x0ab00000",
+ "al_r14_"
+ "0x0ab00000"},
+ {{al, r14, 0x15600000},
+ false,
+ al,
+ "al r14 0x15600000",
+ "al_r14_"
+ "0x15600000"},
+ {{al, r14, 0x2ac00000},
+ false,
+ al,
+ "al r14 0x2ac00000",
+ "al_r14_"
+ "0x2ac00000"},
+ {{al, r14, 0x55800000},
+ false,
+ al,
+ "al r14 0x55800000",
+ "al_r14_"
+ "0x55800000"},
+ {{al, r14, 0xab000000},
+ false,
+ al,
+ "al r14 0xab000000",
+ "al_r14_"
+ "0xab000000"},
+ {{al, r14, 0x000000ab},
+ false,
+ al,
+ "al r14 0x000000ab",
+ "al_r14_"
+ "0x000000ab"},
+ {{al, r14, 0x00ab00ab},
+ false,
+ al,
+ "al r14 0x00ab00ab",
+ "al_r14_"
+ "0x00ab00ab"},
+ {{al, r14, 0xab00ab00},
+ false,
+ al,
+ "al r14 0xab00ab00",
+ "al_r14_"
+ "0xab00ab00"},
+ {{al, r14, 0xabababab},
+ false,
+ al,
+ "al r14 0xabababab",
+ "al_r14_0xabababab"}};
// These headers each contain an array of `TestResult` with the reference output
// values. The reference arrays are names `kReference{mnemonic}`.
diff --git a/test/aarch32/test-assembler-cond-rd-operand-rn-shift-rs-a32.cc b/test/aarch32/test-assembler-cond-rd-operand-rn-shift-rs-a32.cc
index 731feef..e94fe3d 100644
--- a/test/aarch32/test-assembler-cond-rd-operand-rn-shift-rs-a32.cc
+++ b/test/aarch32/test-assembler-cond-rd-operand-rn-shift-rs-a32.cc
@@ -2196,8 +2196,12 @@
{{vc, r9, r9, LSL, r7}, false, al, "vc r9 r9 LSL r7", "vc_r9_r9_LSL_r7"},
{{mi, r7, r0, LSL, r4}, false, al, "mi r7 r0 LSL r4", "mi_r7_r0_LSL_r4"},
{{cc, r2, r10, ASR, r7}, false, al, "cc r2 r10 ASR r7", "cc_r2_r10_ASR_r7"},
- {{cs, r5, r10, LSR, r9}, false, al, "cs r5 r10 LSR r9", "cs_r5_r10_LSR_"
- "r9"}};
+ {{cs, r5, r10, LSR, r9},
+ false,
+ al,
+ "cs r5 r10 LSR r9",
+ "cs_r5_r10_LSR_"
+ "r9"}};
// These headers each contain an array of `TestResult` with the reference output
// values. The reference arrays are names `kReference{mnemonic}`.
diff --git a/test/aarch32/test-assembler-cond-rd-operand-rn-shift-rs-t32.cc b/test/aarch32/test-assembler-cond-rd-operand-rn-shift-rs-t32.cc
index 8c1826d..4772e98 100644
--- a/test/aarch32/test-assembler-cond-rd-operand-rn-shift-rs-t32.cc
+++ b/test/aarch32/test-assembler-cond-rd-operand-rn-shift-rs-t32.cc
@@ -2138,8 +2138,12 @@
{{al, r0, r7, ROR, r9}, false, al, "al r0 r7 ROR r9", "al_r0_r7_ROR_r9"},
{{al, r9, r3, ROR, r13}, false, al, "al r9 r3 ROR r13", "al_r9_r3_ROR_r13"},
{{al, r3, r8, LSR, r4}, false, al, "al r3 r8 LSR r4", "al_r3_r8_LSR_r4"},
- {{al, r5, r10, ROR, r4}, false, al, "al r5 r10 ROR r4", "al_r5_r10_ROR_"
- "r4"}};
+ {{al, r5, r10, ROR, r4},
+ false,
+ al,
+ "al r5 r10 ROR r4",
+ "al_r5_r10_ROR_"
+ "r4"}};
// These headers each contain an array of `TestResult` with the reference output
// values. The reference arrays are names `kReference{mnemonic}`.
diff --git a/test/aarch32/test-assembler-cond-rdlow-operand-imm8-t32.cc b/test/aarch32/test-assembler-cond-rdlow-operand-imm8-t32.cc
index 05fc1c9..37b25ce 100644
--- a/test/aarch32/test-assembler-cond-rdlow-operand-imm8-t32.cc
+++ b/test/aarch32/test-assembler-cond-rdlow-operand-imm8-t32.cc
@@ -2140,8 +2140,12 @@
{{al, r7, 252}, false, al, "al r7 252", "al_r7_252"},
{{al, r7, 253}, false, al, "al r7 253", "al_r7_253"},
{{al, r7, 254}, false, al, "al r7 254", "al_r7_254"},
- {{al, r7, 255}, false, al, "al r7 255", "al_r7_"
- "255"}};
+ {{al, r7, 255},
+ false,
+ al,
+ "al r7 255",
+ "al_r7_"
+ "255"}};
// These headers each contain an array of `TestResult` with the reference output
// values. The reference arrays are names `kReference{mnemonic}`.
diff --git a/test/aarch32/test-disasm-a32.cc b/test/aarch32/test-disasm-a32.cc
index 3145935..45239c2 100644
--- a/test/aarch32/test-disasm-a32.cc
+++ b/test/aarch32/test-disasm-a32.cc
@@ -42,132 +42,146 @@
namespace aarch32 {
#define __ masm.
-#define TEST(name) TEST_(AARCH32_DISASM_##name)
+#define TEST(name) TEST_(AARCH32_DISASM_##name)
#ifdef VIXL_INCLUDE_TARGET_T32
-#define TEST_T32(name) TEST_(AARCH32_DISASM_##name)
+#define TEST_T32(name) TEST_(AARCH32_DISASM_##name)
#else
-#define TEST_T32(name) void Test##name()
+#define TEST_T32(name) void Test##name()
#endif
#ifdef VIXL_INCLUDE_TARGET_A32
-#define TEST_A32(name) TEST_(AARCH32_DISASM_##name)
+#define TEST_A32(name) TEST_(AARCH32_DISASM_##name)
#else
-#define TEST_A32(name) void Test##name()
+#define TEST_A32(name) void Test##name()
#endif
#define BUF_SIZE (4096)
-#define SETUP() \
- MacroAssembler masm(BUF_SIZE);
+#define SETUP() MacroAssembler masm(BUF_SIZE);
#define CLEANUP()
#ifdef VIXL_NEGATIVE_TESTING
-#define START_COMPARE() \
- { \
- try { \
+#define START_COMPARE() \
+ { \
+ try { \
int32_t start = masm.GetCursorOffset();
-#define END_COMPARE_CHECK_SIZE(EXP,SIZE) \
- int32_t end = masm.GetCursorOffset(); \
- masm.FinalizeCode(); \
- std::ostringstream ss; \
- TestDisassembler disassembler(ss, 0); \
- if (masm.IsUsingT32()) { \
- disassembler.DisassembleT32(*masm.GetBuffer(), start, end); \
- } else { \
- disassembler.DisassembleA32(*masm.GetBuffer(), start, end); \
- } \
- masm.GetBuffer()->Reset(); \
- if (Test::disassemble()) { \
- printf("----\n"); \
- printf("%s", ss.str().c_str()); \
- } \
- if (std::string(EXP) != ss.str()) { \
- printf("\n%s:%d:%s\nFound:\n%sExpected:\n%s", __FILE__, __LINE__, \
- masm.IsUsingT32() ? "T32" : "A32", ss.str().c_str(), EXP); \
- abort(); \
- } \
- if ((SIZE) != -1 && ((end - start) != (SIZE))) { \
- printf("\nExpected %d bits, found %d bits\n", \
- 8 * (SIZE), 8 * (end - start)); \
- abort(); \
- } \
- } catch (std::runtime_error e) { \
- const char *msg = e.what(); \
- printf("\n%s:%d:%s\nFound:\n%sExpected:\n%s", __FILE__, __LINE__, \
- masm.IsUsingT32() ? "T32" : "A32", msg, EXP); \
- abort(); \
- } \
+#define END_COMPARE_CHECK_SIZE(EXP, SIZE) \
+ int32_t end = masm.GetCursorOffset(); \
+ masm.FinalizeCode(); \
+ std::ostringstream ss; \
+ TestDisassembler disassembler(ss, 0); \
+ if (masm.IsUsingT32()) { \
+ disassembler.DisassembleT32(*masm.GetBuffer(), start, end); \
+ } else { \
+ disassembler.DisassembleA32(*masm.GetBuffer(), start, end); \
+ } \
+ masm.GetBuffer()->Reset(); \
+ if (Test::disassemble()) { \
+ printf("----\n"); \
+ printf("%s", ss.str().c_str()); \
+ } \
+ if (std::string(EXP) != ss.str()) { \
+ printf("\n%s:%d:%s\nFound:\n%sExpected:\n%s", \
+ __FILE__, \
+ __LINE__, \
+ masm.IsUsingT32() ? "T32" : "A32", \
+ ss.str().c_str(), \
+ EXP); \
+ abort(); \
+ } \
+ if ((SIZE) != -1 && ((end - start) != (SIZE))) { \
+ printf("\nExpected %d bits, found %d bits\n", \
+ 8 * (SIZE), \
+ 8 * (end - start)); \
+ abort(); \
+ } \
+ } \
+ catch (std::runtime_error e) { \
+ const char* msg = e.what(); \
+ printf("\n%s:%d:%s\nFound:\n%sExpected:\n%s", \
+ __FILE__, \
+ __LINE__, \
+ masm.IsUsingT32() ? "T32" : "A32", \
+ msg, \
+ EXP); \
+ abort(); \
+ } \
}
#else
-#define START_COMPARE() \
- { \
+#define START_COMPARE() \
+ { \
int32_t start = masm.GetCursorOffset();
-#define END_COMPARE_CHECK_SIZE(EXP,SIZE) \
- int32_t end = masm.GetCursorOffset(); \
- masm.FinalizeCode(); \
- std::ostringstream ss; \
- TestDisassembler disassembler(ss, 0); \
- if (masm.IsUsingT32()) { \
- disassembler.DisassembleT32(*masm.GetBuffer(), start, end); \
- } else { \
- disassembler.DisassembleA32(*masm.GetBuffer(), start, end); \
- } \
- masm.GetBuffer()->Reset(); \
- if (Test::disassemble()) { \
- printf("----\n"); \
- printf("%s", ss.str().c_str()); \
- } \
- if (std::string(EXP) != ss.str()) { \
- printf("\n%s:%d:%s\nFound:\n%sExpected:\n%s", __FILE__, __LINE__, \
- masm.IsUsingT32() ? "T32" : "A32", ss.str().c_str(), EXP); \
- abort(); \
- } \
- if ((SIZE) != -1 && ((end - start) != (SIZE))) { \
- printf("\nExpected %d bits, found %d bits\n", \
- 8 * (SIZE), 8 * (end - start)); \
- abort(); \
- } \
+#define END_COMPARE_CHECK_SIZE(EXP, SIZE) \
+ int32_t end = masm.GetCursorOffset(); \
+ masm.FinalizeCode(); \
+ std::ostringstream ss; \
+ TestDisassembler disassembler(ss, 0); \
+ if (masm.IsUsingT32()) { \
+ disassembler.DisassembleT32(*masm.GetBuffer(), start, end); \
+ } else { \
+ disassembler.DisassembleA32(*masm.GetBuffer(), start, end); \
+ } \
+ masm.GetBuffer()->Reset(); \
+ if (Test::disassemble()) { \
+ printf("----\n"); \
+ printf("%s", ss.str().c_str()); \
+ } \
+ if (std::string(EXP) != ss.str()) { \
+ printf("\n%s:%d:%s\nFound:\n%sExpected:\n%s", \
+ __FILE__, \
+ __LINE__, \
+ masm.IsUsingT32() ? "T32" : "A32", \
+ ss.str().c_str(), \
+ EXP); \
+ abort(); \
+ } \
+ if ((SIZE) != -1 && ((end - start) != (SIZE))) { \
+ printf("\nExpected %d bits, found %d bits\n", \
+ 8 * (SIZE), \
+ 8 * (end - start)); \
+ abort(); \
+ } \
}
#endif
-#define END_COMPARE(EXP) END_COMPARE_CHECK_SIZE(EXP,-1)
+#define END_COMPARE(EXP) END_COMPARE_CHECK_SIZE(EXP, -1)
#ifdef VIXL_INCLUDE_TARGET_A32
-#define COMPARE_A32(ASM, EXP) \
- masm.UseA32(); \
- START_COMPARE() \
- masm.ASM; \
+#define COMPARE_A32(ASM, EXP) \
+ masm.UseA32(); \
+ START_COMPARE() \
+ masm.ASM; \
END_COMPARE(EXP)
#else
#define COMPARE_A32(ASM, EXP)
#endif
#ifdef VIXL_INCLUDE_TARGET_T32
-#define COMPARE_T32(ASM, EXP) \
- masm.UseT32(); \
- START_COMPARE() \
- masm.ASM; \
+#define COMPARE_T32(ASM, EXP) \
+ masm.UseT32(); \
+ START_COMPARE() \
+ masm.ASM; \
END_COMPARE(EXP)
#else
#define COMPARE_T32(ASM, EXP)
#endif
#ifdef VIXL_INCLUDE_TARGET_T32
-#define COMPARE_T32_CHECK_SIZE(ASM, EXP, SIZE) \
- masm.UseT32(); \
- START_COMPARE() \
- masm.ASM; \
+#define COMPARE_T32_CHECK_SIZE(ASM, EXP, SIZE) \
+ masm.UseT32(); \
+ START_COMPARE() \
+ masm.ASM; \
END_COMPARE_CHECK_SIZE(EXP, SIZE)
#else
#define COMPARE_T32_CHECK_SIZE(ASM, EXP, SIZE)
#endif
-#define COMPARE_BOTH(ASM, EXP) \
- COMPARE_A32(ASM, EXP) \
+#define COMPARE_BOTH(ASM, EXP) \
+ COMPARE_A32(ASM, EXP) \
COMPARE_T32(ASM, EXP)
#ifdef VIXL_NEGATIVE_TESTING
@@ -175,18 +189,21 @@
{ \
try { \
int32_t start = masm.GetCursorOffset(); \
- ASM \
- int32_t end = masm.GetCursorOffset(); \
+ ASM int32_t end = masm.GetCursorOffset(); \
masm.FinalizeCode(); \
if (!TEMPORARILY_ACCEPTED) { \
std::ostringstream ss; \
PrintDisassembler disassembler(ss, 0); \
if (masm.IsUsingT32()) { \
- disassembler.DisassembleT32Buffer( \
- masm.GetBuffer()->GetOffsetAddress<uint16_t*>(start), end); \
+ disassembler.DisassembleT32Buffer(masm.GetBuffer() \
+ ->GetOffsetAddress<uint16_t*>( \
+ start), \
+ end); \
} else { \
- disassembler.DisassembleA32Buffer( \
- masm.GetBuffer()->GetOffsetAddress<uint32_t*>(start), end); \
+ disassembler.DisassembleA32Buffer(masm.GetBuffer() \
+ ->GetOffsetAddress<uint32_t*>( \
+ start), \
+ end); \
} \
printf("\n%s:%d:%s\nNo exception raised.\n", \
__FILE__, \
@@ -196,13 +213,14 @@
abort(); \
} \
} catch (std::runtime_error e) { \
- const char *msg = e.what(); \
+ const char* msg = e.what(); \
size_t exp_len = strlen(EXP); \
if (TEMPORARILY_ACCEPTED) { \
- printf("\nNegative MacroAssembler test that was temporarily " \
- "assembling a deprecated or unpredictable instruction is now " \
- "correctly raising an exception. Please update the " \
- "test to reflect this.\n"); \
+ printf( \
+ "\nNegative MacroAssembler test that was temporarily " \
+ "assembling a deprecated or unpredictable instruction is now " \
+ "correctly raising an exception. Please update the " \
+ "test to reflect this.\n"); \
printf("at: %s:%d:%s\n", \
__FILE__, \
__LINE__, \
@@ -212,69 +230,83 @@
printf("\n%s:%d:%s\nFound:\n%sExpected:\n%s...", \
__FILE__, \
__LINE__, \
- masm.IsUsingT32() ? "T32" : "A32", msg, EXP); \
+ masm.IsUsingT32() ? "T32" : "A32", \
+ msg, \
+ EXP); \
abort(); \
} \
} \
}
#ifdef VIXL_INCLUDE_TARGET_A32
-#define MUST_FAIL_TEST_A32(ASM, EXP) \
- masm.UseA32(); \
- NEGATIVE_TEST({ masm.ASM; }, EXP, false) \
+#define MUST_FAIL_TEST_A32(ASM, EXP) \
+ masm.UseA32(); \
+ NEGATIVE_TEST({ masm.ASM; }, EXP, false) \
masm.GetBuffer()->Reset();
#else
#define MUST_FAIL_TEST_A32(ASM, EXP)
#endif
#ifdef VIXL_INCLUDE_TARGET_T32
-#define MUST_FAIL_TEST_T32(ASM, EXP) \
- masm.UseT32(); \
- NEGATIVE_TEST({ masm.ASM; }, EXP, false) \
+#define MUST_FAIL_TEST_T32(ASM, EXP) \
+ masm.UseT32(); \
+ NEGATIVE_TEST({ masm.ASM; }, EXP, false) \
masm.GetBuffer()->Reset();
#else
#define MUST_FAIL_TEST_T32(ASM, EXP)
#endif
-#define MUST_FAIL_TEST_BOTH(ASM, EXP) \
- MUST_FAIL_TEST_A32(ASM, EXP) \
+#define MUST_FAIL_TEST_BOTH(ASM, EXP) \
+ MUST_FAIL_TEST_A32(ASM, EXP) \
MUST_FAIL_TEST_T32(ASM, EXP)
#ifdef VIXL_INCLUDE_TARGET_A32
-#define MUST_FAIL_TEST_A32_BLOCK(ASM, EXP) \
- masm.UseA32(); \
- NEGATIVE_TEST(ASM, EXP, false) \
+#define MUST_FAIL_TEST_A32_BLOCK(ASM, EXP) \
+ masm.UseA32(); \
+ NEGATIVE_TEST(ASM, EXP, false) \
masm.GetBuffer()->Reset();
#else
#define MUST_FAIL_TEST_A32_BLOCK(ASM, EXP)
#endif
#ifdef VIXL_INCLUDE_TARGET_T32
-#define MUST_FAIL_TEST_T32_BLOCK(ASM, EXP) \
- masm.UseT32(); \
- NEGATIVE_TEST(ASM, EXP, false) \
+#define MUST_FAIL_TEST_T32_BLOCK(ASM, EXP) \
+ masm.UseT32(); \
+ NEGATIVE_TEST(ASM, EXP, false) \
masm.GetBuffer()->Reset();
#else
#define MUST_FAIL_TEST_T32_BLOCK(ASM, EXP)
#endif
-#define MUST_FAIL_TEST_BOTH_BLOCK(ASM, EXP) \
- MUST_FAIL_TEST_A32_BLOCK(ASM, EXP) \
+#define MUST_FAIL_TEST_BOTH_BLOCK(ASM, EXP) \
+ MUST_FAIL_TEST_A32_BLOCK(ASM, EXP) \
MUST_FAIL_TEST_T32_BLOCK(ASM, EXP)
#else
// Skip negative tests.
-#define MUST_FAIL_TEST_A32(ASM, EXP) \
- printf("Skipping negative tests. To enable them, build with 'negative_testing=on'.\n");
-#define MUST_FAIL_TEST_T32(ASM, EXP) \
- printf("Skipping negative tests. To enable them, build with 'negative_testing=on'.\n");
-#define MUST_FAIL_TEST_BOTH(ASM, EXP) \
- printf("Skipping negative tests. To enable them, build with 'negative_testing=on'.\n");
-#define MUST_FAIL_TEST_A32_BLOCK(ASM, EXP) \
- printf("Skipping negative tests. To enable them, build with 'negative_testing=on'.\n");
-#define MUST_FAIL_TEST_T32_BLOCK(ASM, EXP) \
- printf("Skipping negative tests. To enable them, build with 'negative_testing=on'.\n");
-#define MUST_FAIL_TEST_BOTH_BLOCK(ASM, EXP) \
- printf("Skipping negative tests. To enable them, build with 'negative_testing=on'.\n");
+#define MUST_FAIL_TEST_A32(ASM, EXP) \
+ printf( \
+ "Skipping negative tests. To enable them, build with " \
+ "'negative_testing=on'.\n");
+#define MUST_FAIL_TEST_T32(ASM, EXP) \
+ printf( \
+ "Skipping negative tests. To enable them, build with " \
+ "'negative_testing=on'.\n");
+#define MUST_FAIL_TEST_BOTH(ASM, EXP) \
+ printf( \
+ "Skipping negative tests. To enable them, build with " \
+ "'negative_testing=on'.\n");
+#define MUST_FAIL_TEST_A32_BLOCK(ASM, EXP) \
+ printf( \
+ "Skipping negative tests. To enable them, build with " \
+ "'negative_testing=on'.\n");
+#define MUST_FAIL_TEST_T32_BLOCK(ASM, EXP) \
+ printf( \
+ "Skipping negative tests. To enable them, build with " \
+ "'negative_testing=on'.\n");
+#define MUST_FAIL_TEST_BOTH_BLOCK(ASM, EXP) \
+ printf( \
+ "Skipping negative tests. To enable them, build with " \
+ "'negative_testing=on'.\n");
#endif
#ifdef VIXL_NEGATIVE_TESTING
@@ -300,39 +332,42 @@
SHOULD_FAIL_TEST_A32(ASM) \
SHOULD_FAIL_TEST_T32(ASM)
#else
-#define SHOULD_FAIL_TEST_A32(ASM) \
- printf("Skipping negative tests. To enable them, build with 'negative_testing=on'.\n");
-#define SHOULD_FAIL_TEST_T32(ASM) \
- printf("Skipping negative tests. To enable them, build with 'negative_testing=on'.\n");
-#define SHOULD_FAIL_TEST_BOTH(ASM) \
- printf("Skipping negative tests. To enable them, build with 'negative_testing=on'.\n");
+#define SHOULD_FAIL_TEST_A32(ASM) \
+ printf( \
+ "Skipping negative tests. To enable them, build with " \
+ "'negative_testing=on'.\n");
+#define SHOULD_FAIL_TEST_T32(ASM) \
+ printf( \
+ "Skipping negative tests. To enable them, build with " \
+ "'negative_testing=on'.\n");
+#define SHOULD_FAIL_TEST_BOTH(ASM) \
+ printf( \
+ "Skipping negative tests. To enable them, build with " \
+ "'negative_testing=on'.\n");
#endif
class TestDisassembler : public PrintDisassembler {
public:
TestDisassembler(std::ostream& os, uint32_t pc) // NOLINT(runtime/references)
- : PrintDisassembler(os, pc) {
- }
+ : PrintDisassembler(os, pc) {}
virtual void PrintCodeAddress(uint32_t code_address) VIXL_OVERRIDE {
USE(code_address);
}
- virtual void PrintOpcode16(uint32_t opcode) VIXL_OVERRIDE {
- USE(opcode);
- }
+ virtual void PrintOpcode16(uint32_t opcode) VIXL_OVERRIDE { USE(opcode); }
- virtual void PrintOpcode32(uint32_t opcode) VIXL_OVERRIDE {
- USE(opcode);
- }
+ virtual void PrintOpcode32(uint32_t opcode) VIXL_OVERRIDE { USE(opcode); }
- void DisassembleA32(const CodeBuffer& buffer, ptrdiff_t start,
+ void DisassembleA32(const CodeBuffer& buffer,
+ ptrdiff_t start,
ptrdiff_t end) {
DisassembleA32Buffer(buffer.GetOffsetAddress<const uint32_t*>(start),
end - start);
}
- void DisassembleT32(const CodeBuffer& buffer, ptrdiff_t start,
+ void DisassembleT32(const CodeBuffer& buffer,
+ ptrdiff_t start,
ptrdiff_t end) {
DisassembleT32Buffer(buffer.GetOffsetAddress<const uint16_t*>(start),
end - start);
@@ -347,8 +382,9 @@
START_COMPARE()
masm.Add(r9, r10, r11);
masm.GetBuffer()->Emit16(kLowestT32_32Opcode >> 16);
- END_COMPARE("add r9, r10, r11\n"
- "?\n");
+ END_COMPARE(
+ "add r9, r10, r11\n"
+ "?\n");
CLEANUP();
}
@@ -361,8 +397,9 @@
START_COMPARE()
masm.Add(r9, r10, r11);
masm.Add(r0, r0, r1);
- END_COMPARE("add r9, r10, r11\n"
- "add r0, r1\n");
+ END_COMPARE(
+ "add r9, r10, r11\n"
+ "add r0, r1\n");
CLEANUP();
}
@@ -373,10 +410,8 @@
// - Identities.
- COMPARE_BOTH(Orn(r0, r1, 0),
- "mvn r0, #0\n");
- COMPARE_BOTH(Orn(r0, r0, 0xffffffff),
- "");
+ COMPARE_BOTH(Orn(r0, r1, 0), "mvn r0, #0\n");
+ COMPARE_BOTH(Orn(r0, r0, 0xffffffff), "");
// - Immediate form. This form does not need macro-assembler support
// for T32.
@@ -391,12 +426,9 @@
"orrs r0, ip\n");
// - Too large immediate form.
- COMPARE_BOTH(Orn(r0, r1, 0x00ffffff),
- "orr r0, r1, #0xff000000\n");
- COMPARE_BOTH(Orn(r0, r1, 0xff00ffff),
- "orr r0, r1, #0xff0000\n");
- COMPARE_BOTH(Orns(r0, r1, 0x00ffffff),
- "orrs r0, r1, #0xff000000\n");
+ COMPARE_BOTH(Orn(r0, r1, 0x00ffffff), "orr r0, r1, #0xff000000\n");
+ COMPARE_BOTH(Orn(r0, r1, 0xff00ffff), "orr r0, r1, #0xff0000\n");
+ COMPARE_BOTH(Orns(r0, r1, 0x00ffffff), "orrs r0, r1, #0xff000000\n");
COMPARE_A32(Orns(r0, r1, 0xabcd2345),
"mov ip, #9029\n"
@@ -610,82 +642,82 @@
COMPARE_BOTH(Ldr(r0, MemOperand(r1, 0xfff123)),
"add r0, r1, #1044480\n" // #0xff000
- "add r0, #15728640\n" // #0x00f00000
+ "add r0, #15728640\n" // #0x00f00000
"ldr r0, [r0, #291]\n"); // #0x123
COMPARE_BOTH(Ldr(r0, MemOperand(r1, 0xff123)),
"add r0, r1, #1044480\n" // #0xff000
"ldr r0, [r0, #291]\n"); // #0x123
COMPARE_BOTH(Ldr(r0, MemOperand(r1, -0xff123)),
- "sub r0, r1, #1048576\n" // #0x100000
+ "sub r0, r1, #1048576\n" // #0x100000
"ldr r0, [r0, #3805]\n"); // #0xedd
COMPARE_A32(Ldr(r0, MemOperand(r1, 0xfff123, PreIndex)),
- "add r1, #1044480\n" // #0xff000
- "add r1, #15728640\n" // #0x00f00000
+ "add r1, #1044480\n" // #0xff000
+ "add r1, #15728640\n" // #0x00f00000
"ldr r0, [r1, #291]!\n"); // #0x123
COMPARE_A32(Ldr(r0, MemOperand(r1, 0xff123, PreIndex)),
- "add r1, #1044480\n" // #0xff000
+ "add r1, #1044480\n" // #0xff000
"ldr r0, [r1, #291]!\n"); // #0x123
COMPARE_A32(Ldr(r0, MemOperand(r1, -0xff123, PreIndex)),
- "sub r1, #1048576\n" // #0x100000
+ "sub r1, #1048576\n" // #0x100000
"ldr r0, [r1, #3805]!\n"); // #0xedd
COMPARE_T32(Ldr(r0, MemOperand(r1, 0xfff12, PreIndex)),
- "add r1, #65280\n" // #0xff00
- "add r1, #983040\n" // #0x000f0000
+ "add r1, #65280\n" // #0xff00
+ "add r1, #983040\n" // #0x000f0000
"ldr r0, [r1, #18]!\n"); // #0x12
COMPARE_T32(Ldr(r0, MemOperand(r1, 0xff12, PreIndex)),
- "add r1, #65280\n" // #0xff00
+ "add r1, #65280\n" // #0xff00
"ldr r0, [r1, #18]!\n"); // #0x12
COMPARE_T32(Ldr(r0, MemOperand(r1, -0xff12, PreIndex)),
- "sub r1, #65536\n" // #0x10000
+ "sub r1, #65536\n" // #0x10000
"ldr r0, [r1, #238]!\n"); // #0xee
COMPARE_A32(Ldr(r0, MemOperand(r1, 0xfff123, PostIndex)),
- "ldr r0, [r1], #291\n" // #0x123
- "add r1, #1044480\n" // #0xff000
+ "ldr r0, [r1], #291\n" // #0x123
+ "add r1, #1044480\n" // #0xff000
"add r1, #15728640\n"); // #0x00f00000
COMPARE_A32(Ldr(r0, MemOperand(r1, 0xff123, PostIndex)),
"ldr r0, [r1], #291\n" // #0x123
"add r1, #1044480\n"); // #0xff000
COMPARE_A32(Ldr(r0, MemOperand(r1, -0xff123, PostIndex)),
"ldr r0, [r1], #3805\n" // #0xedd
- "sub r1, #1048576\n"); // #0x100000
+ "sub r1, #1048576\n"); // #0x100000
COMPARE_T32(Ldr(r0, MemOperand(r1, 0xfff12, PostIndex)),
"ldr r0, [r1], #18\n" // #0x12
- "add r1, #65280\n" // #0xff00
+ "add r1, #65280\n" // #0xff00
"add r1, #983040\n"); // #0x000f0000
COMPARE_T32(Ldr(r0, MemOperand(r1, 0xff12, PostIndex)),
"ldr r0, [r1], #18\n" // #0x12
- "add r1, #65280\n"); // #0xff00
+ "add r1, #65280\n"); // #0xff00
COMPARE_T32(Ldr(r0, MemOperand(r1, -0xff12, PostIndex)),
"ldr r0, [r1], #238\n" // #0xee
- "sub r1, #65536\n"); // #0x10000
+ "sub r1, #65536\n"); // #0x10000
COMPARE_A32(Ldrh(r0, MemOperand(r1, 0xfff123)),
- "add r0, r1, #61696\n" // #0xf100
- "add r0, #16711680\n" // #0x00ff0000
+ "add r0, r1, #61696\n" // #0xf100
+ "add r0, #16711680\n" // #0x00ff0000
"ldrh r0, [r0, #35]\n"); // #0x23
COMPARE_T32(Ldrh(r0, MemOperand(r1, 0xfff123)),
- "add r0, r1, #1044480\n" // #0xff000
- "add r0, #15728640\n" // #0x00f00000
+ "add r0, r1, #1044480\n" // #0xff000
+ "add r0, #15728640\n" // #0x00f00000
"ldrh r0, [r0, #291]\n"); // #0x123
COMPARE_A32(Ldrh(r0, MemOperand(r1, 0xff123)),
- "add r0, r1, #61696\n" // #0xf100
- "add r0, #983040\n" // #0x000f0000
- "ldrh r0, [r0, #35]\n"); // #0x23
+ "add r0, r1, #61696\n" // #0xf100
+ "add r0, #983040\n" // #0x000f0000
+ "ldrh r0, [r0, #35]\n"); // #0x23
COMPARE_T32(Ldrh(r0, MemOperand(r1, 0xff123)),
- "add r0, r1, #1044480\n" // #0xff000
- "ldrh r0, [r0, #291]\n"); // #0x123
+ "add r0, r1, #1044480\n" // #0xff000
+ "ldrh r0, [r0, #291]\n"); // #0x123
COMPARE_A32(Ldrh(r0, MemOperand(r1, -0xff123)),
- "sub r0, r1, #61952\n" // #0xf200
- "sub r0, #983040\n" // #0x000f0000
- "ldrh r0, [r0, #221]\n"); // #0xdd
+ "sub r0, r1, #61952\n" // #0xf200
+ "sub r0, #983040\n" // #0x000f0000
+ "ldrh r0, [r0, #221]\n"); // #0xdd
COMPARE_T32(Ldrh(r0, MemOperand(r1, -0xff123)),
- "sub r0, r1, #1048576\n" // #0x100000
- "ldrh r0, [r0, #3805]\n"); // #0xedd
+ "sub r0, r1, #1048576\n" // #0x100000
+ "ldrh r0, [r0, #3805]\n"); // #0xedd
MUST_FAIL_TEST_BOTH(Ldr(r0, MemOperand(r0, 0xfff12, PreIndex)),
"Ill-formed 'ldr' instruction.\n");
@@ -699,8 +731,7 @@
SETUP();
// Register base and offset that we can encode in both A1 and T1.
- COMPARE_BOTH(Ldr(r0, MemOperand(r1, r8, Offset)),
- "ldr r0, [r1, r8]\n");
+ COMPARE_BOTH(Ldr(r0, MemOperand(r1, r8, Offset)), "ldr r0, [r1, r8]\n");
// Negative register offset. Use the destination as a scratch register,
// regardless of the values of the base and offset register.
@@ -731,38 +762,36 @@
"sub r1, r2\n");
// SP is allowed as base, offset and destination.
- COMPARE_BOTH(Ldr(sp, MemOperand(sp, sp, Offset)),
- "ldr sp, [sp, sp]\n");
+ COMPARE_BOTH(Ldr(sp, MemOperand(sp, sp, Offset)), "ldr sp, [sp, sp]\n");
// PC is allowed as destination - make sure it is not used as a temporary
// register.
- COMPARE_BOTH(Ldr(pc, MemOperand(r0, r0, Offset)),
- "ldr pc, [r0, r0]\n");
- COMPARE_A32(Ldr(pc, MemOperand(r0, r0, PreIndex)),
- "ldr pc, [r0, r0]!\n");
+ COMPARE_BOTH(Ldr(pc, MemOperand(r0, r0, Offset)), "ldr pc, [r0, r0]\n");
+ COMPARE_A32(Ldr(pc, MemOperand(r0, r0, PreIndex)), "ldr pc, [r0, r0]!\n");
COMPARE_T32(Ldr(pc, MemOperand(r0, r0, PreIndex)),
"add r0, r0\n"
"ldr pc, [r0]\n");
- COMPARE_A32(Ldr(pc, MemOperand(r0, r0, PostIndex)),
- "ldr pc, [r0], r0\n");
+ COMPARE_A32(Ldr(pc, MemOperand(r0, r0, PostIndex)), "ldr pc, [r0], r0\n");
COMPARE_T32(Ldr(pc, MemOperand(r0, r0, PostIndex)),
"ldr pc, [r0]\n"
"add r0, r0\n");
// PC is allowed as register base in the offset variant only for A32.
- COMPARE_A32(Ldr(r0, MemOperand(pc, r0, Offset)),
- "ldr r0, [pc, r0]\n");
+ COMPARE_A32(Ldr(r0, MemOperand(pc, r0, Offset)), "ldr r0, [pc, r0]\n");
MUST_FAIL_TEST_T32(Ldr(r0, MemOperand(pc, r0, Offset)),
"The MacroAssembler does not convert loads and stores with"
" a PC base register for T32.\n");
- // PC is not allowed as register base in the pre-index and post-index variants.
+ // PC is not allowed as register base in the pre-index and post-index
+ // variants.
MUST_FAIL_TEST_T32(Ldr(r0, MemOperand(pc, r0, PreIndex)),
"The MacroAssembler does not convert loads and stores "
- "with a PC base register in pre-index or post-index mode.\n");
+ "with a PC base register in pre-index or post-index "
+ "mode.\n");
MUST_FAIL_TEST_T32(Ldr(r0, MemOperand(pc, r0, PostIndex)),
"The MacroAssembler does not convert loads and stores "
- "with a PC base register in pre-index or post-index mode.\n");
+ "with a PC base register in pre-index or post-index "
+ "mode.\n");
// We don't convert loads with PC as the register offset.
MUST_FAIL_TEST_T32(Ldr(r0, MemOperand(r0, minus, pc, Offset)),
@@ -771,7 +800,7 @@
MUST_FAIL_TEST_T32(Ldr(r0, MemOperand(r0, pc, PreIndex)),
"The MacroAssembler does not convert loads and stores "
"with a PC offset register.\n");
- MUST_FAIL_TEST_T32(Ldr(r0, MemOperand(r0, pc, PostIndex)),
+ MUST_FAIL_TEST_T32(Ldr(r0, MemOperand(r0, pc, PostIndex)),
"The MacroAssembler does not convert loads and stores "
"with a PC offset register.\n");
@@ -788,9 +817,9 @@
SHOULD_FAIL_TEST_A32(Ldr(r0, MemOperand(r0, r1, PreIndex)));
SHOULD_FAIL_TEST_A32(Ldr(r0, MemOperand(r0, r1, PostIndex)));
MUST_FAIL_TEST_T32(Ldr(r0, MemOperand(r0, r1, PreIndex)),
- "Ill-formed 'ldr' instruction.\n");
+ "Ill-formed 'ldr' instruction.\n");
MUST_FAIL_TEST_T32(Ldr(r0, MemOperand(r0, r1, PostIndex)),
- "Ill-formed 'ldr' instruction.\n");
+ "Ill-formed 'ldr' instruction.\n");
CLEANUP();
}
@@ -800,8 +829,7 @@
SETUP();
// Register base and offset that we can encode in both A1 and T1.
- COMPARE_BOTH(Str(r0, MemOperand(r1, r8, Offset)),
- "str r0, [r1, r8]\n");
+ COMPARE_BOTH(Str(r0, MemOperand(r1, r8, Offset)), "str r0, [r1, r8]\n");
// Negative register offset.
COMPARE_T32(Str(r0, MemOperand(r0, minus, r0, Offset)),
@@ -831,35 +859,33 @@
"sub r1, r2\n");
// SP is allowed as base, offset and source.
- COMPARE_BOTH(Str(sp, MemOperand(sp, sp, Offset)),
- "str sp, [sp, sp]\n");
+ COMPARE_BOTH(Str(sp, MemOperand(sp, sp, Offset)), "str sp, [sp, sp]\n");
// TODO: PC is allowed as the value we are storing for A32, but
// should not be allowed for T32 (unpredictable).
- COMPARE_A32(Str(pc, MemOperand(r0, r0, Offset)),
- "str pc, [r0, r0]\n");
- COMPARE_A32(Str(pc, MemOperand(r0, r0, PreIndex)),
- "str pc, [r0, r0]!\n");
- COMPARE_A32(Str(pc, MemOperand(r0, r0, PostIndex)),
- "str pc, [r0], r0\n");
+ COMPARE_A32(Str(pc, MemOperand(r0, r0, Offset)), "str pc, [r0, r0]\n");
+ COMPARE_A32(Str(pc, MemOperand(r0, r0, PreIndex)), "str pc, [r0, r0]!\n");
+ COMPARE_A32(Str(pc, MemOperand(r0, r0, PostIndex)), "str pc, [r0], r0\n");
SHOULD_FAIL_TEST_T32(Str(pc, MemOperand(r0, r0, Offset)));
SHOULD_FAIL_TEST_T32(Str(pc, MemOperand(r0, r0, PreIndex)));
SHOULD_FAIL_TEST_T32(Str(pc, MemOperand(r0, r0, PostIndex)));
// PC is allowed as register base in the offset variant only for A32.
- COMPARE_A32(Str(r0, MemOperand(pc, r0, Offset)),
- "str r0, [pc, r0]\n");
+ COMPARE_A32(Str(r0, MemOperand(pc, r0, Offset)), "str r0, [pc, r0]\n");
MUST_FAIL_TEST_T32(Str(r0, MemOperand(pc, r0, Offset)),
"The MacroAssembler does not convert loads and stores with"
" a PC base register for T32.\n");
- // PC is not allowed as register base in the pre-index and post-index variants.
+ // PC is not allowed as register base in the pre-index and post-index
+ // variants.
MUST_FAIL_TEST_T32(Str(r0, MemOperand(pc, r0, PreIndex)),
"The MacroAssembler does not convert loads and stores "
- "with a PC base register in pre-index or post-index mode.\n");
+ "with a PC base register in pre-index or post-index "
+ "mode.\n");
MUST_FAIL_TEST_T32(Str(r0, MemOperand(pc, r0, PostIndex)),
"The MacroAssembler does not convert loads and stores "
- "with a PC base register in pre-index or post-index mode.\n");
+ "with a PC base register in pre-index or post-index "
+ "mode.\n");
// We don't convert loads with PC as the register offset.
MUST_FAIL_TEST_T32(Str(r0, MemOperand(r0, minus, pc, Offset)),
@@ -868,7 +894,7 @@
MUST_FAIL_TEST_T32(Str(r0, MemOperand(r0, pc, PreIndex)),
"The MacroAssembler does not convert loads and stores "
"with a PC offset register.\n");
- MUST_FAIL_TEST_T32(Str(r0, MemOperand(r0, pc, PostIndex)),
+ MUST_FAIL_TEST_T32(Str(r0, MemOperand(r0, pc, PostIndex)),
"The MacroAssembler does not convert loads and stores "
"with a PC offset register.\n");
@@ -885,9 +911,9 @@
SHOULD_FAIL_TEST_A32(Str(r0, MemOperand(r0, r1, PreIndex)));
SHOULD_FAIL_TEST_A32(Str(r0, MemOperand(r0, r1, PostIndex)));
MUST_FAIL_TEST_T32(Str(r0, MemOperand(r0, r1, PreIndex)),
- "Ill-formed 'str' instruction.\n");
+ "Ill-formed 'str' instruction.\n");
MUST_FAIL_TEST_T32(Str(r0, MemOperand(r0, r1, PostIndex)),
- "Ill-formed 'str' instruction.\n");
+ "Ill-formed 'str' instruction.\n");
CLEANUP();
}
@@ -898,34 +924,29 @@
// - Tests with no offset.
- COMPARE_BOTH(Ldrd(r0, r1, MemOperand(r3)),
- "ldrd r0, r1, [r3]\n");
+ COMPARE_BOTH(Ldrd(r0, r1, MemOperand(r3)), "ldrd r0, r1, [r3]\n");
// Destination registers need to start with a even numbered register on A32.
MUST_FAIL_TEST_A32(Ldrd(r1, r2, MemOperand(r3)),
"Unpredictable instruction.\n");
- COMPARE_T32(Ldrd(r1, r2, MemOperand(r3)),
- "ldrd r1, r2, [r3]\n");
+ COMPARE_T32(Ldrd(r1, r2, MemOperand(r3)), "ldrd r1, r2, [r3]\n");
// Registers need to be adjacent on A32.
MUST_FAIL_TEST_A32(Ldrd(r0, r2, MemOperand(r1)),
"Ill-formed 'ldrd' instruction.\n");
- COMPARE_T32(Ldrd(r0, r2, MemOperand(r1)),
- "ldrd r0, r2, [r1]\n");
+ COMPARE_T32(Ldrd(r0, r2, MemOperand(r1)), "ldrd r0, r2, [r1]\n");
- COMPARE_BOTH(Ldrd(r0, r1, MemOperand(r2)),
- "ldrd r0, r1, [r2]\n");
+ COMPARE_BOTH(Ldrd(r0, r1, MemOperand(r2)), "ldrd r0, r1, [r2]\n");
// - Tests with immediate offsets.
COMPARE_A32(Ldrd(r0, r1, MemOperand(r2, 1020)),
- "add r0, r2, #1020\n"
- "ldrd r0, r1, [r0]\n");
- COMPARE_T32(Ldrd(r0, r1, MemOperand(r2, 1020)),
- "ldrd r0, r1, [r2, #1020]\n");
+ "add r0, r2, #1020\n"
+ "ldrd r0, r1, [r0]\n");
+ COMPARE_T32(Ldrd(r0, r1, MemOperand(r2, 1020)), "ldrd r0, r1, [r2, #1020]\n");
COMPARE_A32(Ldrd(r0, r1, MemOperand(r2, -1020)),
- "sub r0, r2, #1020\n"
- "ldrd r0, r1, [r0]\n");
+ "sub r0, r2, #1020\n"
+ "ldrd r0, r1, [r0]\n");
COMPARE_T32(Ldrd(r0, r1, MemOperand(r2, -1020)),
- "ldrd r0, r1, [r2, #-1020]\n");
+ "ldrd r0, r1, [r2, #-1020]\n");
COMPARE_A32(Ldrd(r0, r1, MemOperand(r2, 0xabcc)),
"add r0, r2, #43776\n"
@@ -1080,8 +1101,7 @@
// - Tests with register offsets.
- COMPARE_A32(Ldrd(r0, r1, MemOperand(r2, r3)),
- "ldrd r0, r1, [r2, r3]\n");
+ COMPARE_A32(Ldrd(r0, r1, MemOperand(r2, r3)), "ldrd r0, r1, [r2, r3]\n");
COMPARE_T32(Ldrd(r0, r1, MemOperand(r2, r3)),
"add r0, r2, r3\n"
"ldrd r0, r1, [r0]\n");
@@ -1151,29 +1171,24 @@
// - Tests with no offset.
- COMPARE_BOTH(Strd(r0, r1, MemOperand(r3)),
- "strd r0, r1, [r3]\n");
+ COMPARE_BOTH(Strd(r0, r1, MemOperand(r3)), "strd r0, r1, [r3]\n");
// Destination registers need to start with a even numbered register on A32.
MUST_FAIL_TEST_A32(Strd(r1, r2, MemOperand(r3)),
"Unpredictable instruction.\n");
- COMPARE_T32(Strd(r1, r2, MemOperand(r3)),
- "strd r1, r2, [r3]\n");
+ COMPARE_T32(Strd(r1, r2, MemOperand(r3)), "strd r1, r2, [r3]\n");
// Registers need to be adjacent on A32.
MUST_FAIL_TEST_A32(Strd(r0, r2, MemOperand(r1)),
"Ill-formed 'strd' instruction.\n");
- COMPARE_T32(Strd(r0, r2, MemOperand(r1)),
- "strd r0, r2, [r1]\n");
+ COMPARE_T32(Strd(r0, r2, MemOperand(r1)), "strd r0, r2, [r1]\n");
- COMPARE_BOTH(Strd(r0, r1, MemOperand(r2)),
- "strd r0, r1, [r2]\n");
+ COMPARE_BOTH(Strd(r0, r1, MemOperand(r2)), "strd r0, r1, [r2]\n");
// - Tests with immediate offsets.
COMPARE_A32(Strd(r0, r1, MemOperand(r2, 1020)),
"add ip, r2, #1020\n"
"strd r0, r1, [ip]\n");
- COMPARE_T32(Strd(r0, r1, MemOperand(r2, 1020)),
- "strd r0, r1, [r2, #1020]\n");
+ COMPARE_T32(Strd(r0, r1, MemOperand(r2, 1020)), "strd r0, r1, [r2, #1020]\n");
COMPARE_A32(Strd(r0, r1, MemOperand(r2, -1020)),
"sub ip, r2, #1020\n"
"strd r0, r1, [ip]\n");
@@ -1333,8 +1348,7 @@
// - Tests with register offsets.
- COMPARE_A32(Strd(r0, r1, MemOperand(r2, r3)),
- "strd r0, r1, [r2, r3]\n");
+ COMPARE_A32(Strd(r0, r1, MemOperand(r2, r3)), "strd r0, r1, [r2, r3]\n");
COMPARE_T32(Strd(r0, r1, MemOperand(r2, r3)),
"add ip, r2, r3\n"
"strd r0, r1, [ip]\n");
@@ -1404,18 +1418,18 @@
SETUP();
COMPARE_BOTH(Adc(r0, r1, 0xbadbeef),
- "mov r0, #48879\n"
- "movt r0, #2989\n"
- "adc r0, r1, r0\n");
+ "mov r0, #48879\n"
+ "movt r0, #2989\n"
+ "adc r0, r1, r0\n");
COMPARE_BOTH(Add(r0, r0, 0xbadbeef),
- "mov ip, #48879\n"
- "movt ip, #2989\n"
- "add r0, ip\n");
+ "mov ip, #48879\n"
+ "movt ip, #2989\n"
+ "add r0, ip\n");
COMPARE_BOTH(Mov(r0, 0xbadbeef),
- "mov r0, #48879\n"
- "movt r0, #2989\n");
+ "mov r0, #48879\n"
+ "movt r0, #2989\n");
COMPARE_A32(Mov(eq, r0, 0xbadbeef),
"moveq r0, #48879\n"
"movteq r0, #2989\n");
@@ -1425,9 +1439,9 @@
"movt r0, #2989\n");
COMPARE_BOTH(Movs(r0, 0xbadbeef),
- "mov r0, #48879\n"
- "movt r0, #2989\n"
- "tst r0, r0\n");
+ "mov r0, #48879\n"
+ "movt r0, #2989\n"
+ "tst r0, r0\n");
COMPARE_A32(Movs(eq, r0, 0xbadbeef),
"moveq r0, #48879\n"
"movteq r0, #2989\n"
@@ -1437,16 +1451,14 @@
"mov r0, #48879\n"
"movt r0, #2989\n"
"tst r0, r0\n");
- COMPARE_A32(Movs(pc, 0x1),
- "movs pc, #1\n");
+ COMPARE_A32(Movs(pc, 0x1), "movs pc, #1\n");
MUST_FAIL_TEST_T32(Movs(pc, 0x1), "Unpredictable instruction.\n");
- MUST_FAIL_TEST_BOTH(Movs(pc, 0xbadbeed),
- "Ill-formed 'movs' instruction.\n");
+ MUST_FAIL_TEST_BOTH(Movs(pc, 0xbadbeed), "Ill-formed 'movs' instruction.\n");
COMPARE_BOTH(Mov(pc, 0xbadbeef),
- "mov ip, #48879\n"
- "movt ip, #2989\n"
- "bx ip\n");
+ "mov ip, #48879\n"
+ "movt ip, #2989\n"
+ "bx ip\n");
COMPARE_A32(Mov(eq, pc, 0xbadbeef),
"mov ip, #48879\n"
"movt ip, #2989\n"
@@ -1465,10 +1477,8 @@
SETUP();
// Identities.
- COMPARE_BOTH(And(r0, r1, 0),
- "mov r0, #0\n");
- COMPARE_BOTH(And(r0, r0, 0xffffffff),
- "");
+ COMPARE_BOTH(And(r0, r1, 0), "mov r0, #0\n");
+ COMPARE_BOTH(And(r0, r0, 0xffffffff), "");
CLEANUP();
}
@@ -1477,10 +1487,8 @@
SETUP();
// Identities.
- COMPARE_BOTH(Bic(r0, r1, 0xffffffff),
- "mov r0, #0\n");
- COMPARE_BOTH(Bic(r0, r0, 0),
- "");
+ COMPARE_BOTH(Bic(r0, r1, 0xffffffff), "mov r0, #0\n");
+ COMPARE_BOTH(Bic(r0, r0, 0), "");
CLEANUP();
}
@@ -1489,10 +1497,8 @@
SETUP();
// Identities.
- COMPARE_BOTH(Orr(r0, r1, 0xffffffff),
- "mvn r0, #0\n");
- COMPARE_BOTH(Orr(r0, r0, 0),
- "");
+ COMPARE_BOTH(Orr(r0, r1, 0xffffffff), "mvn r0, #0\n");
+ COMPARE_BOTH(Orr(r0, r0, 0), "");
CLEANUP();
}
@@ -1502,52 +1508,34 @@
// Special case for Orr <-> Orn correspondance.
- COMPARE_T32(Orr(r0, r1, 0x00ffffff),
- "orn r0, r1, #0xff000000\n");
- COMPARE_T32(Orrs(r0, r1, 0x00ffffff),
- "orns r0, r1, #0xff000000\n");
+ COMPARE_T32(Orr(r0, r1, 0x00ffffff), "orn r0, r1, #0xff000000\n");
+ COMPARE_T32(Orrs(r0, r1, 0x00ffffff), "orns r0, r1, #0xff000000\n");
// Encodable immediates.
- COMPARE_A32(Add(r0, r1, -1),
- "sub r0, r1, #1\n");
- COMPARE_A32(Adds(r0, r1, -1),
- "subs r0, r1, #1\n");
+ COMPARE_A32(Add(r0, r1, -1), "sub r0, r1, #1\n");
+ COMPARE_A32(Adds(r0, r1, -1), "subs r0, r1, #1\n");
// 0xffffffff is encodable in a T32 ADD.
- COMPARE_T32(Add(r0, r1, -1),
- "add r0, r1, #4294967295\n");
- COMPARE_T32(Adds(r0, r1, -1),
- "adds r0, r1, #4294967295\n");
+ COMPARE_T32(Add(r0, r1, -1), "add r0, r1, #4294967295\n");
+ COMPARE_T32(Adds(r0, r1, -1), "adds r0, r1, #4294967295\n");
- COMPARE_BOTH(Add(r0, r1, -4),
- "sub r0, r1, #4\n");
- COMPARE_BOTH(Adds(r0, r1, -4),
- "subs r0, r1, #4\n");
+ COMPARE_BOTH(Add(r0, r1, -4), "sub r0, r1, #4\n");
+ COMPARE_BOTH(Adds(r0, r1, -4), "subs r0, r1, #4\n");
- COMPARE_BOTH(Adc(r0, r1, -2),
- "sbc r0, r1, #1\n");
- COMPARE_BOTH(Adcs(r0, r1, -2),
- "sbcs r0, r1, #1\n");
+ COMPARE_BOTH(Adc(r0, r1, -2), "sbc r0, r1, #1\n");
+ COMPARE_BOTH(Adcs(r0, r1, -2), "sbcs r0, r1, #1\n");
- COMPARE_A32(Sub(r0, r1, -1),
- "add r0, r1, #1\n");
- COMPARE_A32(Subs(r0, r1, -1),
- "adds r0, r1, #1\n");
+ COMPARE_A32(Sub(r0, r1, -1), "add r0, r1, #1\n");
+ COMPARE_A32(Subs(r0, r1, -1), "adds r0, r1, #1\n");
// 0xffffffff is encodable in a T32 SUB.
- COMPARE_T32(Sub(r0, r1, -1),
- "sub r0, r1, #4294967295\n");
- COMPARE_T32(Subs(r0, r1, -1),
- "subs r0, r1, #4294967295\n");
+ COMPARE_T32(Sub(r0, r1, -1), "sub r0, r1, #4294967295\n");
+ COMPARE_T32(Subs(r0, r1, -1), "subs r0, r1, #4294967295\n");
- COMPARE_BOTH(Sub(r0, r1, -4),
- "add r0, r1, #4\n");
- COMPARE_BOTH(Subs(r0, r1, -4),
- "adds r0, r1, #4\n");
+ COMPARE_BOTH(Sub(r0, r1, -4), "add r0, r1, #4\n");
+ COMPARE_BOTH(Subs(r0, r1, -4), "adds r0, r1, #4\n");
- COMPARE_BOTH(Sbc(r0, r1, -5),
- "adc r0, r1, #4\n");
- COMPARE_BOTH(Sbcs(r0, r1, -5),
- "adcs r0, r1, #4\n");
+ COMPARE_BOTH(Sbc(r0, r1, -5), "adc r0, r1, #4\n");
+ COMPARE_BOTH(Sbcs(r0, r1, -5), "adcs r0, r1, #4\n");
// Non-encodable immediates
@@ -1556,7 +1544,7 @@
"adc r0, r1, r0\n");
COMPARE_BOTH(Adc(r0, r1, -0xabcd),
- "mov r0, #43980\n" // This represents #0xabcd - 1.
+ "mov r0, #43980\n" // This represents #0xabcd - 1.
"sbc r0, r1, r0\n");
COMPARE_BOTH(Adc(r0, r1, 0x1234abcd),
@@ -1565,7 +1553,7 @@
"adc r0, r1, r0\n");
COMPARE_BOTH(Adc(r0, r1, -0x1234abcd),
- "mov r0, #43980\n" // This represents #0x1234abcd - 1.
+ "mov r0, #43980\n" // This represents #0x1234abcd - 1.
"movt r0, #4660\n"
"sbc r0, r1, r0\n");
@@ -1576,7 +1564,7 @@
"sbc r0, ip\n");
COMPARE_BOTH(Sbc(r0, r0, -0xabcd),
- "mov ip, #43980\n" // This represents #0xabcd - 1.
+ "mov ip, #43980\n" // This represents #0xabcd - 1.
"adc r0, ip\n");
COMPARE_BOTH(Sbc(r0, r0, 0x1234abcd),
@@ -1585,7 +1573,7 @@
"sbc r0, ip\n");
COMPARE_BOTH(Sbc(r0, r0, -0x1234abcd),
- "mov ip, #43980\n" // This represents #0x1234abcd - 1.
+ "mov ip, #43980\n" // This represents #0x1234abcd - 1.
"movt ip, #4660\n"
"adc r0, ip\n");
@@ -1659,14 +1647,14 @@
// Make sure GetArchitectureStatePCOffset() returns the correct value.
__ UseT32();
// Largest encodable offset.
- Label label_126(__ GetCursorOffset() + __ GetArchitectureStatePCOffset(), 126);
- COMPARE_T32(Cbz(r0, &label_126),
- "cbz r0, 0x00000082\n");
- COMPARE_T32(Cbnz(r0, &label_126),
- "cbnz r0, 0x00000082\n");
+ Label label_126(__ GetCursorOffset() + __ GetArchitectureStatePCOffset(),
+ 126);
+ COMPARE_T32(Cbz(r0, &label_126), "cbz r0, 0x00000082\n");
+ COMPARE_T32(Cbnz(r0, &label_126), "cbnz r0, 0x00000082\n");
// Offset cannot be encoded.
- Label label_128(__ GetCursorOffset() + __ GetArchitectureStatePCOffset(), 128);
+ Label label_128(__ GetCursorOffset() + __ GetArchitectureStatePCOffset(),
+ 128);
COMPARE_T32(Cbz(r0, &label_128),
"cbnz r0, 0x00000004\n"
"b 0x00000084\n");
@@ -1675,7 +1663,8 @@
"b 0x00000084\n");
// Offset that cannot be encoded and needs 32-bit branch instruction.
- Label label_8192(__ GetCursorOffset() + __ GetArchitectureStatePCOffset(), 8192);
+ Label label_8192(__ GetCursorOffset() + __ GetArchitectureStatePCOffset(),
+ 8192);
COMPARE_T32(Cbz(r0, &label_8192),
"cbnz r0, 0x00000006\n"
"b 0x00002004\n");
@@ -1693,7 +1682,8 @@
"b 0xfffffffc\n");
// Large negative offset.
- Label label_neg128(__ GetCursorOffset() + __ GetArchitectureStatePCOffset(), -128);
+ Label label_neg128(__ GetCursorOffset() + __ GetArchitectureStatePCOffset(),
+ -128);
COMPARE_T32(Cbz(r0, &label_neg128),
"cbnz r0, 0x00000004\n"
"b 0xffffff84\n");
@@ -1706,206 +1696,176 @@
}
-#define TEST_VMEMOP(MACRO_OP, STRING_OP, DST_REG) \
- SETUP(); \
- \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r8, 1024)), \
- "add ip, r8, #1024\n" \
- STRING_OP # DST_REG ", [ip]\n"); \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r8, 1371)), \
- "add ip, r8, #1371\n" \
- STRING_OP # DST_REG ", [ip]\n"); \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r8, 4113)), \
- "add ip, r8, #17\n" \
- "add ip, #4096\n" \
- STRING_OP # DST_REG ", [ip]\n"); \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r8, 65808)), \
- "add ip, r8, #272\n" \
- "add ip, #65536\n" \
- STRING_OP # DST_REG ", [ip]\n"); \
- \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r8, -1024)), \
- "sub ip, r8, #1024\n" \
- STRING_OP # DST_REG ", [ip]\n"); \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r8, -1371)), \
- "sub ip, r8, #1371\n" \
- STRING_OP # DST_REG ", [ip]\n"); \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r8, -4113)), \
- "sub ip, r8, #17\n" \
- "sub ip, #4096\n" \
- STRING_OP # DST_REG ", [ip]\n"); \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r8, -65808)), \
- "sub ip, r8, #272\n" \
- "sub ip, #65536\n" \
- STRING_OP # DST_REG ", [ip]\n"); \
- \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r9, 0, PreIndex)), \
- STRING_OP # DST_REG ", [r9]\n"); \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r9, 137, PreIndex)), \
- "add r9, #137\n" \
- STRING_OP # DST_REG ", [r9]\n"); \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r9, 4110, PreIndex)), \
- "add r9, #14\n" \
- "add r9, #4096\n" \
- STRING_OP # DST_REG ", [r9]\n"); \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r9, 65623, PreIndex)), \
- "add r9, #87\n" \
- "add r9, #65536\n" \
- STRING_OP # DST_REG ", [r9]\n"); \
- \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r9, -137, PreIndex)), \
- "sub r9, #137\n" \
- STRING_OP # DST_REG ", [r9]\n"); \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r9, -4110, PreIndex)), \
- "sub r9, #14\n" \
- "sub r9, #4096\n" \
- STRING_OP # DST_REG ", [r9]\n"); \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r9, -65623, PreIndex)), \
- "sub r9, #87\n" \
- "sub r9, #65536\n" \
- STRING_OP # DST_REG ", [r9]\n"); \
- \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r10, 0, PostIndex)), \
- STRING_OP # DST_REG ", [r10]\n"); \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r10, 137, PostIndex)), \
- STRING_OP # DST_REG ", [r10]\n" \
- "add r10, #137\n"); \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r10, 4110, PostIndex)), \
- STRING_OP # DST_REG ", [r10]\n" \
- "add r10, #14\n" \
- "add r10, #4096\n"); \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r10, 65623, PostIndex)), \
- STRING_OP # DST_REG ", [r10]\n" \
- "add r10, #87\n" \
- "add r10, #65536\n"); \
- \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r10, -137, PostIndex)), \
- STRING_OP # DST_REG ", [r10]\n" \
- "sub r10, #137\n"); \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r10, -4110, PostIndex)), \
- STRING_OP # DST_REG ", [r10]\n" \
- "sub r10, #14\n" \
- "sub r10, #4096\n"); \
- COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r10, -65623, PostIndex)), \
- STRING_OP # DST_REG ", [r10]\n" \
- "sub r10, #87\n" \
- "sub r10, #65536\n"); \
+#define TEST_VMEMOP(MACRO_OP, STRING_OP, DST_REG) \
+ SETUP(); \
+ \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r8, 1024)), \
+ "add ip, r8, #1024\n" STRING_OP #DST_REG ", [ip]\n"); \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r8, 1371)), \
+ "add ip, r8, #1371\n" STRING_OP #DST_REG ", [ip]\n"); \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r8, 4113)), \
+ "add ip, r8, #17\n" \
+ "add ip, #4096\n" STRING_OP #DST_REG ", [ip]\n"); \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r8, 65808)), \
+ "add ip, r8, #272\n" \
+ "add ip, #65536\n" STRING_OP #DST_REG ", [ip]\n"); \
+ \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r8, -1024)), \
+ "sub ip, r8, #1024\n" STRING_OP #DST_REG ", [ip]\n"); \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r8, -1371)), \
+ "sub ip, r8, #1371\n" STRING_OP #DST_REG ", [ip]\n"); \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r8, -4113)), \
+ "sub ip, r8, #17\n" \
+ "sub ip, #4096\n" STRING_OP #DST_REG ", [ip]\n"); \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r8, -65808)), \
+ "sub ip, r8, #272\n" \
+ "sub ip, #65536\n" STRING_OP #DST_REG ", [ip]\n"); \
+ \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r9, 0, PreIndex)), \
+ STRING_OP #DST_REG ", [r9]\n"); \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r9, 137, PreIndex)), \
+ "add r9, #137\n" STRING_OP #DST_REG ", [r9]\n"); \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r9, 4110, PreIndex)), \
+ "add r9, #14\n" \
+ "add r9, #4096\n" STRING_OP #DST_REG ", [r9]\n"); \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r9, 65623, PreIndex)), \
+ "add r9, #87\n" \
+ "add r9, #65536\n" STRING_OP #DST_REG ", [r9]\n"); \
+ \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r9, -137, PreIndex)), \
+ "sub r9, #137\n" STRING_OP #DST_REG ", [r9]\n"); \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r9, -4110, PreIndex)), \
+ "sub r9, #14\n" \
+ "sub r9, #4096\n" STRING_OP #DST_REG ", [r9]\n"); \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r9, -65623, PreIndex)), \
+ "sub r9, #87\n" \
+ "sub r9, #65536\n" STRING_OP #DST_REG ", [r9]\n"); \
+ \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r10, 0, PostIndex)), \
+ STRING_OP #DST_REG ", [r10]\n"); \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r10, 137, PostIndex)), \
+ STRING_OP #DST_REG \
+ ", [r10]\n" \
+ "add r10, #137\n"); \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r10, 4110, PostIndex)), \
+ STRING_OP #DST_REG \
+ ", [r10]\n" \
+ "add r10, #14\n" \
+ "add r10, #4096\n"); \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r10, 65623, PostIndex)), \
+ STRING_OP #DST_REG \
+ ", [r10]\n" \
+ "add r10, #87\n" \
+ "add r10, #65536\n"); \
+ \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r10, -137, PostIndex)), \
+ STRING_OP #DST_REG \
+ ", [r10]\n" \
+ "sub r10, #137\n"); \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r10, -4110, PostIndex)), \
+ STRING_OP #DST_REG \
+ ", [r10]\n" \
+ "sub r10, #14\n" \
+ "sub r10, #4096\n"); \
+ COMPARE_T32(MACRO_OP(DST_REG, MemOperand(r10, -65623, PostIndex)), \
+ STRING_OP #DST_REG \
+ ", [r10]\n" \
+ "sub r10, #87\n" \
+ "sub r10, #65536\n"); \
CLEANUP();
-TEST(macro_assembler_T32_Vldr_d) {
- TEST_VMEMOP(Vldr, "vldr ", d0);
-}
+TEST(macro_assembler_T32_Vldr_d) { TEST_VMEMOP(Vldr, "vldr ", d0); }
-TEST(macro_assembler_T32_Vstr_d) {
- TEST_VMEMOP(Vstr, "vstr ", d1);
-}
+TEST(macro_assembler_T32_Vstr_d) { TEST_VMEMOP(Vstr, "vstr ", d1); }
-TEST(macro_assembler_T32_Vldr_s) {
- TEST_VMEMOP(Vldr, "vldr ", s2);
-}
+TEST(macro_assembler_T32_Vldr_s) { TEST_VMEMOP(Vldr, "vldr ", s2); }
-TEST(macro_assembler_T32_Vstr_s) {
- TEST_VMEMOP(Vstr, "vstr ", s3);
-}
+TEST(macro_assembler_T32_Vstr_s) { TEST_VMEMOP(Vstr, "vstr ", s3); }
#undef TEST_VMEMOP
-#define TEST_VMEMOP(MACRO_OP, STRING_OP, DST_REG) \
- SETUP(); \
- \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r8, 137)), \
- "add ip, r8, #137\n" \
- STRING_OP # DST_REG ", [ip]\n"); \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r8, 274)), \
- "add ip, r8, #18\n" \
- "add ip, #256\n" \
- STRING_OP # DST_REG ", [ip]\n"); \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r8, 65623)), \
- "add ip, r8, #87\n" \
- "add ip, #65536\n" \
- STRING_OP # DST_REG ", [ip]\n"); \
- \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r8, -137)), \
- "sub ip, r8, #137\n" \
- STRING_OP # DST_REG ", [ip]\n"); \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r8, -274)), \
- "sub ip, r8, #18\n" \
- "sub ip, #256\n" \
- STRING_OP # DST_REG ", [ip]\n"); \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r8, -65623)), \
- "sub ip, r8, #87\n" \
- "sub ip, #65536\n" \
- STRING_OP # DST_REG ", [ip]\n"); \
- \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r9, 0, PreIndex)), \
- STRING_OP # DST_REG ", [r9]\n"); \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r9, 137, PreIndex)), \
- "add r9, #137\n" \
- STRING_OP # DST_REG ", [r9]\n"); \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r9, 274, PreIndex)), \
- "add r9, #18\n" \
- "add r9, #256\n" \
- STRING_OP # DST_REG ", [r9]\n"); \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r9, 65623, PreIndex)), \
- "add r9, #87\n" \
- "add r9, #65536\n" \
- STRING_OP # DST_REG ", [r9]\n"); \
- \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r9, -137, PreIndex)), \
- "sub r9, #137\n" \
- STRING_OP # DST_REG ", [r9]\n"); \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r9, -274, PreIndex)), \
- "sub r9, #18\n" \
- "sub r9, #256\n" \
- STRING_OP # DST_REG ", [r9]\n"); \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r9, -65623, PreIndex)), \
- "sub r9, #87\n" \
- "sub r9, #65536\n" \
- STRING_OP # DST_REG ", [r9]\n"); \
- \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r10, 0, PostIndex)), \
- STRING_OP # DST_REG ", [r10]\n"); \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r10, 137, PostIndex)), \
- STRING_OP # DST_REG ", [r10]\n" \
- "add r10, #137\n"); \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r10, 274, PostIndex)), \
- STRING_OP # DST_REG ", [r10]\n" \
- "add r10, #18\n" \
- "add r10, #256\n"); \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r10, 65623, PostIndex)), \
- STRING_OP # DST_REG ", [r10]\n" \
- "add r10, #87\n" \
- "add r10, #65536\n"); \
- \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r10, -137, PostIndex)), \
- STRING_OP # DST_REG ", [r10]\n" \
- "sub r10, #137\n"); \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r10, -274, PostIndex)), \
- STRING_OP # DST_REG ", [r10]\n" \
- "sub r10, #18\n" \
- "sub r10, #256\n"); \
- COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r10, -65623, PostIndex)), \
- STRING_OP # DST_REG ", [r10]\n" \
- "sub r10, #87\n" \
- "sub r10, #65536\n"); \
+#define TEST_VMEMOP(MACRO_OP, STRING_OP, DST_REG) \
+ SETUP(); \
+ \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r8, 137)), \
+ "add ip, r8, #137\n" STRING_OP #DST_REG ", [ip]\n"); \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r8, 274)), \
+ "add ip, r8, #18\n" \
+ "add ip, #256\n" STRING_OP #DST_REG ", [ip]\n"); \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r8, 65623)), \
+ "add ip, r8, #87\n" \
+ "add ip, #65536\n" STRING_OP #DST_REG ", [ip]\n"); \
+ \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r8, -137)), \
+ "sub ip, r8, #137\n" STRING_OP #DST_REG ", [ip]\n"); \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r8, -274)), \
+ "sub ip, r8, #18\n" \
+ "sub ip, #256\n" STRING_OP #DST_REG ", [ip]\n"); \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r8, -65623)), \
+ "sub ip, r8, #87\n" \
+ "sub ip, #65536\n" STRING_OP #DST_REG ", [ip]\n"); \
+ \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r9, 0, PreIndex)), \
+ STRING_OP #DST_REG ", [r9]\n"); \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r9, 137, PreIndex)), \
+ "add r9, #137\n" STRING_OP #DST_REG ", [r9]\n"); \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r9, 274, PreIndex)), \
+ "add r9, #18\n" \
+ "add r9, #256\n" STRING_OP #DST_REG ", [r9]\n"); \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r9, 65623, PreIndex)), \
+ "add r9, #87\n" \
+ "add r9, #65536\n" STRING_OP #DST_REG ", [r9]\n"); \
+ \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r9, -137, PreIndex)), \
+ "sub r9, #137\n" STRING_OP #DST_REG ", [r9]\n"); \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r9, -274, PreIndex)), \
+ "sub r9, #18\n" \
+ "sub r9, #256\n" STRING_OP #DST_REG ", [r9]\n"); \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r9, -65623, PreIndex)), \
+ "sub r9, #87\n" \
+ "sub r9, #65536\n" STRING_OP #DST_REG ", [r9]\n"); \
+ \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r10, 0, PostIndex)), \
+ STRING_OP #DST_REG ", [r10]\n"); \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r10, 137, PostIndex)), \
+ STRING_OP #DST_REG \
+ ", [r10]\n" \
+ "add r10, #137\n"); \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r10, 274, PostIndex)), \
+ STRING_OP #DST_REG \
+ ", [r10]\n" \
+ "add r10, #18\n" \
+ "add r10, #256\n"); \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r10, 65623, PostIndex)), \
+ STRING_OP #DST_REG \
+ ", [r10]\n" \
+ "add r10, #87\n" \
+ "add r10, #65536\n"); \
+ \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r10, -137, PostIndex)), \
+ STRING_OP #DST_REG \
+ ", [r10]\n" \
+ "sub r10, #137\n"); \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r10, -274, PostIndex)), \
+ STRING_OP #DST_REG \
+ ", [r10]\n" \
+ "sub r10, #18\n" \
+ "sub r10, #256\n"); \
+ COMPARE_A32(MACRO_OP(DST_REG, MemOperand(r10, -65623, PostIndex)), \
+ STRING_OP #DST_REG \
+ ", [r10]\n" \
+ "sub r10, #87\n" \
+ "sub r10, #65536\n"); \
CLEANUP();
-TEST(macro_assembler_A32_Vldr_d) {
- TEST_VMEMOP(Vldr, "vldr ", d0);
-}
+TEST(macro_assembler_A32_Vldr_d) { TEST_VMEMOP(Vldr, "vldr ", d0); }
-TEST(macro_assembler_A32_Vstr_d) {
- TEST_VMEMOP(Vstr, "vstr ", d1);
-}
+TEST(macro_assembler_A32_Vstr_d) { TEST_VMEMOP(Vstr, "vstr ", d1); }
-TEST(macro_assembler_A32_Vldr_s) {
- TEST_VMEMOP(Vldr, "vldr ", s2);
-}
+TEST(macro_assembler_A32_Vldr_s) { TEST_VMEMOP(Vldr, "vldr ", s2); }
-TEST(macro_assembler_A32_Vstr_s) {
- TEST_VMEMOP(Vstr, "vstr ", s3);
-}
+TEST(macro_assembler_A32_Vstr_s) { TEST_VMEMOP(Vstr, "vstr ", s3); }
#undef TEST_VMEMOP
@@ -1942,98 +1902,88 @@
CLEANUP();
}
-#define TEST_SHIFT_T32(Inst, name, offset) \
- COMPARE_T32(Inst(r0, Operand(r1, LSL, r2)), \
- "lsl ip, r1, r2\n" \
- name " r0, ip\n"); \
- COMPARE_T32(Inst(r0, Operand(r1, LSR, r2)), \
- "lsr ip, r1, r2\n" \
- name " r0, ip\n"); \
- COMPARE_T32(Inst(r0, Operand(r1, ASR, r2)), \
- "asr ip, r1, r2\n" \
- name " r0, ip\n"); \
- COMPARE_T32(Inst(r0, Operand(r1, ROR, r2)), \
- "ror ip, r1, r2\n" \
- name " r0, ip\n"); \
- COMPARE_T32(Inst(eq, r0, Operand(r1, LSL, r2)),\
- "bne "#offset"\n" \
- "lsl ip, r1, r2\n" \
- name " r0, ip\n"); \
- COMPARE_T32(Inst(le, r0, Operand(r1, LSL, r2)),\
- "bgt "#offset"\n" \
- "lsl ip, r1, r2\n" \
- name " r0, ip\n");
+#define TEST_SHIFT_T32(Inst, name, offset) \
+ COMPARE_T32(Inst(r0, Operand(r1, LSL, r2)), \
+ "lsl ip, r1, r2\n" name " r0, ip\n"); \
+ COMPARE_T32(Inst(r0, Operand(r1, LSR, r2)), \
+ "lsr ip, r1, r2\n" name " r0, ip\n"); \
+ COMPARE_T32(Inst(r0, Operand(r1, ASR, r2)), \
+ "asr ip, r1, r2\n" name " r0, ip\n"); \
+ COMPARE_T32(Inst(r0, Operand(r1, ROR, r2)), \
+ "ror ip, r1, r2\n" name " r0, ip\n"); \
+ COMPARE_T32(Inst(eq, r0, Operand(r1, LSL, r2)), \
+ "bne " #offset \
+ "\n" \
+ "lsl ip, r1, r2\n" name " r0, ip\n"); \
+ COMPARE_T32(Inst(le, r0, Operand(r1, LSL, r2)), \
+ "bgt " #offset \
+ "\n" \
+ "lsl ip, r1, r2\n" name " r0, ip\n");
-#define TEST_MOV_SHIFT_T32(Inst, s, offset) \
- COMPARE_T32(Inst(r0, Operand(r1, LSL, r2)), \
- "lsl" s " r0, r1, r2\n"); \
- COMPARE_T32(Inst(r0, Operand(r1, LSR, r2)), \
- "lsr" s " r0, r1, r2\n"); \
- COMPARE_T32(Inst(r0, Operand(r1, ASR, r2)), \
- "asr" s " r0, r1, r2\n"); \
- COMPARE_T32(Inst(r0, Operand(r1, ROR, r2)), \
- "ror" s " r0, r1, r2\n"); \
- COMPARE_T32(Inst(eq, r0, Operand(r1, LSL, r2)),\
- "bne "#offset"\n" \
- "lsl" s " r0, r1, r2\n"); \
- COMPARE_T32(Inst(le, r0, Operand(r1, LSL, r2)),\
- "bgt "#offset"\n" \
+#define TEST_MOV_SHIFT_T32(Inst, s, offset) \
+ COMPARE_T32(Inst(r0, Operand(r1, LSL, r2)), "lsl" s " r0, r1, r2\n"); \
+ COMPARE_T32(Inst(r0, Operand(r1, LSR, r2)), "lsr" s " r0, r1, r2\n"); \
+ COMPARE_T32(Inst(r0, Operand(r1, ASR, r2)), "asr" s " r0, r1, r2\n"); \
+ COMPARE_T32(Inst(r0, Operand(r1, ROR, r2)), "ror" s " r0, r1, r2\n"); \
+ COMPARE_T32(Inst(eq, r0, Operand(r1, LSL, r2)), \
+ "bne " #offset \
+ "\n" \
+ "lsl" s " r0, r1, r2\n"); \
+ COMPARE_T32(Inst(le, r0, Operand(r1, LSL, r2)), \
+ "bgt " #offset \
+ "\n" \
"lsl" s " r0, r1, r2\n");
-#define TEST_WIDE_IMMEDIATE(Inst, name, offset) \
- COMPARE_BOTH(Inst(r0, 0xbadbeef), \
- "mov ip, #48879\n" \
- "movt ip, #2989\n" \
- name " r0, ip\n"); \
- COMPARE_A32(Inst(eq, r0, 0xbadbeef), \
- "moveq ip, #48879\n" \
- "movteq ip, #2989\n" \
- name "eq r0, ip\n"); \
- COMPARE_T32(Inst(eq, r0, 0xbadbeef), \
- "bne "#offset"\n" \
- "mov ip, #48879\n" \
- "movt ip, #2989\n" \
- name " r0, ip\n");
+#define TEST_WIDE_IMMEDIATE(Inst, name, offset) \
+ COMPARE_BOTH(Inst(r0, 0xbadbeef), \
+ "mov ip, #48879\n" \
+ "movt ip, #2989\n" name " r0, ip\n"); \
+ COMPARE_A32(Inst(eq, r0, 0xbadbeef), \
+ "moveq ip, #48879\n" \
+ "movteq ip, #2989\n" name "eq r0, ip\n"); \
+ COMPARE_T32(Inst(eq, r0, 0xbadbeef), \
+ "bne " #offset \
+ "\n" \
+ "mov ip, #48879\n" \
+ "movt ip, #2989\n" name " r0, ip\n");
#define TEST_WIDE_IMMEDIATE_PC(Inst, name, offset) \
COMPARE_A32(Inst(pc, 0xbadbeef), \
"mov ip, #48879\n" \
- "movt ip, #2989\n" \
- name " pc, ip\n"); \
+ "movt ip, #2989\n" name " pc, ip\n"); \
COMPARE_A32(Inst(eq, pc, 0xbadbeef), \
"moveq ip, #48879\n" \
- "movteq ip, #2989\n" \
- name "eq pc, ip\n"); \
+ "movteq ip, #2989\n" name "eq pc, ip\n"); \
MUST_FAIL_TEST_T32(Inst(pc, 0xbadbeef), \
"Ill-formed '" name "' instruction.\n"); \
MUST_FAIL_TEST_T32(Inst(eq, pc, 0xbadbeef), \
- "Ill-formed '" name "' instruction.\n"); \
+ "Ill-formed '" name "' instruction.\n");
TEST(macro_assembler_InstructionCondSizeROp) {
SETUP();
// T32 register shifted register.
- TEST_SHIFT_T32(Cmn,"cmn", 0x0000000a)
- TEST_SHIFT_T32(Cmp,"cmp", 0x00000008)
- TEST_SHIFT_T32(Mvn,"mvn", 0x0000000a)
- TEST_SHIFT_T32(Mvns,"mvns", 0x0000000a)
- TEST_SHIFT_T32(Sxtb,"sxtb", 0x0000000a)
- TEST_SHIFT_T32(Sxth,"sxth", 0x0000000a)
- TEST_SHIFT_T32(Tst,"tst", 0x0000000a)
- TEST_SHIFT_T32(Uxtb,"uxtb", 0x0000000a)
- TEST_SHIFT_T32(Uxth,"uxth", 0x0000000a)
+ TEST_SHIFT_T32(Cmn, "cmn", 0x0000000a)
+ TEST_SHIFT_T32(Cmp, "cmp", 0x00000008)
+ TEST_SHIFT_T32(Mvn, "mvn", 0x0000000a)
+ TEST_SHIFT_T32(Mvns, "mvns", 0x0000000a)
+ TEST_SHIFT_T32(Sxtb, "sxtb", 0x0000000a)
+ TEST_SHIFT_T32(Sxth, "sxth", 0x0000000a)
+ TEST_SHIFT_T32(Tst, "tst", 0x0000000a)
+ TEST_SHIFT_T32(Uxtb, "uxtb", 0x0000000a)
+ TEST_SHIFT_T32(Uxth, "uxth", 0x0000000a)
TEST_MOV_SHIFT_T32(Mov, "", 0x00000006)
TEST_MOV_SHIFT_T32(Movs, "s", 0x00000006)
- MUST_FAIL_TEST_BOTH(Movs(pc, r0),
- "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_BOTH(Movs(pc, r0), "Unpredictable instruction.\n");
MUST_FAIL_TEST_BOTH(Movs(pc, Operand(r0, LSL, 0x4)),
"Unpredictable instruction.\n");
MUST_FAIL_TEST_BOTH(Movs(pc, Operand(r0, ASR, r2)),
"Unpredictable instruction.\n");
- // Wide immediates (Mov and Movs are tested in "macro_assembler_wide_immediate").
+ // Wide immediates (Mov and Movs are tested in
+ // "macro_assembler_wide_immediate").
TEST_WIDE_IMMEDIATE(Cmp, "cmp", 0x0000000c);
TEST_WIDE_IMMEDIATE(Cmn, "cmn", 0x0000000e);
TEST_WIDE_IMMEDIATE(Tst, "tst", 0x0000000e);
@@ -2045,9 +1995,11 @@
TEST_WIDE_IMMEDIATE(Mvn, "mvn", 0x0000000e);
TEST_WIDE_IMMEDIATE(Mvns, "mvns", 0x0000000e);
MUST_FAIL_TEST_BOTH(Mvn(pc, 0xbadbeef), "Ill-formed 'mvn' instruction.\n");
- MUST_FAIL_TEST_BOTH(Mvn(eq, pc, 0xbadbeef), "Ill-formed 'mvn' instruction.\n");
+ MUST_FAIL_TEST_BOTH(Mvn(eq, pc, 0xbadbeef),
+ "Ill-formed 'mvn' instruction.\n");
MUST_FAIL_TEST_BOTH(Mvns(pc, 0xbadbeef), "Ill-formed 'mvns' instruction.\n");
- MUST_FAIL_TEST_BOTH(Mvns(eq, pc, 0xbadbeef), "Ill-formed 'mvns' instruction.\n");
+ MUST_FAIL_TEST_BOTH(Mvns(eq, pc, 0xbadbeef),
+ "Ill-formed 'mvns' instruction.\n");
MUST_FAIL_TEST_BOTH(Sxtb(r0, 0x1), "Ill-formed 'sxtb' instruction.\n");
MUST_FAIL_TEST_BOTH(Sxth(r0, 0x1), "Ill-formed 'sxth' instruction.\n");
@@ -2072,9 +2024,9 @@
// Wide immediate.
COMPARE_BOTH(Msr(APSR_nzcvq, 0xbadbeef),
- "mov ip, #48879\n"
- "movt ip, #2989\n"
- "msr APSR_nzcvq, ip\n");
+ "mov ip, #48879\n"
+ "movt ip, #2989\n"
+ "msr APSR_nzcvq, ip\n");
// Other types of operands are not handled.
MUST_FAIL_TEST_BOTH(Msr(APSR_nzcvq, Operand(r0, LSR, r1)),
@@ -2089,8 +2041,7 @@
COMPARE_BOTH(Vmov(s0, 0.0f),
"mov ip, #0\n"
"vmov s0, ip\n");
- COMPARE_BOTH(Vmov(s1, 1.0f),
- "vmov.f32 s1, #1\n");
+ COMPARE_BOTH(Vmov(s1, 1.0f), "vmov.f32 s1, #1\n");
COMPARE_BOTH(Vmov(s2, RawbitsToFloat(0x0000db6c)),
"mov ip, #56172\n"
"vmov s2, ip\n");
@@ -2106,10 +2057,8 @@
"movt ip, #46893\n"
"vmov s5, ip\n");
- COMPARE_BOTH(Vmov(d6, 0.0),
- "vmov.i64 d6, #0x0000000000000000\n");
- COMPARE_BOTH(Vmov(d7, 1.0),
- "vmov.f64 d7, #1\n");
+ COMPARE_BOTH(Vmov(d6, 0.0), "vmov.i64 d6, #0x0000000000000000\n");
+ COMPARE_BOTH(Vmov(d7, 1.0), "vmov.f64 d7, #1\n");
COMPARE_BOTH(Vmov(d8, RawbitsToDouble(0x000000000000af8e)),
"mov ip, #44942\n"
"vdup.32 d8, ip\n"
@@ -2162,8 +2111,7 @@
UseScratchRegisterScope temps(&masm);
temps.ExcludeAll();
- COMPARE_BOTH(Push(RegisterList(0x1111)),
- "push {r0,r4,r8,ip}\n");
+ COMPARE_BOTH(Push(RegisterList(0x1111)), "push {r0,r4,r8,ip}\n");
COMPARE_BOTH(Push(RegisterList(0x1fff)),
"push {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,ip}\n");
@@ -2208,8 +2156,10 @@
COMPARE_BOTH(Push(RegisterList(r8)), "stmdb sp!, {r8}\n");
// Cannot push the sp and pc in T32 when using a register list.
- MUST_FAIL_TEST_T32(Push(RegisterList(sp)), "Ill-formed 'push' instruction.\n");
- MUST_FAIL_TEST_T32(Push(RegisterList(pc)), "Ill-formed 'push' instruction.\n");
+ MUST_FAIL_TEST_T32(Push(RegisterList(sp)),
+ "Ill-formed 'push' instruction.\n");
+ MUST_FAIL_TEST_T32(Push(RegisterList(pc)),
+ "Ill-formed 'push' instruction.\n");
CLEANUP();
}
@@ -2221,8 +2171,7 @@
UseScratchRegisterScope temps(&masm);
temps.ExcludeAll();
- COMPARE_BOTH(Pop(RegisterList(0x1111)),
- "pop {r0,r4,r8,ip}\n");
+ COMPARE_BOTH(Pop(RegisterList(0x1111)), "pop {r0,r4,r8,ip}\n");
COMPARE_BOTH(Pop(RegisterList(0x1fff)),
"pop {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,ip}\n");
@@ -2271,36 +2220,26 @@
// ADC, ADCS (immediate).
COMPARE_A32(Adc(pc, r0, 1), "adc pc, r0, #1\n");
COMPARE_A32(Adc(r0, pc, 1), "adc r0, pc, #1\n");
- MUST_FAIL_TEST_T32(Adc(pc, r0, 1),
- "Unpredictable instruction.\n");
- MUST_FAIL_TEST_T32(Adc(r0, pc, 1),
- "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Adc(pc, r0, 1), "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Adc(r0, pc, 1), "Unpredictable instruction.\n");
COMPARE_A32(Adcs(pc, r0, 1), "adcs pc, r0, #1\n");
COMPARE_A32(Adcs(r0, pc, 1), "adcs r0, pc, #1\n");
- MUST_FAIL_TEST_T32(Adcs(pc, r0, 1),
- "Unpredictable instruction.\n");
- MUST_FAIL_TEST_T32(Adcs(r0, pc, 1),
- "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Adcs(pc, r0, 1), "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Adcs(r0, pc, 1), "Unpredictable instruction.\n");
// ADC, ADCS (register).
COMPARE_A32(Adc(pc, r0, r1), "adc pc, r0, r1\n");
COMPARE_A32(Adc(r0, pc, r1), "adc r0, pc, r1\n");
COMPARE_A32(Adc(r0, r1, pc), "adc r0, r1, pc\n");
- MUST_FAIL_TEST_T32(Adc(pc, r0, r1),
- "Unpredictable instruction.\n");
- MUST_FAIL_TEST_T32(Adc(r0, pc, r1),
- "Unpredictable instruction.\n");
- MUST_FAIL_TEST_T32(Adc(r0, r1, pc),
- "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Adc(pc, r0, r1), "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Adc(r0, pc, r1), "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Adc(r0, r1, pc), "Unpredictable instruction.\n");
COMPARE_A32(Adcs(pc, r0, r1), "adcs pc, r0, r1\n");
COMPARE_A32(Adcs(r0, pc, r1), "adcs r0, pc, r1\n");
COMPARE_A32(Adcs(r0, r1, pc), "adcs r0, r1, pc\n");
- MUST_FAIL_TEST_T32(Adcs(pc, r0, r1),
- "Unpredictable instruction.\n");
- MUST_FAIL_TEST_T32(Adcs(r0, pc, r1),
- "Unpredictable instruction.\n");
- MUST_FAIL_TEST_T32(Adcs(r0, r1, pc),
- "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Adcs(pc, r0, r1), "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Adcs(r0, pc, r1), "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Adcs(r0, r1, pc), "Unpredictable instruction.\n");
// ADC, ADCS (register-shifted register).
MUST_FAIL_TEST_A32(Adc(pc, r0, Operand(r1, LSL, r2)),
@@ -2324,22 +2263,17 @@
COMPARE_A32(Add(r0, pc, 1), "adr r0, 0x00000009\n");
COMPARE_T32(Add(r0, pc, 1), "adr r0, 0x00000005\n");
COMPARE_A32(Add(pc, pc, 1), "adr pc, 0x00000009\n");
- MUST_FAIL_TEST_T32(Add(pc, pc, 1),
- "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Add(pc, pc, 1), "Unpredictable instruction.\n");
// ADD, ADDS (immediate).
COMPARE_A32(Add(pc, r0, 1), "add pc, r0, #1\n");
- MUST_FAIL_TEST_T32(Add(pc, r0, 1),
- "Unpredictable instruction.\n");
- MUST_FAIL_TEST_T32(Add(pc, r0, 0x123),
- "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Add(pc, r0, 1), "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Add(pc, r0, 0x123), "Unpredictable instruction.\n");
COMPARE_A32(Adds(pc, r0, 1), "adds pc, r0, #1\n");
COMPARE_A32(Adds(r0, pc, 1), "adds r0, pc, #1\n");
// TODO: Try to make these error messages more consistent.
- MUST_FAIL_TEST_T32(Adds(r0, pc, 1),
- "Unpredictable instruction.\n");
- MUST_FAIL_TEST_T32(Adds(r0, pc, 0x123),
- "Ill-formed 'adds' instruction.\n");
+ MUST_FAIL_TEST_T32(Adds(r0, pc, 1), "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Adds(r0, pc, 0x123), "Ill-formed 'adds' instruction.\n");
// ADD, ADDS (register).
COMPARE_A32(Add(pc, r0, r1), "add pc, r0, r1\n");
@@ -2347,21 +2281,15 @@
COMPARE_A32(Add(r0, r1, pc), "add r0, r1, pc\n");
COMPARE_T32(Add(r0, r0, pc), "add r0, pc\n");
COMPARE_T32(Add(pc, pc, r0), "add pc, r0\n");
- MUST_FAIL_TEST_T32(Add(pc, pc, pc),
- "Unpredictable instruction.\n");
- MUST_FAIL_TEST_T32(Add(pc, r0, r1),
- "Unpredictable instruction.\n");
- MUST_FAIL_TEST_T32(Add(r0, pc, r1),
- "Unpredictable instruction.\n");
- MUST_FAIL_TEST_T32(Add(r0, r1, pc),
- "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Add(pc, pc, pc), "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Add(pc, r0, r1), "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Add(r0, pc, r1), "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Add(r0, r1, pc), "Unpredictable instruction.\n");
COMPARE_A32(Adds(pc, r0, r1), "adds pc, r0, r1\n");
COMPARE_A32(Adds(r0, pc, r1), "adds r0, pc, r1\n");
COMPARE_A32(Adds(r0, r1, pc), "adds r0, r1, pc\n");
- MUST_FAIL_TEST_T32(Adds(r0, pc, r1),
- "Unpredictable instruction.\n");
- MUST_FAIL_TEST_T32(Adds(r0, r1, pc),
- "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Adds(r0, pc, r1), "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Adds(r0, r1, pc), "Unpredictable instruction.\n");
// ADD, ADDS (register-shifted register)
MUST_FAIL_TEST_A32(Add(pc, r0, Operand(r1, LSL, r2)),
@@ -2373,8 +2301,7 @@
MUST_FAIL_TEST_A32(Add(r0, r1, Operand(r2, LSL, pc)),
"Unpredictable instruction.\n");
COMPARE_A32(Add(pc, sp, 1), "add pc, sp, #1\n");
- MUST_FAIL_TEST_T32(Add(pc, sp, 1),
- "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Add(pc, sp, 1), "Unpredictable instruction.\n");
MUST_FAIL_TEST_A32(Adds(pc, r0, Operand(r1, LSL, r2)),
"Unpredictable instruction.\n");
MUST_FAIL_TEST_A32(Adds(r0, pc, Operand(r1, LSL, r2)),
@@ -2386,41 +2313,33 @@
// ADD, ADDS (SP plus immediate).
COMPARE_A32(Add(pc, sp, 1), "add pc, sp, #1\n");
- MUST_FAIL_TEST_T32(Add(pc, sp, 1),
- "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Add(pc, sp, 1), "Unpredictable instruction.\n");
COMPARE_A32(Adds(pc, sp, 1), "adds pc, sp, #1\n");
- MUST_FAIL_TEST_T32(Adds(pc, sp, 1),
- "Ill-formed 'adds' instruction.\n");
+ MUST_FAIL_TEST_T32(Adds(pc, sp, 1), "Ill-formed 'adds' instruction.\n");
// ADD, ADDS (SP plus register).
COMPARE_A32(Add(pc, sp, r0), "add pc, sp, r0\n");
- MUST_FAIL_TEST_T32(Add(pc, sp, r0),
- "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Add(pc, sp, r0), "Unpredictable instruction.\n");
COMPARE_A32(Add(r0, sp, pc), "add r0, sp, pc\n");
- MUST_FAIL_TEST_T32(Add(r0, sp, pc),
- "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Add(r0, sp, pc), "Unpredictable instruction.\n");
COMPARE_BOTH(Add(pc, sp, pc), "add pc, sp, pc\n");
COMPARE_BOTH(Add(sp, sp, pc), "add sp, pc\n");
COMPARE_A32(Adds(pc, sp, r0), "adds pc, sp, r0\n");
- MUST_FAIL_TEST_T32(Adds(pc, sp, r0),
- "Ill-formed 'adds' instruction.\n");
+ MUST_FAIL_TEST_T32(Adds(pc, sp, r0), "Ill-formed 'adds' instruction.\n");
COMPARE_A32(Adds(r0, sp, pc), "adds r0, sp, pc\n");
- MUST_FAIL_TEST_T32(Adds(r0, sp, pc),
- "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Adds(r0, sp, pc), "Unpredictable instruction.\n");
COMPARE_A32(Adds(pc, sp, pc), "adds pc, sp, pc\n");
- MUST_FAIL_TEST_T32(Adds(pc, sp, pc),
- "Ill-formed 'adds' instruction.\n");
+ MUST_FAIL_TEST_T32(Adds(pc, sp, pc), "Ill-formed 'adds' instruction.\n");
COMPARE_A32(Adds(sp, sp, pc), "adds sp, pc\n");
- MUST_FAIL_TEST_T32(Adds(sp, sp, pc),
- "Unpredictable instruction.\n");
+ MUST_FAIL_TEST_T32(Adds(sp, sp, pc), "Unpredictable instruction.\n");
// ADR.
{
- Label label;
- masm.Bind(&label);
- COMPARE_A32(Adr(pc, &label), "adr pc, 0x00000000\n");
- MUST_FAIL_TEST_T32(Adr(pc, &label),
- "Unpredictable instruction.\n");
+ Literal<uint32_t> literal(0x12345678);
+ // The address is 0x4 and not 0x0 because of the branch over the literal.
+ // TODO: Consider disallowing this instruction.
+ COMPARE_A32(Adr(pc, &literal), "adr pc, 0x00000004\n");
+ MUST_FAIL_TEST_T32(Adr(pc, &literal), "Unpredictable instruction.\n");
}
// CLZ.
@@ -2435,8 +2354,7 @@
MUST_FAIL_TEST_T32(Mov(pc, 0xf000), "Unpredictable instruction.\n");
COMPARE_A32(Movs(pc, 1), "movs pc, #1\n");
MUST_FAIL_TEST_T32(Movs(pc, 1), "Unpredictable instruction.\n");
- MUST_FAIL_TEST_BOTH(Movs(pc, 0xfff),
- "Ill-formed 'movs' instruction.\n");
+ MUST_FAIL_TEST_BOTH(Movs(pc, 0xfff), "Ill-formed 'movs' instruction.\n");
COMPARE_A32(Movs(pc, 0xf000), "movs pc, #61440\n");
MUST_FAIL_TEST_T32(Movs(pc, 0xf000), "Unpredictable instruction.\n");
@@ -2492,42 +2410,56 @@
MUST_FAIL_TEST_A32(Add(r0, pc, 1025), "Ill-formed 'add' instruction.\n");
MUST_FAIL_TEST_A32(Add(r0, pc, 0xffff), "Ill-formed 'add' instruction.\n");
MUST_FAIL_TEST_A32(Add(r0, pc, 0x10001), "Ill-formed 'add' instruction.\n");
- MUST_FAIL_TEST_A32(Add(r0, pc, 0x12345678), "Ill-formed 'add' instruction.\n");
- MUST_FAIL_TEST_A32(Add(r0, pc, 0x7fffffff), "Ill-formed 'add' instruction.\n");
- COMPARE_A32(Add(r0, pc, -1025), "adr r0, 0x00000007\n"
- "sub r0, #1024\n");
- COMPARE_A32(Add(r0, pc, -0xffff), "adr r0, 0xffffff09\n"
- "sub r0, #65280\n");
- COMPARE_A32(Add(r0, pc, -0x10001), "adr r0, 0x00000007\n"
- "sub r0, #65536\n");
- COMPARE_A32(Add(r0, pc, -0x2345678), "adr r0, 0xfffffd90\n"
- "sub r0, #21504\n"
- "sub r0, #36962304\n");
- COMPARE_A32(Add(r0, pc, -0x12345678), "adr r0, 0xfffffd90\n"
- "mov ip, #21504\n"
- "movt ip, #4660\n"
- "sub r0, ip\n");
+ MUST_FAIL_TEST_A32(Add(r0, pc, 0x12345678),
+ "Ill-formed 'add' instruction.\n");
+ MUST_FAIL_TEST_A32(Add(r0, pc, 0x7fffffff),
+ "Ill-formed 'add' instruction.\n");
+ COMPARE_A32(Add(r0, pc, -1025),
+ "adr r0, 0x00000007\n"
+ "sub r0, #1024\n");
+ COMPARE_A32(Add(r0, pc, -0xffff),
+ "adr r0, 0xffffff09\n"
+ "sub r0, #65280\n");
+ COMPARE_A32(Add(r0, pc, -0x10001),
+ "adr r0, 0x00000007\n"
+ "sub r0, #65536\n");
+ COMPARE_A32(Add(r0, pc, -0x2345678),
+ "adr r0, 0xfffffd90\n"
+ "sub r0, #21504\n"
+ "sub r0, #36962304\n");
+ COMPARE_A32(Add(r0, pc, -0x12345678),
+ "adr r0, 0xfffffd90\n"
+ "mov ip, #21504\n"
+ "movt ip, #4660\n"
+ "sub r0, ip\n");
MUST_FAIL_TEST_A32(Sub(r0, pc, -1025), "Ill-formed 'add' instruction.\n");
MUST_FAIL_TEST_A32(Sub(r0, pc, -0xffff), "Ill-formed 'add' instruction.\n");
MUST_FAIL_TEST_A32(Sub(r0, pc, -0x10001), "Ill-formed 'add' instruction.\n");
- MUST_FAIL_TEST_A32(Sub(r0, pc, -0x12345678), "Ill-formed 'add' instruction.\n");
- COMPARE_A32(Sub(r0, pc, 1025), "adr r0, 0x00000007\n"
- "sub r0, #1024\n");
- COMPARE_A32(Sub(r0, pc, 0xffff), "adr r0, 0xffffff09\n"
- "sub r0, #65280\n");
- COMPARE_A32(Sub(r0, pc, 0x10001), "adr r0, 0x00000007\n"
- "sub r0, #65536\n");
- COMPARE_A32(Sub(r0, pc, 0x2345678), "adr r0, 0xfffffd90\n"
- "sub r0, #21504\n"
- "sub r0, #36962304\n");
- COMPARE_A32(Sub(r0, pc, 0x12345678), "adr r0, 0xfffffd90\n"
- "mov ip, #21504\n"
- "movt ip, #4660\n"
- "sub r0, ip\n");
- COMPARE_A32(Sub(r0, pc, 0x7fffffff), "adr r0, 0xffffff09\n"
- "add r0, #256\n"
- "add r0, #2147483648\n");
+ MUST_FAIL_TEST_A32(Sub(r0, pc, -0x12345678),
+ "Ill-formed 'add' instruction.\n");
+ COMPARE_A32(Sub(r0, pc, 1025),
+ "adr r0, 0x00000007\n"
+ "sub r0, #1024\n");
+ COMPARE_A32(Sub(r0, pc, 0xffff),
+ "adr r0, 0xffffff09\n"
+ "sub r0, #65280\n");
+ COMPARE_A32(Sub(r0, pc, 0x10001),
+ "adr r0, 0x00000007\n"
+ "sub r0, #65536\n");
+ COMPARE_A32(Sub(r0, pc, 0x2345678),
+ "adr r0, 0xfffffd90\n"
+ "sub r0, #21504\n"
+ "sub r0, #36962304\n");
+ COMPARE_A32(Sub(r0, pc, 0x12345678),
+ "adr r0, 0xfffffd90\n"
+ "mov ip, #21504\n"
+ "movt ip, #4660\n"
+ "sub r0, ip\n");
+ COMPARE_A32(Sub(r0, pc, 0x7fffffff),
+ "adr r0, 0xffffff09\n"
+ "add r0, #256\n"
+ "add r0, #2147483648\n");
CLEANUP();
}
@@ -2566,15 +2498,19 @@
MUST_FAIL_TEST_T32(Add(r0, pc, 0xffff), "Ill-formed 'add' instruction.\n");
MUST_FAIL_TEST_T32(Add(r0, pc, 0x10002), "Ill-formed 'add' instruction.\n");
- MUST_FAIL_TEST_T32(Add(r0, pc, 0x12345678), "Ill-formed 'add' instruction.\n");
- MUST_FAIL_TEST_T32(Add(r0, pc, 0x7fffffff), "Ill-formed 'add' instruction.\n");
- COMPARE_T32(Add(r0, pc, -0x12345678), "mov r0, pc\n"
- "mov ip, #22136\n"
- "movt ip, #4660\n"
- "sub r0, ip\n");
- COMPARE_T32(Add(r0, pc, -0x7fffffff), "mov r0, pc\n"
- "add r0, #1\n"
- "add r0, #2147483648\n");
+ MUST_FAIL_TEST_T32(Add(r0, pc, 0x12345678),
+ "Ill-formed 'add' instruction.\n");
+ MUST_FAIL_TEST_T32(Add(r0, pc, 0x7fffffff),
+ "Ill-formed 'add' instruction.\n");
+ COMPARE_T32(Add(r0, pc, -0x12345678),
+ "mov r0, pc\n"
+ "mov ip, #22136\n"
+ "movt ip, #4660\n"
+ "sub r0, ip\n");
+ COMPARE_T32(Add(r0, pc, -0x7fffffff),
+ "mov r0, pc\n"
+ "add r0, #1\n"
+ "add r0, #2147483648\n");
// TODO: This test aborts in the Assembler (with unpredictable instruction
// errors) before the MacroAssembler gets a chance to do something
@@ -2588,15 +2524,19 @@
MUST_FAIL_TEST_T32(Sub(r0, pc, -0xffff), "Ill-formed 'add' instruction.\n");
MUST_FAIL_TEST_T32(Sub(r0, pc, -0x10002), "Ill-formed 'add' instruction.\n");
- MUST_FAIL_TEST_T32(Sub(r0, pc, -0x12345678), "Ill-formed 'add' instruction.\n");
- MUST_FAIL_TEST_T32(Sub(r0, pc, -0x7fffffff), "Ill-formed 'add' instruction.\n");
- COMPARE_T32(Sub(r0, pc, 0x12345678), "mov r0, pc\n"
- "mov ip, #22136\n"
- "movt ip, #4660\n"
- "sub r0, ip\n");
- COMPARE_T32(Sub(r0, pc, 0x7fffffff), "mov r0, pc\n"
- "add r0, #1\n"
- "add r0, #2147483648\n");
+ MUST_FAIL_TEST_T32(Sub(r0, pc, -0x12345678),
+ "Ill-formed 'add' instruction.\n");
+ MUST_FAIL_TEST_T32(Sub(r0, pc, -0x7fffffff),
+ "Ill-formed 'add' instruction.\n");
+ COMPARE_T32(Sub(r0, pc, 0x12345678),
+ "mov r0, pc\n"
+ "mov ip, #22136\n"
+ "movt ip, #4660\n"
+ "sub r0, ip\n");
+ COMPARE_T32(Sub(r0, pc, 0x7fffffff),
+ "mov r0, pc\n"
+ "add r0, #1\n"
+ "add r0, #2147483648\n");
CLEANUP();
}
@@ -2697,40 +2637,29 @@
// Move 8, 16 and 32-bit immediates into D registers, duplicated across the
// destination.
- COMPARE_BOTH(Vmov(I8, d0, 0xac),
- "vmov.i8 d0, #172\n");
+ COMPARE_BOTH(Vmov(I8, d0, 0xac), "vmov.i8 d0, #172\n");
- COMPARE_BOTH(Vmov(I16, d0, 0xa4),
- "vmov.i16 d0, #164\n");
- COMPARE_BOTH(Vmov(I16, d0, 0x9797),
- "vmov.i8 d0, #151\n");
+ COMPARE_BOTH(Vmov(I16, d0, 0xa4), "vmov.i16 d0, #164\n");
+ COMPARE_BOTH(Vmov(I16, d0, 0x9797), "vmov.i8 d0, #151\n");
COMPARE_BOTH(Vmov(I16, d0, 0x9ef6),
"mov ip, #40694\n"
"vdup.16 d0, ip\n");
- COMPARE_BOTH(Vmov(I32, d0, 0x6d0000),
- "vmov.i32 d0, #7143424\n");
- COMPARE_BOTH(Vmov(I32, d0, 0x15ffffff),
- "vmvn.i32 d0, #3925868544\n");
- COMPARE_BOTH(Vmov(I32, d0, 0x74747474),
- "vmov.i8 d0, #116\n");
- COMPARE_BOTH(Vmov(I32, d0, 0xff0000ff),
- "vmov.i64 d0, #0xff0000ffff0000ff\n");
+ COMPARE_BOTH(Vmov(I32, d0, 0x6d0000), "vmov.i32 d0, #7143424\n");
+ COMPARE_BOTH(Vmov(I32, d0, 0x15ffffff), "vmvn.i32 d0, #3925868544\n");
+ COMPARE_BOTH(Vmov(I32, d0, 0x74747474), "vmov.i8 d0, #116\n");
+ COMPARE_BOTH(Vmov(I32, d0, 0xff0000ff), "vmov.i64 d0, #0xff0000ffff0000ff\n");
COMPARE_BOTH(Vmov(I32, d0, 0x1ecb9ef6),
"mov ip, #40694\n"
"movt ip, #7883\n"
"vdup.32 d0, ip\n");
- COMPARE_BOTH(Vmov(I32, d0, 0x006d0000),
- "vmov.i32 d0, #7143424\n");
+ COMPARE_BOTH(Vmov(I32, d0, 0x006d0000), "vmov.i32 d0, #7143424\n");
COMPARE_BOTH(Vmov(I32, d0, 0x00004da6),
"mov ip, #19878\n"
"vdup.32 d0, ip\n");
- COMPARE_BOTH(Vmov(I32, d0, 0xffffff55),
- "vmvn.i32 d0, #170\n");
- COMPARE_BOTH(Vmov(I32, d0, 0xffff55ff),
- "vmvn.i32 d0, #43520\n");
- COMPARE_BOTH(Vmov(I32, d0, 0xff55ffff),
- "vmvn.i32 d0, #11141120\n");
+ COMPARE_BOTH(Vmov(I32, d0, 0xffffff55), "vmvn.i32 d0, #170\n");
+ COMPARE_BOTH(Vmov(I32, d0, 0xffff55ff), "vmvn.i32 d0, #43520\n");
+ COMPARE_BOTH(Vmov(I32, d0, 0xff55ffff), "vmvn.i32 d0, #11141120\n");
COMPARE_BOTH(Vmov(I64, d0, UINT64_C(0xa5a5a5a5a5a5a5a5)),
"vmov.i8 d0, #165\n");
@@ -2760,8 +2689,7 @@
"mov ip, #35767\n"
"vmov.32 d0[1], ip\n");
- COMPARE_BOTH(Vmov(F32, d0, 0.5),
- "vmov.f32 d0, #0.5\n");
+ COMPARE_BOTH(Vmov(F32, d0, 0.5), "vmov.f32 d0, #0.5\n");
COMPARE_BOTH(Vmov(F32, d0, 1.1),
"mov ip, #52429\n"
"movt ip, #16268\n"
@@ -2786,31 +2714,23 @@
// Move 8, 16 and 32-bit immediates into Q registers, duplicated across the
// destination.
- COMPARE_BOTH(Vmov(I8, q0, 0xac),
- "vmov.i8 q0, #172\n");
+ COMPARE_BOTH(Vmov(I8, q0, 0xac), "vmov.i8 q0, #172\n");
- COMPARE_BOTH(Vmov(I16, q0, 0xa4),
- "vmov.i16 q0, #164\n");
- COMPARE_BOTH(Vmov(I16, q0, 0x9797),
- "vmov.i8 q0, #151\n");
+ COMPARE_BOTH(Vmov(I16, q0, 0xa4), "vmov.i16 q0, #164\n");
+ COMPARE_BOTH(Vmov(I16, q0, 0x9797), "vmov.i8 q0, #151\n");
COMPARE_BOTH(Vmov(I16, q0, 0x9ef6),
"mov ip, #40694\n"
"vdup.16 q0, ip\n");
- COMPARE_BOTH(Vmov(I32, q0, 0x6d0000),
- "vmov.i32 q0, #7143424\n");
- COMPARE_BOTH(Vmov(I32, q0, 0x15ffffff),
- "vmvn.i32 q0, #3925868544\n");
- COMPARE_BOTH(Vmov(I32, q0, 0x74747474),
- "vmov.i8 q0, #116\n");
- COMPARE_BOTH(Vmov(I32, q0, 0xff0000ff),
- "vmov.i64 q0, #0xff0000ffff0000ff\n");
+ COMPARE_BOTH(Vmov(I32, q0, 0x6d0000), "vmov.i32 q0, #7143424\n");
+ COMPARE_BOTH(Vmov(I32, q0, 0x15ffffff), "vmvn.i32 q0, #3925868544\n");
+ COMPARE_BOTH(Vmov(I32, q0, 0x74747474), "vmov.i8 q0, #116\n");
+ COMPARE_BOTH(Vmov(I32, q0, 0xff0000ff), "vmov.i64 q0, #0xff0000ffff0000ff\n");
COMPARE_BOTH(Vmov(I32, q0, 0x1ecb9ef6),
"mov ip, #40694\n"
"movt ip, #7883\n"
"vdup.32 q0, ip\n");
- COMPARE_BOTH(Vmov(I32, q0, 0x006d0000),
- "vmov.i32 q0, #7143424\n");
+ COMPARE_BOTH(Vmov(I32, q0, 0x006d0000), "vmov.i32 q0, #7143424\n");
COMPARE_BOTH(Vmov(I32, q0, 0x00004da6),
"mov ip, #19878\n"
"vdup.32 q0, ip\n");
@@ -2847,8 +2767,7 @@
"vmov.32 d0[1], ip\n"
"vmov.f64 d1, d0\n");
- COMPARE_BOTH(Vmov(F32, q0, 0.5),
- "vmov.f32 q0, #0.5\n");
+ COMPARE_BOTH(Vmov(F32, q0, 0.5), "vmov.f32 q0, #0.5\n");
COMPARE_BOTH(Vmov(F32, q0, 1.1),
"mov ip, #52429\n"
"movt ip, #16268\n"
@@ -3396,40 +3315,31 @@
SETUP();
#ifdef VIXL_DEBUG
- MUST_FAIL_TEST_BOTH_BLOCK(
- {
- Label label;
- masm.B(&label);
- },
- "Label used but not bound.\n")
+ MUST_FAIL_TEST_BOTH_BLOCK({
+ Label label;
+ masm.B(&label);
+ }, "Label used but not bound.\n")
- MUST_FAIL_TEST_BOTH_BLOCK(
- {
- Label label;
- masm.B(eq, &label);
- },
- "Label used but not bound.\n")
+ MUST_FAIL_TEST_BOTH_BLOCK({
+ Label label;
+ masm.B(eq, &label);
+ }, "Label used but not bound.\n")
- MUST_FAIL_TEST_T32_BLOCK(
- {
- Label label;
- masm.Cbz(r0, &label);
- },
- "Label used but not bound.\n")
+ MUST_FAIL_TEST_T32_BLOCK({
+ Label label;
+ masm.Cbz(r0, &label);
+ }, "Label used but not bound.\n")
- MUST_FAIL_TEST_T32_BLOCK(
- {
- Label label;
- masm.Cbnz(r1, &label);
- },
- "Label used but not bound.\n")
+ MUST_FAIL_TEST_T32_BLOCK({
+ Label label;
+ masm.Cbnz(r1, &label);
+ }, "Label used but not bound.\n")
#endif
CLEANUP();
}
-
TEST(macro_assembler_AddressComputationHelper) {
SETUP();
@@ -3457,34 +3367,26 @@
COMPARE_A32(Ldr(r0, masm.MemOperandComputationHelper(r2, r1, 0x1000, 0xfff)),
"add r2, r1, #4096\n"
"ldr r0, [r2]\n");
- COMPARE_A32(Ldr(r0, masm.MemOperandComputationHelper(r2,
- r1,
- 0xffffffff,
- 0xfff)),
+ COMPARE_A32(Ldr(r0,
+ masm.MemOperandComputationHelper(r2, r1, 0xffffffff, 0xfff)),
"sub r2, r1, #1\n"
"ldr r0, [r2]\n");
// TODO: Improve the code generation for these cases.
- COMPARE_A32(Ldr(r0, masm.MemOperandComputationHelper(r2,
- r1,
- 0x12345678,
- 0xfff)),
+ COMPARE_A32(Ldr(r0,
+ masm.MemOperandComputationHelper(r2, r1, 0x12345678, 0xfff)),
"mov r2, #20480\n"
"movt r2, #4660\n"
"add r2, r1, r2\n"
"ldr r0, [r2, #1656]\n");
- COMPARE_A32(Ldr(r0, masm.MemOperandComputationHelper(r2,
- r1,
- 0x7fffffff,
- 0xfff)),
+ COMPARE_A32(Ldr(r0,
+ masm.MemOperandComputationHelper(r2, r1, 0x7fffffff, 0xfff)),
"sub r2, r1, #1\n"
"sub r2, #2147483648\n"
"ldr r0, [r2]\n");
- COMPARE_A32(Ldr(r0, masm.MemOperandComputationHelper(r2,
- r1,
- 0xffcba000,
- 0xfff)),
+ COMPARE_A32(Ldr(r0,
+ masm.MemOperandComputationHelper(r2, r1, 0xffcba000, 0xfff)),
"sub r2, r1, #286720\n"
"sub r2, #3145728\n"
"ldr r0, [r2]\n");
@@ -3585,14 +3487,13 @@
TEST(vmrs_vmsr) {
SETUP();
- COMPARE_BOTH(Vmsr(FPSCR, r0),
- "vmsr FPSCR, r0\n");
+ COMPARE_BOTH(Vmsr(FPSCR, r0), "vmsr FPSCR, r0\n");
COMPARE_BOTH(Vmrs(RegisterOrAPSR_nzcv(r1.GetCode()), FPSCR),
- "vmrs r1, FPSCR\n");
+ "vmrs r1, FPSCR\n");
COMPARE_BOTH(Vmrs(RegisterOrAPSR_nzcv(pc.GetCode()), FPSCR),
- "vmrs APSR_nzcv, FPSCR\n");
+ "vmrs APSR_nzcv, FPSCR\n");
CLEANUP();
}
@@ -3602,59 +3503,56 @@
SETUP();
// ldm/stm
- COMPARE_BOTH(Ldm(r0, NO_WRITE_BACK, RegisterList(r1)),
- "ldm r0, {r1}\n");
+ COMPARE_BOTH(Ldm(r0, NO_WRITE_BACK, RegisterList(r1)), "ldm r0, {r1}\n");
COMPARE_BOTH(Ldm(r1, NO_WRITE_BACK, RegisterList(r2, r5, r9, r10)),
- "ldm r1, {r2,r5,r9,r10}\n");
+ "ldm r1, {r2,r5,r9,r10}\n");
- COMPARE_BOTH(Ldm(r0, WRITE_BACK, RegisterList(r1, r2)),
- "ldm r0!, {r1,r2}\n");
+ COMPARE_BOTH(Ldm(r0, WRITE_BACK, RegisterList(r1, r2)), "ldm r0!, {r1,r2}\n");
COMPARE_BOTH(Stm(r1, NO_WRITE_BACK, RegisterList(r2, r5, r9, r10)),
- "stm r1, {r2,r5,r9,r10}\n");
+ "stm r1, {r2,r5,r9,r10}\n");
- COMPARE_BOTH(Stm(r0, WRITE_BACK, RegisterList(r1, r2)),
- "stm r0!, {r1,r2}\n");
+ COMPARE_BOTH(Stm(r0, WRITE_BACK, RegisterList(r1, r2)), "stm r0!, {r1,r2}\n");
// ldmda/stmda
COMPARE_A32(Ldmda(r11, WRITE_BACK, RegisterList(r0, r1)),
- "ldmda r11!, {r0,r1}\n");
+ "ldmda r11!, {r0,r1}\n");
COMPARE_A32(Ldmda(r11, NO_WRITE_BACK, RegisterList(r2, r3)),
- "ldmda r11, {r2,r3}\n");
+ "ldmda r11, {r2,r3}\n");
COMPARE_A32(Stmda(r11, WRITE_BACK, RegisterList(r0, r1)),
- "stmda r11!, {r0,r1}\n");
+ "stmda r11!, {r0,r1}\n");
COMPARE_A32(Stmda(r11, NO_WRITE_BACK, RegisterList(r2, r3)),
- "stmda r11, {r2,r3}\n");
+ "stmda r11, {r2,r3}\n");
// ldmib/stmib
COMPARE_A32(Ldmib(r11, WRITE_BACK, RegisterList(r0, r1)),
- "ldmib r11!, {r0,r1}\n");
+ "ldmib r11!, {r0,r1}\n");
COMPARE_A32(Ldmib(r11, NO_WRITE_BACK, RegisterList(r2, r3)),
- "ldmib r11, {r2,r3}\n");
+ "ldmib r11, {r2,r3}\n");
COMPARE_A32(Stmib(r11, WRITE_BACK, RegisterList(r0, r1)),
- "stmib r11!, {r0,r1}\n");
+ "stmib r11!, {r0,r1}\n");
COMPARE_A32(Stmib(r11, NO_WRITE_BACK, RegisterList(r2, r3)),
- "stmib r11, {r2,r3}\n");
+ "stmib r11, {r2,r3}\n");
// ldmdb/stmdb
COMPARE_BOTH(Ldmdb(r11, WRITE_BACK, RegisterList(r0, r1)),
- "ldmdb r11!, {r0,r1}\n");
+ "ldmdb r11!, {r0,r1}\n");
COMPARE_BOTH(Ldmdb(r11, NO_WRITE_BACK, RegisterList(r2, r3)),
- "ldmdb r11, {r2,r3}\n");
+ "ldmdb r11, {r2,r3}\n");
COMPARE_BOTH(Stmdb(r11, WRITE_BACK, RegisterList(r0, r1)),
- "stmdb r11!, {r0,r1}\n");
+ "stmdb r11!, {r0,r1}\n");
COMPARE_BOTH(Stmdb(r11, NO_WRITE_BACK, RegisterList(r2, r3)),
- "stmdb r11, {r2,r3}\n");
+ "stmdb r11, {r2,r3}\n");
CLEANUP();
}
@@ -3672,22 +3570,19 @@
UseScratchRegisterScope temps(&masm);
temps.ExcludeAll();
- CHECK_T32_16(Adc(DontCare, r7, r7, r6),
- "adcs r7, r6\n");
+ CHECK_T32_16(Adc(DontCare, r7, r7, r6), "adcs r7, r6\n");
CHECK_T32_16_IT_BLOCK(Adc(DontCare, eq, r7, r7, r6),
"it eq\n"
"adceq r7, r6\n");
- CHECK_T32_16(Add(DontCare, r6, r7, 7),
- "adds r6, r7, #7\n");
+ CHECK_T32_16(Add(DontCare, r6, r7, 7), "adds r6, r7, #7\n");
CHECK_T32_16_IT_BLOCK(Add(DontCare, lt, r6, r7, 7),
"it lt\n"
"addlt r6, r7, #7\n");
- CHECK_T32_16(Add(DontCare, r5, r5, 255),
- "adds r5, #255\n");
+ CHECK_T32_16(Add(DontCare, r5, r5, 255), "adds r5, #255\n");
CHECK_T32_16_IT_BLOCK(Add(DontCare, lt, r5, r5, 255),
"it lt\n"
@@ -3695,146 +3590,121 @@
// Make sure we select the non flag-setting version here, since
// this can have two potential encodings.
- CHECK_T32_16(Add(DontCare, r1, r1, r2),
- "add r1, r2\n");
+ CHECK_T32_16(Add(DontCare, r1, r1, r2), "add r1, r2\n");
- CHECK_T32_16(Add(DontCare, r1, r2, r7),
- "adds r1, r2, r7\n");
+ CHECK_T32_16(Add(DontCare, r1, r2, r7), "adds r1, r2, r7\n");
CHECK_T32_16_IT_BLOCK(Add(DontCare, lt, r1, r2, r7),
"it lt\n"
"addlt r1, r2, r7\n");
- CHECK_T32_16(Add(DontCare, r4, r4, r12),
- "add r4, ip\n");
+ CHECK_T32_16(Add(DontCare, r4, r4, r12), "add r4, ip\n");
CHECK_T32_16_IT_BLOCK(Add(DontCare, eq, r4, r4, r12),
"it eq\n"
"addeq r4, ip\n");
- CHECK_T32_16(Add(DontCare, r0, sp, 1020),
- "add r0, sp, #1020\n");
+ CHECK_T32_16(Add(DontCare, r0, sp, 1020), "add r0, sp, #1020\n");
CHECK_T32_16_IT_BLOCK(Add(DontCare, ge, r0, sp, 1020),
"it ge\n"
"addge r0, sp, #1020\n");
// The equivalent inside an IT block is deprecated.
- CHECK_T32_16(Add(DontCare, sp, sp, 508),
- "add sp, #508\n");
+ CHECK_T32_16(Add(DontCare, sp, sp, 508), "add sp, #508\n");
- CHECK_T32_16(Add(DontCare, r7, sp, r7),
- "add r7, sp, r7\n");
+ CHECK_T32_16(Add(DontCare, r7, sp, r7), "add r7, sp, r7\n");
CHECK_T32_16_IT_BLOCK(Add(DontCare, eq, r7, sp, r7),
"it eq\n"
"addeq r7, sp, r7\n");
- CHECK_T32_16(Add(DontCare, sp, sp, r10),
- "add sp, r10\n");
+ CHECK_T32_16(Add(DontCare, sp, sp, r10), "add sp, r10\n");
CHECK_T32_16_IT_BLOCK(Add(DontCare, eq, sp, sp, r10),
"it eq\n"
"addeq sp, r10\n");
- CHECK_T32_16(And(DontCare, r7, r7, r6),
- "ands r7, r6\n");
+ CHECK_T32_16(And(DontCare, r7, r7, r6), "ands r7, r6\n");
CHECK_T32_16_IT_BLOCK(And(DontCare, eq, r7, r7, r6),
"it eq\n"
"andeq r7, r6\n");
- CHECK_T32_16(Asr(DontCare, r0, r1, 32),
- "asrs r0, r1, #32\n");
+ CHECK_T32_16(Asr(DontCare, r0, r1, 32), "asrs r0, r1, #32\n");
CHECK_T32_16_IT_BLOCK(Asr(DontCare, eq, r0, r1, 32),
"it eq\n"
"asreq r0, r1, #32\n");
- CHECK_T32_16(Asr(DontCare, r0, r0, r1),
- "asrs r0, r1\n");
+ CHECK_T32_16(Asr(DontCare, r0, r0, r1), "asrs r0, r1\n");
CHECK_T32_16_IT_BLOCK(Asr(DontCare, eq, r0, r0, r1),
"it eq\n"
"asreq r0, r1\n");
- CHECK_T32_16(Bic(DontCare, r7, r7, r6),
- "bics r7, r6\n");
+ CHECK_T32_16(Bic(DontCare, r7, r7, r6), "bics r7, r6\n");
CHECK_T32_16_IT_BLOCK(Bic(DontCare, eq, r7, r7, r6),
"it eq\n"
"biceq r7, r6\n");
- CHECK_T32_16(Eor(DontCare, r7, r7, r6),
- "eors r7, r6\n");
+ CHECK_T32_16(Eor(DontCare, r7, r7, r6), "eors r7, r6\n");
CHECK_T32_16_IT_BLOCK(Eor(DontCare, eq, r7, r7, r6),
"it eq\n"
"eoreq r7, r6\n");
- CHECK_T32_16(Lsl(DontCare, r0, r1, 31),
- "lsls r0, r1, #31\n");
+ CHECK_T32_16(Lsl(DontCare, r0, r1, 31), "lsls r0, r1, #31\n");
CHECK_T32_16_IT_BLOCK(Lsl(DontCare, eq, r0, r1, 31),
"it eq\n"
"lsleq r0, r1, #31\n");
- CHECK_T32_16(Lsl(DontCare, r0, r0, r1),
- "lsls r0, r1\n");
+ CHECK_T32_16(Lsl(DontCare, r0, r0, r1), "lsls r0, r1\n");
CHECK_T32_16_IT_BLOCK(Lsl(DontCare, eq, r0, r0, r1),
"it eq\n"
"lsleq r0, r1\n");
- CHECK_T32_16(Lsr(DontCare, r0, r1, 32),
- "lsrs r0, r1, #32\n");
+ CHECK_T32_16(Lsr(DontCare, r0, r1, 32), "lsrs r0, r1, #32\n");
CHECK_T32_16_IT_BLOCK(Lsr(DontCare, eq, r0, r1, 32),
"it eq\n"
"lsreq r0, r1, #32\n");
- CHECK_T32_16(Lsr(DontCare, r0, r0, r1),
- "lsrs r0, r1\n");
+ CHECK_T32_16(Lsr(DontCare, r0, r0, r1), "lsrs r0, r1\n");
CHECK_T32_16_IT_BLOCK(Lsr(DontCare, eq, r0, r0, r1),
"it eq\n"
"lsreq r0, r1\n");
- CHECK_T32_16(Mov(DontCare, r7, 255),
- "movs r7, #255\n");
+ CHECK_T32_16(Mov(DontCare, r7, 255), "movs r7, #255\n");
CHECK_T32_16_IT_BLOCK(Mov(DontCare, eq, r7, 255),
"it eq\n"
"moveq r7, #255\n");
- CHECK_T32_16(Mov(DontCare, r9, r8),
- "mov r9, r8\n");
+ CHECK_T32_16(Mov(DontCare, r9, r8), "mov r9, r8\n");
// Check that we don't try to pick the MOVS register-shifted register variant.
- CHECK_T32_16(Mov(DontCare, r5, r6),
- "mov r5, r6\n");
+ CHECK_T32_16(Mov(DontCare, r5, r6), "mov r5, r6\n");
CHECK_T32_16_IT_BLOCK(Mov(DontCare, eq, r9, r8),
"it eq\n"
"moveq r9, r8\n");
- CHECK_T32_16(Mov(DontCare, r5, Operand(r6, ASR, 1)),
- "asrs r5, r6, #1\n");
+ CHECK_T32_16(Mov(DontCare, r5, Operand(r6, ASR, 1)), "asrs r5, r6, #1\n");
- CHECK_T32_16(Mov(DontCare, r5, Operand(r6, ASR, 32)),
- "asrs r5, r6, #32\n");
+ CHECK_T32_16(Mov(DontCare, r5, Operand(r6, ASR, 32)), "asrs r5, r6, #32\n");
- CHECK_T32_16(Mov(DontCare, r5, Operand(r6, LSR, 1)),
- "lsrs r5, r6, #1\n");
+ CHECK_T32_16(Mov(DontCare, r5, Operand(r6, LSR, 1)), "lsrs r5, r6, #1\n");
- CHECK_T32_16(Mov(DontCare, r5, Operand(r6, LSR, 32)),
- "lsrs r5, r6, #32\n");
+ CHECK_T32_16(Mov(DontCare, r5, Operand(r6, LSR, 32)), "lsrs r5, r6, #32\n");
- CHECK_T32_16(Mov(DontCare, r5, Operand(r6, LSL, 1)),
- "lsls r5, r6, #1\n");
+ CHECK_T32_16(Mov(DontCare, r5, Operand(r6, LSL, 1)), "lsls r5, r6, #1\n");
- CHECK_T32_16(Mov(DontCare, r5, Operand(r6, LSL, 31)),
- "lsls r5, r6, #31\n");
+ CHECK_T32_16(Mov(DontCare, r5, Operand(r6, LSL, 31)), "lsls r5, r6, #31\n");
CHECK_T32_16_IT_BLOCK(Mov(DontCare, eq, r5, Operand(r6, ASR, 1)),
"it eq\n"
@@ -3860,133 +3730,109 @@
"it eq\n"
"lsleq r5, r6, #31\n");
- CHECK_T32_16(Mov(DontCare, r7, Operand(r7, ASR, r6)),
- "asrs r7, r6\n");
+ CHECK_T32_16(Mov(DontCare, r7, Operand(r7, ASR, r6)), "asrs r7, r6\n");
CHECK_T32_16_IT_BLOCK(Mov(DontCare, eq, r7, Operand(r7, ASR, r6)),
"it eq\n"
"asreq r7, r6\n");
- CHECK_T32_16(Mov(DontCare, r7, Operand(r7, LSR, r6)),
- "lsrs r7, r6\n");
+ CHECK_T32_16(Mov(DontCare, r7, Operand(r7, LSR, r6)), "lsrs r7, r6\n");
CHECK_T32_16_IT_BLOCK(Mov(DontCare, eq, r7, Operand(r7, LSR, r6)),
"it eq\n"
"lsreq r7, r6\n");
- CHECK_T32_16(Mov(DontCare, r7, Operand(r7, LSL, r6)),
- "lsls r7, r6\n");
+ CHECK_T32_16(Mov(DontCare, r7, Operand(r7, LSL, r6)), "lsls r7, r6\n");
CHECK_T32_16_IT_BLOCK(Mov(DontCare, eq, r7, Operand(r7, LSL, r6)),
"it eq\n"
"lsleq r7, r6\n");
- CHECK_T32_16(Mov(DontCare, r7, Operand(r7, ROR, r6)),
- "rors r7, r6\n");
+ CHECK_T32_16(Mov(DontCare, r7, Operand(r7, ROR, r6)), "rors r7, r6\n");
CHECK_T32_16_IT_BLOCK(Mov(DontCare, eq, r7, Operand(r7, ROR, r6)),
"it eq\n"
"roreq r7, r6\n");
- CHECK_T32_16(Mul(DontCare, r0, r1, r0),
- "muls r0, r1, r0\n");
+ CHECK_T32_16(Mul(DontCare, r0, r1, r0), "muls r0, r1, r0\n");
CHECK_T32_16_IT_BLOCK(Mul(DontCare, eq, r0, r1, r0),
"it eq\n"
"muleq r0, r1, r0\n");
- CHECK_T32_16(Mvn(DontCare, r6, r7),
- "mvns r6, r7\n");
+ CHECK_T32_16(Mvn(DontCare, r6, r7), "mvns r6, r7\n");
CHECK_T32_16_IT_BLOCK(Mvn(DontCare, eq, r6, r7),
"it eq\n"
"mvneq r6, r7\n");
- CHECK_T32_16(Orr(DontCare, r7, r7, r6),
- "orrs r7, r6\n");
+ CHECK_T32_16(Orr(DontCare, r7, r7, r6), "orrs r7, r6\n");
CHECK_T32_16_IT_BLOCK(Orr(DontCare, eq, r7, r7, r6),
"it eq\n"
"orreq r7, r6\n");
- CHECK_T32_16(Ror(DontCare, r0, r0, r1),
- "rors r0, r1\n");
+ CHECK_T32_16(Ror(DontCare, r0, r0, r1), "rors r0, r1\n");
CHECK_T32_16_IT_BLOCK(Ror(DontCare, eq, r0, r0, r1),
"it eq\n"
"roreq r0, r1\n");
- CHECK_T32_16(Rsb(DontCare, r7, r6, 0),
- "rsbs r7, r6, #0\n");
+ CHECK_T32_16(Rsb(DontCare, r7, r6, 0), "rsbs r7, r6, #0\n");
CHECK_T32_16_IT_BLOCK(Rsb(DontCare, eq, r7, r6, 0),
"it eq\n"
"rsbeq r7, r6, #0\n");
- CHECK_T32_16(Sbc(DontCare, r7, r7, r6),
- "sbcs r7, r6\n");
+ CHECK_T32_16(Sbc(DontCare, r7, r7, r6), "sbcs r7, r6\n");
CHECK_T32_16_IT_BLOCK(Sbc(DontCare, eq, r7, r7, r6),
"it eq\n"
"sbceq r7, r6\n");
- CHECK_T32_16(Sub(DontCare, r6, r7, 7),
- "subs r6, r7, #7\n");
+ CHECK_T32_16(Sub(DontCare, r6, r7, 7), "subs r6, r7, #7\n");
CHECK_T32_16_IT_BLOCK(Sub(DontCare, lt, r6, r7, 7),
"it lt\n"
"sublt r6, r7, #7\n");
- CHECK_T32_16(Sub(DontCare, r5, r5, 255),
- "subs r5, #255\n");
+ CHECK_T32_16(Sub(DontCare, r5, r5, 255), "subs r5, #255\n");
CHECK_T32_16_IT_BLOCK(Sub(DontCare, lt, r5, r5, 255),
"it lt\n"
"sublt r5, #255\n");
- CHECK_T32_16(Sub(DontCare, r1, r2, r7),
- "subs r1, r2, r7\n");
+ CHECK_T32_16(Sub(DontCare, r1, r2, r7), "subs r1, r2, r7\n");
CHECK_T32_16_IT_BLOCK(Sub(DontCare, lt, r1, r2, r7),
"it lt\n"
"sublt r1, r2, r7\n");
// The equivalent inside an IT block is deprecated.
- CHECK_T32_16(Sub(DontCare, sp, sp, 508),
- "sub sp, #508\n");
+ CHECK_T32_16(Sub(DontCare, sp, sp, 508), "sub sp, #508\n");
// Generate SUBS for ADD.
- CHECK_T32_16(Add(DontCare, r0, r1, -1),
- "subs r0, r1, #1\n");
+ CHECK_T32_16(Add(DontCare, r0, r1, -1), "subs r0, r1, #1\n");
- CHECK_T32_16(Add(DontCare, r0, r1, -7),
- "subs r0, r1, #7\n");
+ CHECK_T32_16(Add(DontCare, r0, r1, -7), "subs r0, r1, #7\n");
- CHECK_T32_16(Add(DontCare, r6, r6, -1),
- "subs r6, #1\n");
+ CHECK_T32_16(Add(DontCare, r6, r6, -1), "subs r6, #1\n");
- CHECK_T32_16(Add(DontCare, r6, r6, -255),
- "subs r6, #255\n");
+ CHECK_T32_16(Add(DontCare, r6, r6, -255), "subs r6, #255\n");
// Generate ADDS for SUB.
- CHECK_T32_16(Sub(DontCare, r0, r1, -1),
- "adds r0, r1, #1\n");
+ CHECK_T32_16(Sub(DontCare, r0, r1, -1), "adds r0, r1, #1\n");
- CHECK_T32_16(Sub(DontCare, r0, r1, -7),
- "adds r0, r1, #7\n");
+ CHECK_T32_16(Sub(DontCare, r0, r1, -7), "adds r0, r1, #7\n");
- CHECK_T32_16(Sub(DontCare, r6, r6, -1),
- "adds r6, #1\n");
+ CHECK_T32_16(Sub(DontCare, r6, r6, -1), "adds r6, #1\n");
- CHECK_T32_16(Sub(DontCare, r6, r6, -255),
- "adds r6, #255\n");
+ CHECK_T32_16(Sub(DontCare, r6, r6, -255), "adds r6, #255\n");
// Check that we don't change the opcode for INT_MIN.
- COMPARE_T32(Add(DontCare, r6, r6, 0x80000000),
- "add r6, #2147483648\n");
+ COMPARE_T32(Add(DontCare, r6, r6, 0x80000000), "add r6, #2147483648\n");
- COMPARE_T32(Sub(DontCare, r6, r6, 0x80000000),
- "sub r6, #2147483648\n");
+ COMPARE_T32(Sub(DontCare, r6, r6, 0x80000000), "sub r6, #2147483648\n");
CLEANUP();
}
@@ -4033,11 +3879,11 @@
"add r0, r1, #33\n"
"add r0, #1124073472\n");
COMPARE_T32(Add(r0, r1, 0x54321),
- "add r0, r1, #801\n"
- "add r0, #344064\n");
+ "add r0, r1, #801\n"
+ "add r0, #344064\n");
COMPARE_T32(Add(r0, r1, 0x54000321),
- "add r0, r1, #801\n"
- "add r0, #1409286144\n");
+ "add r0, r1, #801\n"
+ "add r0, #1409286144\n");
COMPARE_A32(Sub(r0, r1, 0x4321),
"sub r0, r1, #33\n"
@@ -4058,11 +3904,11 @@
"sub r0, r1, #33\n"
"sub r0, #1124073472\n");
COMPARE_T32(Sub(r0, r1, 0x54321),
- "sub r0, r1, #801\n"
- "sub r0, #344064\n");
+ "sub r0, r1, #801\n"
+ "sub r0, #344064\n");
COMPARE_T32(Sub(r0, r1, 0x54000321),
- "sub r0, r1, #801\n"
- "sub r0, #1409286144\n");
+ "sub r0, r1, #801\n"
+ "sub r0, #1409286144\n");
CLEANUP();
}
diff --git a/test/aarch32/test-utils-aarch32.cc b/test/aarch32/test-utils-aarch32.cc
index 4684bbf..960a9f5 100644
--- a/test/aarch32/test-utils-aarch32.cc
+++ b/test/aarch32/test-utils-aarch32.cc
@@ -31,8 +31,7 @@
namespace vixl {
namespace aarch32 {
-#define VIXL_OFFSET(type, member) \
- offsetof(type, member)
+#define VIXL_OFFSET(type, member) offsetof(type, member)
void RegisterDump::Dump(MacroAssembler* masm) {
UseScratchRegisterScope scratch(masm);
@@ -72,7 +71,8 @@
for (unsigned i = 0; i < kMaxNumberOfDRegisters; i++) {
DRegister rt(i);
- __ Vstr(Untyped64, rt,
+ __ Vstr(Untyped64,
+ rt,
MemOperand(dump_base, d_offset + (i * kDRegSizeInBytes)));
}
@@ -90,13 +90,12 @@
__ Pop(dump_base);
// Dump tmp, dump_base and the stack pointer.
- __ Str(tmp, MemOperand(dump2_base,
- r_offset + (tmp.GetCode() * kRegSizeInBytes)));
+ __ Str(tmp,
+ MemOperand(dump2_base, r_offset + (tmp.GetCode() * kRegSizeInBytes)));
__ Str(dump_base,
MemOperand(dump2_base,
r_offset + (dump_base.GetCode() * kRegSizeInBytes)));
- __ Str(sp, MemOperand(dump2_base,
- r_offset + (kSPRegNum * kRegSizeInBytes)));
+ __ Str(sp, MemOperand(dump2_base, r_offset + (kSPRegNum * kRegSizeInBytes)));
completed_ = true;
}
@@ -105,15 +104,15 @@
bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result) {
if (result != expected) {
printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
- expected, result);
+ expected,
+ result);
}
return expected == result;
}
-bool Equal32(uint32_t expected, const RegisterDump* core,
- const Register& reg) {
+bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg) {
if (reg.IsPC()) {
printf("Testing the value of the program counter is not supported.");
return false;
@@ -123,7 +122,8 @@
}
-bool Equal32(uint32_t expected, const RegisterDump* core,
+bool Equal32(uint32_t expected,
+ const RegisterDump* core,
const SRegister& sreg) {
return Equal32(expected, core, core->GetSRegisterBits(sreg.GetCode()));
}
@@ -132,14 +132,16 @@
bool Equal64(uint64_t expected, const RegisterDump*, uint64_t result) {
if (result != expected) {
printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
- expected, result);
+ expected,
+ result);
}
return expected == result;
}
-bool Equal64(uint64_t expected, const RegisterDump* core,
+bool Equal64(uint64_t expected,
+ const RegisterDump* core,
const DRegister& dreg) {
return Equal64(expected, core, core->GetDRegisterBits(dreg.GetCode()));
}
@@ -147,41 +149,39 @@
bool Equal128(vec128_t expected, const RegisterDump*, vec128_t result) {
if ((result.h != expected.h) || (result.l != expected.l)) {
- printf("Expected 0x%016" PRIx64 "%016" PRIx64 "\t "
+ printf("Expected 0x%016" PRIx64 "%016" PRIx64
+ "\t "
"Found 0x%016" PRIx64 "%016" PRIx64 "\n",
- expected.h, expected.l, result.h, result.l);
+ expected.h,
+ expected.l,
+ result.h,
+ result.l);
}
return ((expected.h == result.h) && (expected.l == result.l));
}
-bool Equal128(uint64_t expected_h, uint64_t expected_l,
- const RegisterDump* core, const QRegister& qreg) {
+bool Equal128(uint64_t expected_h,
+ uint64_t expected_l,
+ const RegisterDump* core,
+ const QRegister& qreg) {
vec128_t expected = {expected_l, expected_h};
vec128_t result = core->GetQRegisterBits(qreg.GetCode());
return Equal128(expected, core, result);
}
-static char FlagN(uint32_t flags) {
- return (flags & NFlag) ? 'N' : 'n';
-}
+static char FlagN(uint32_t flags) { return (flags & NFlag) ? 'N' : 'n'; }
-static char FlagZ(uint32_t flags) {
- return (flags & ZFlag) ? 'Z' : 'z';
-}
+static char FlagZ(uint32_t flags) { return (flags & ZFlag) ? 'Z' : 'z'; }
-static char FlagC(uint32_t flags) {
- return (flags & CFlag) ? 'C' : 'c';
-}
+static char FlagC(uint32_t flags) { return (flags & CFlag) ? 'C' : 'c'; }
-static char FlagV(uint32_t flags) {
- return (flags & VFlag) ? 'V' : 'v';
-}
+static char FlagV(uint32_t flags) { return (flags & VFlag) ? 'V' : 'v'; }
bool EqualNzcv(uint32_t expected, uint32_t result) {
@@ -189,8 +189,14 @@
VIXL_ASSERT((result & ~NZCVFlag) == 0);
if (result != expected) {
printf("Expected: %c%c%c%c\t Found: %c%c%c%c\n",
- FlagN(expected), FlagZ(expected), FlagC(expected), FlagV(expected),
- FlagN(result), FlagZ(result), FlagC(result), FlagV(result));
+ FlagN(expected),
+ FlagZ(expected),
+ FlagC(expected),
+ FlagV(expected),
+ FlagN(result),
+ FlagZ(result),
+ FlagC(result),
+ FlagV(result));
return false;
}
@@ -209,12 +215,16 @@
} else {
if (std::isnan(expected) || (expected == 0.0)) {
printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
- FloatToRawbits(expected), result);
+ FloatToRawbits(expected),
+ result);
} else {
- printf("Expected %.9f (0x%08" PRIx32 ")\t "
+ printf("Expected %.9f (0x%08" PRIx32
+ ")\t "
"Found %.9f (0x%08" PRIx32 ")\n",
- expected, FloatToRawbits(expected),
- RawbitsToFloat(result), result);
+ expected,
+ FloatToRawbits(expected),
+ RawbitsToFloat(result),
+ result);
}
return false;
}
@@ -233,12 +243,16 @@
if (std::isnan(expected) || (expected == 0.0)) {
printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
- DoubleToRawbits(expected), DoubleToRawbits(result));
+ DoubleToRawbits(expected),
+ DoubleToRawbits(result));
} else {
- printf("Expected %.17f (0x%016" PRIx64 ")\t "
+ printf("Expected %.17f (0x%016" PRIx64
+ ")\t "
"Found %.17f (0x%016" PRIx64 ")\n",
- expected, DoubleToRawbits(expected),
- RawbitsToDouble(result), result);
+ expected,
+ DoubleToRawbits(expected),
+ RawbitsToDouble(result),
+ result);
}
return false;
}
diff --git a/test/aarch32/test-utils-aarch32.h b/test/aarch32/test-utils-aarch32.h
index 2e7f8f3..dd8ecb5 100644
--- a/test/aarch32/test-utils-aarch32.h
+++ b/test/aarch32/test-utils-aarch32.h
@@ -45,40 +45,40 @@
// Helper constants used to check for condition code combinations. These are
// not part of instruction definitions as no instruction uses them directly.
-const uint32_t NoFlag = 0x0;
-const uint32_t NFlag = 0x80000000;
-const uint32_t ZFlag = 0x40000000;
-const uint32_t CFlag = 0x20000000;
-const uint32_t VFlag = 0x10000000;
-const uint32_t NZFlag = NFlag | ZFlag;
-const uint32_t NCFlag = NFlag | CFlag;
-const uint32_t NVFlag = NFlag | VFlag;
-const uint32_t ZCFlag = ZFlag | CFlag;
-const uint32_t ZVFlag = ZFlag | VFlag;
-const uint32_t CVFlag = CFlag | VFlag;
-const uint32_t NZCFlag = NFlag | ZFlag | CFlag;
-const uint32_t NZVFlag = NFlag | ZFlag | VFlag;
-const uint32_t NCVFlag = NFlag | CFlag | VFlag;
-const uint32_t ZCVFlag = ZFlag | CFlag | VFlag;
+const uint32_t NoFlag = 0x0;
+const uint32_t NFlag = 0x80000000;
+const uint32_t ZFlag = 0x40000000;
+const uint32_t CFlag = 0x20000000;
+const uint32_t VFlag = 0x10000000;
+const uint32_t NZFlag = NFlag | ZFlag;
+const uint32_t NCFlag = NFlag | CFlag;
+const uint32_t NVFlag = NFlag | VFlag;
+const uint32_t ZCFlag = ZFlag | CFlag;
+const uint32_t ZVFlag = ZFlag | VFlag;
+const uint32_t CVFlag = CFlag | VFlag;
+const uint32_t NZCFlag = NFlag | ZFlag | CFlag;
+const uint32_t NZVFlag = NFlag | ZFlag | VFlag;
+const uint32_t NCVFlag = NFlag | CFlag | VFlag;
+const uint32_t ZCVFlag = ZFlag | CFlag | VFlag;
const uint32_t NZCVFlag = NFlag | ZFlag | CFlag | VFlag;
-const uint32_t QFlag = 0x08000000;
+const uint32_t QFlag = 0x08000000;
-const uint32_t GE0Flag = 0x00010000;
-const uint32_t GE1Flag = 0x00020000;
-const uint32_t GE2Flag = 0x00040000;
-const uint32_t GE3Flag = 0x00080000;
-const uint32_t GE01Flag = GE0Flag | GE1Flag;
-const uint32_t GE02Flag = GE0Flag | GE2Flag;
-const uint32_t GE03Flag = GE0Flag | GE3Flag;
-const uint32_t GE12Flag = GE1Flag | GE2Flag;
-const uint32_t GE13Flag = GE1Flag | GE3Flag;
-const uint32_t GE23Flag = GE2Flag | GE3Flag;
-const uint32_t GE012Flag = GE0Flag | GE1Flag | GE2Flag;
-const uint32_t GE013Flag = GE0Flag | GE1Flag | GE3Flag;
-const uint32_t GE023Flag = GE0Flag | GE2Flag | GE3Flag;
-const uint32_t GE123Flag = GE1Flag | GE2Flag | GE3Flag;
+const uint32_t GE0Flag = 0x00010000;
+const uint32_t GE1Flag = 0x00020000;
+const uint32_t GE2Flag = 0x00040000;
+const uint32_t GE3Flag = 0x00080000;
+const uint32_t GE01Flag = GE0Flag | GE1Flag;
+const uint32_t GE02Flag = GE0Flag | GE2Flag;
+const uint32_t GE03Flag = GE0Flag | GE3Flag;
+const uint32_t GE12Flag = GE1Flag | GE2Flag;
+const uint32_t GE13Flag = GE1Flag | GE3Flag;
+const uint32_t GE23Flag = GE2Flag | GE3Flag;
+const uint32_t GE012Flag = GE0Flag | GE1Flag | GE2Flag;
+const uint32_t GE013Flag = GE0Flag | GE1Flag | GE3Flag;
+const uint32_t GE023Flag = GE0Flag | GE2Flag | GE3Flag;
+const uint32_t GE123Flag = GE1Flag | GE2Flag | GE3Flag;
const uint32_t GE0123Flag = GE0Flag | GE1Flag | GE2Flag | GE3Flag;
-const uint32_t GEFlags = GE0123Flag;
+const uint32_t GEFlags = GE0123Flag;
struct vec128_t {
uint64_t l;
@@ -112,10 +112,7 @@
vec128_t GetQRegisterBits(unsigned code) const {
VIXL_ASSERT(IsComplete());
VIXL_ASSERT(code < kNumberOfQRegisters);
- vec128_t content = {
- dump_.d_[code * 2],
- dump_.d_[(code * 2) + 1]
- };
+ vec128_t content = {dump_.d_[code * 2], dump_.d_[(code * 2) + 1]};
return content;
}
@@ -140,9 +137,7 @@
}
// Stack pointer accessors.
- int32_t spreg() const {
- return reg(kSPRegNum);
- }
+ int32_t spreg() const { return reg(kSPRegNum); }
// Flags accessors.
uint32_t flags_nzcv() const {
@@ -150,9 +145,7 @@
return dump_.flags_ & NZCVFlag;
}
- bool IsComplete() const {
- return completed_;
- }
+ bool IsComplete() const { return completed_; }
private:
// Indicate whether the dump operation has been completed.
@@ -176,15 +169,17 @@
bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg);
bool Equal32(uint32_t expected, const RegisterDump* core, uint32_t result);
-bool Equal32(uint32_t expected, const RegisterDump* core,
+bool Equal32(uint32_t expected,
+ const RegisterDump* core,
const SRegister& sreg);
-bool Equal64(uint64_t expected, const RegisterDump* core,
+bool Equal64(uint64_t expected,
+ const RegisterDump* core,
const DRegister& dreg);
-bool Equal128(uint64_t expected_h, uint64_t expected_l,
- const RegisterDump* core, const QRegister& qreg);
-bool EqualFP32(float expected,
- const RegisterDump* core,
- const SRegister& dreg);
+bool Equal128(uint64_t expected_h,
+ uint64_t expected_l,
+ const RegisterDump* core,
+ const QRegister& qreg);
+bool EqualFP32(float expected, const RegisterDump* core, const SRegister& dreg);
bool EqualFP64(double expected,
const RegisterDump* core,
const DRegister& dreg);
diff --git a/test/aarch64/examples/test-examples.cc b/test/aarch64/examples/test-examples.cc
index ec1ce7b..7817536 100644
--- a/test/aarch64/examples/test-examples.cc
+++ b/test/aarch64/examples/test-examples.cc
@@ -39,9 +39,7 @@
using namespace vixl::aarch64;
-TEST(custom_disassembler) {
- TestCustomDisassembler();
-}
+TEST(custom_disassembler) { TestCustomDisassembler(); }
// The tests below only work with the simulator.
@@ -63,30 +61,28 @@
// Multiply two column-major 4x4 matrices of 32 bit floating point values.
// Return a column-major 4x4 matrix of 32 bit floating point values in 'C'.
void MatrixMultiplyC(float C[16], float A[16], float B[16]) {
- C[ 0] = A[ 0]*B[ 0] + A[ 4]*B[ 1] + A[ 8]*B[ 2] + A[12]*B[ 3];
- C[ 1] = A[ 1]*B[ 0] + A[ 5]*B[ 1] + A[ 9]*B[ 2] + A[13]*B[ 3];
- C[ 2] = A[ 2]*B[ 0] + A[ 6]*B[ 1] + A[10]*B[ 2] + A[14]*B[ 3];
- C[ 3] = A[ 3]*B[ 0] + A[ 7]*B[ 1] + A[11]*B[ 2] + A[15]*B[ 3];
+ C[0] = A[0] * B[0] + A[4] * B[1] + A[8] * B[2] + A[12] * B[3];
+ C[1] = A[1] * B[0] + A[5] * B[1] + A[9] * B[2] + A[13] * B[3];
+ C[2] = A[2] * B[0] + A[6] * B[1] + A[10] * B[2] + A[14] * B[3];
+ C[3] = A[3] * B[0] + A[7] * B[1] + A[11] * B[2] + A[15] * B[3];
- C[ 4] = A[ 0]*B[ 4] + A[ 4]*B[ 5] + A[ 8]*B[ 6] + A[12]*B[ 7];
- C[ 5] = A[ 1]*B[ 4] + A[ 5]*B[ 5] + A[ 9]*B[ 6] + A[13]*B[ 7];
- C[ 6] = A[ 2]*B[ 4] + A[ 6]*B[ 5] + A[10]*B[ 6] + A[14]*B[ 7];
- C[ 7] = A[ 3]*B[ 4] + A[ 7]*B[ 5] + A[11]*B[ 6] + A[15]*B[ 7];
+ C[4] = A[0] * B[4] + A[4] * B[5] + A[8] * B[6] + A[12] * B[7];
+ C[5] = A[1] * B[4] + A[5] * B[5] + A[9] * B[6] + A[13] * B[7];
+ C[6] = A[2] * B[4] + A[6] * B[5] + A[10] * B[6] + A[14] * B[7];
+ C[7] = A[3] * B[4] + A[7] * B[5] + A[11] * B[6] + A[15] * B[7];
- C[ 8] = A[ 0]*B[ 8] + A[ 4]*B[ 9] + A[ 8]*B[10] + A[12]*B[11];
- C[ 9] = A[ 1]*B[ 8] + A[ 5]*B[ 9] + A[ 9]*B[10] + A[13]*B[11];
- C[10] = A[ 2]*B[ 8] + A[ 6]*B[ 9] + A[10]*B[10] + A[14]*B[11];
- C[11] = A[ 3]*B[ 8] + A[ 7]*B[ 9] + A[11]*B[10] + A[15]*B[11];
+ C[8] = A[0] * B[8] + A[4] * B[9] + A[8] * B[10] + A[12] * B[11];
+ C[9] = A[1] * B[8] + A[5] * B[9] + A[9] * B[10] + A[13] * B[11];
+ C[10] = A[2] * B[8] + A[6] * B[9] + A[10] * B[10] + A[14] * B[11];
+ C[11] = A[3] * B[8] + A[7] * B[9] + A[11] * B[10] + A[15] * B[11];
- C[12] = A[ 0]*B[12] + A[ 4]*B[13] + A[ 8]*B[14] + A[12]*B[15];
- C[13] = A[ 1]*B[12] + A[ 5]*B[13] + A[ 9]*B[14] + A[13]*B[15];
- C[14] = A[ 2]*B[12] + A[ 6]*B[13] + A[10]*B[14] + A[14]*B[15];
- C[15] = A[ 3]*B[12] + A[ 7]*B[13] + A[11]*B[14] + A[15]*B[15];
+ C[12] = A[0] * B[12] + A[4] * B[13] + A[8] * B[14] + A[12] * B[15];
+ C[13] = A[1] * B[12] + A[5] * B[13] + A[9] * B[14] + A[13] * B[15];
+ C[14] = A[2] * B[12] + A[6] * B[13] + A[10] * B[14] + A[14] * B[15];
+ C[15] = A[3] * B[12] + A[7] * B[13] + A[11] * B[14] + A[15] * B[15];
}
-double Add3DoubleC(double x, double y, double z) {
- return x + y + z;
-}
+double Add3DoubleC(double x, double y, double z) { return x + y + z; }
double Add4DoubleC(uint64_t a, double b, uint64_t c, double d) {
return static_cast<double>(a) + b + static_cast<double>(c) + d;
@@ -103,7 +99,7 @@
}
-void GenerateTestWrapper(MacroAssembler* masm, RegisterDump *regs) {
+void GenerateTestWrapper(MacroAssembler* masm, RegisterDump* regs) {
__ Push(xzr, lr);
__ Blr(x15);
regs->Dump(masm);
@@ -112,92 +108,91 @@
}
-#define TEST_FUNCTION(Func) \
- do { \
- int64_t saved_xregs[13]; \
- saved_xregs[0] = simulator.ReadXRegister(19); \
- saved_xregs[1] = simulator.ReadXRegister(20); \
- saved_xregs[2] = simulator.ReadXRegister(21); \
- saved_xregs[3] = simulator.ReadXRegister(22); \
- saved_xregs[4] = simulator.ReadXRegister(23); \
- saved_xregs[5] = simulator.ReadXRegister(24); \
- saved_xregs[6] = simulator.ReadXRegister(25); \
- saved_xregs[7] = simulator.ReadXRegister(26); \
- saved_xregs[8] = simulator.ReadXRegister(27); \
- saved_xregs[9] = simulator.ReadXRegister(28); \
- saved_xregs[10] = simulator.ReadXRegister(29); \
- saved_xregs[11] = simulator.ReadXRegister(30); \
- saved_xregs[12] = simulator.ReadXRegister(31); \
- \
- uint64_t saved_dregs[8]; \
- saved_dregs[0] = simulator.ReadDRegisterBits(8); \
- saved_dregs[1] = simulator.ReadDRegisterBits(9); \
- saved_dregs[2] = simulator.ReadDRegisterBits(10); \
- saved_dregs[3] = simulator.ReadDRegisterBits(11); \
- saved_dregs[4] = simulator.ReadDRegisterBits(12); \
- saved_dregs[5] = simulator.ReadDRegisterBits(13); \
- saved_dregs[6] = simulator.ReadDRegisterBits(14); \
- saved_dregs[7] = simulator.ReadDRegisterBits(15); \
- \
- simulator.WriteXRegister(15, masm.GetLabelAddress<uint64_t>(&Func));\
- simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&test)); \
- \
- VIXL_CHECK(saved_xregs[0] == simulator.ReadXRegister(19)); \
- VIXL_CHECK(saved_xregs[1] == simulator.ReadXRegister(20)); \
- VIXL_CHECK(saved_xregs[2] == simulator.ReadXRegister(21)); \
- VIXL_CHECK(saved_xregs[3] == simulator.ReadXRegister(22)); \
- VIXL_CHECK(saved_xregs[4] == simulator.ReadXRegister(23)); \
- VIXL_CHECK(saved_xregs[5] == simulator.ReadXRegister(24)); \
- VIXL_CHECK(saved_xregs[6] == simulator.ReadXRegister(25)); \
- VIXL_CHECK(saved_xregs[7] == simulator.ReadXRegister(26)); \
- VIXL_CHECK(saved_xregs[8] == simulator.ReadXRegister(27)); \
- VIXL_CHECK(saved_xregs[9] == simulator.ReadXRegister(28)); \
- VIXL_CHECK(saved_xregs[10] == simulator.ReadXRegister(29)); \
- VIXL_CHECK(saved_xregs[11] == simulator.ReadXRegister(30)); \
- VIXL_CHECK(saved_xregs[12] == simulator.ReadXRegister(31)); \
- \
- VIXL_CHECK(saved_dregs[0] == simulator.ReadDRegisterBits(8)); \
- VIXL_CHECK(saved_dregs[1] == simulator.ReadDRegisterBits(9)); \
- VIXL_CHECK(saved_dregs[2] == simulator.ReadDRegisterBits(10)); \
- VIXL_CHECK(saved_dregs[3] == simulator.ReadDRegisterBits(11)); \
- VIXL_CHECK(saved_dregs[4] == simulator.ReadDRegisterBits(12)); \
- VIXL_CHECK(saved_dregs[5] == simulator.ReadDRegisterBits(13)); \
- VIXL_CHECK(saved_dregs[6] == simulator.ReadDRegisterBits(14)); \
- VIXL_CHECK(saved_dregs[7] == simulator.ReadDRegisterBits(15)); \
- \
+#define TEST_FUNCTION(Func) \
+ do { \
+ int64_t saved_xregs[13]; \
+ saved_xregs[0] = simulator.ReadXRegister(19); \
+ saved_xregs[1] = simulator.ReadXRegister(20); \
+ saved_xregs[2] = simulator.ReadXRegister(21); \
+ saved_xregs[3] = simulator.ReadXRegister(22); \
+ saved_xregs[4] = simulator.ReadXRegister(23); \
+ saved_xregs[5] = simulator.ReadXRegister(24); \
+ saved_xregs[6] = simulator.ReadXRegister(25); \
+ saved_xregs[7] = simulator.ReadXRegister(26); \
+ saved_xregs[8] = simulator.ReadXRegister(27); \
+ saved_xregs[9] = simulator.ReadXRegister(28); \
+ saved_xregs[10] = simulator.ReadXRegister(29); \
+ saved_xregs[11] = simulator.ReadXRegister(30); \
+ saved_xregs[12] = simulator.ReadXRegister(31); \
+ \
+ uint64_t saved_dregs[8]; \
+ saved_dregs[0] = simulator.ReadDRegisterBits(8); \
+ saved_dregs[1] = simulator.ReadDRegisterBits(9); \
+ saved_dregs[2] = simulator.ReadDRegisterBits(10); \
+ saved_dregs[3] = simulator.ReadDRegisterBits(11); \
+ saved_dregs[4] = simulator.ReadDRegisterBits(12); \
+ saved_dregs[5] = simulator.ReadDRegisterBits(13); \
+ saved_dregs[6] = simulator.ReadDRegisterBits(14); \
+ saved_dregs[7] = simulator.ReadDRegisterBits(15); \
+ \
+ simulator.WriteXRegister(15, masm.GetLabelAddress<uint64_t>(&Func)); \
+ simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&test)); \
+ \
+ VIXL_CHECK(saved_xregs[0] == simulator.ReadXRegister(19)); \
+ VIXL_CHECK(saved_xregs[1] == simulator.ReadXRegister(20)); \
+ VIXL_CHECK(saved_xregs[2] == simulator.ReadXRegister(21)); \
+ VIXL_CHECK(saved_xregs[3] == simulator.ReadXRegister(22)); \
+ VIXL_CHECK(saved_xregs[4] == simulator.ReadXRegister(23)); \
+ VIXL_CHECK(saved_xregs[5] == simulator.ReadXRegister(24)); \
+ VIXL_CHECK(saved_xregs[6] == simulator.ReadXRegister(25)); \
+ VIXL_CHECK(saved_xregs[7] == simulator.ReadXRegister(26)); \
+ VIXL_CHECK(saved_xregs[8] == simulator.ReadXRegister(27)); \
+ VIXL_CHECK(saved_xregs[9] == simulator.ReadXRegister(28)); \
+ VIXL_CHECK(saved_xregs[10] == simulator.ReadXRegister(29)); \
+ VIXL_CHECK(saved_xregs[11] == simulator.ReadXRegister(30)); \
+ VIXL_CHECK(saved_xregs[12] == simulator.ReadXRegister(31)); \
+ \
+ VIXL_CHECK(saved_dregs[0] == simulator.ReadDRegisterBits(8)); \
+ VIXL_CHECK(saved_dregs[1] == simulator.ReadDRegisterBits(9)); \
+ VIXL_CHECK(saved_dregs[2] == simulator.ReadDRegisterBits(10)); \
+ VIXL_CHECK(saved_dregs[3] == simulator.ReadDRegisterBits(11)); \
+ VIXL_CHECK(saved_dregs[4] == simulator.ReadDRegisterBits(12)); \
+ VIXL_CHECK(saved_dregs[5] == simulator.ReadDRegisterBits(13)); \
+ VIXL_CHECK(saved_dregs[6] == simulator.ReadDRegisterBits(14)); \
+ VIXL_CHECK(saved_dregs[7] == simulator.ReadDRegisterBits(15)); \
+ \
} while (0)
-#define START() \
- MacroAssembler masm; \
- Decoder decoder; \
- Debugger simulator(&decoder); \
- simulator.SetColouredTrace(Test::coloured_trace()); \
- PrintDisassembler* pdis = NULL; \
- Instrument* inst = NULL; \
- if (Test::trace_sim()) { \
- pdis = new PrintDisassembler(stdout); \
- decoder.PrependVisitor(pdis); \
- } \
- if (Test::instruction_stats()) { \
- inst = new Instrument("vixl_stats.csv", 10); \
- inst->Enable(); \
- decoder.AppendVisitor(inst); \
- } \
- RegisterDump regs; \
- \
- Label test; \
- masm.Bind(&test); \
- GenerateTestWrapper(&masm, ®s); \
+#define START() \
+ MacroAssembler masm; \
+ Decoder decoder; \
+ Debugger simulator(&decoder); \
+ simulator.SetColouredTrace(Test::coloured_trace()); \
+ PrintDisassembler* pdis = NULL; \
+ Instrument* inst = NULL; \
+ if (Test::trace_sim()) { \
+ pdis = new PrintDisassembler(stdout); \
+ decoder.PrependVisitor(pdis); \
+ } \
+ if (Test::instruction_stats()) { \
+ inst = new Instrument("vixl_stats.csv", 10); \
+ inst->Enable(); \
+ decoder.AppendVisitor(inst); \
+ } \
+ RegisterDump regs; \
+ \
+ Label test; \
+ masm.Bind(&test); \
+ GenerateTestWrapper(&masm, ®s); \
masm.FinalizeCode()
-
-#define FACTORIAL_DOTEST(N) \
- do { \
- simulator.ResetState(); \
- simulator.WriteXRegister(0, N); \
- TEST_FUNCTION(factorial); \
- VIXL_CHECK(static_cast<uint64_t>(regs.xreg(0)) == FactorialC(N)); \
+#define FACTORIAL_DOTEST(N) \
+ do { \
+ simulator.ResetState(); \
+ simulator.WriteXRegister(0, N); \
+ TEST_FUNCTION(factorial); \
+ VIXL_CHECK(static_cast<uint64_t>(regs.xreg(0)) == FactorialC(N)); \
} while (0)
TEST(factorial) {
@@ -217,12 +212,12 @@
}
-#define FACTORIAL_REC_DOTEST(N) \
- do { \
- simulator.ResetState(); \
- simulator.WriteXRegister(0, N); \
- TEST_FUNCTION(factorial_rec); \
- VIXL_CHECK(static_cast<uint64_t>(regs.xreg(0)) == FactorialC(N)); \
+#define FACTORIAL_REC_DOTEST(N) \
+ do { \
+ simulator.ResetState(); \
+ simulator.WriteXRegister(0, N); \
+ TEST_FUNCTION(factorial_rec); \
+ VIXL_CHECK(static_cast<uint64_t>(regs.xreg(0)) == FactorialC(N)); \
} while (0)
TEST(factorial_rec) {
@@ -258,15 +253,39 @@
// Fill the two input matrices with some 32 bit floating point values.
- mat1[0] = 1.0f; mat1[4] = 2.0f; mat1[ 8] = 3.0f; mat1[12] = 4.0f;
- mat1[1] = 52.03f; mat1[5] = 12.24f; mat1[ 9] = 53.56f; mat1[13] = 22.22f;
- mat1[2] = 4.43f; mat1[6] = 5.00f; mat1[10] = 7.00f; mat1[14] = 3.11f;
- mat1[3] = 43.47f; mat1[7] = 10.97f; mat1[11] = 37.78f; mat1[15] = 90.91f;
+ mat1[0] = 1.0f;
+ mat1[4] = 2.0f;
+ mat1[8] = 3.0f;
+ mat1[12] = 4.0f;
+ mat1[1] = 52.03f;
+ mat1[5] = 12.24f;
+ mat1[9] = 53.56f;
+ mat1[13] = 22.22f;
+ mat1[2] = 4.43f;
+ mat1[6] = 5.00f;
+ mat1[10] = 7.00f;
+ mat1[14] = 3.11f;
+ mat1[3] = 43.47f;
+ mat1[7] = 10.97f;
+ mat1[11] = 37.78f;
+ mat1[15] = 90.91f;
- mat2[0] = 1.0f; mat2[4] = 11.24f; mat2[ 8] = 21.00f; mat2[12] = 21.31f;
- mat2[1] = 2.0f; mat2[5] = 2.24f; mat2[ 9] = 8.56f; mat2[13] = 52.03f;
- mat2[2] = 3.0f; mat2[6] = 51.00f; mat2[10] = 21.00f; mat2[14] = 33.11f;
- mat2[3] = 4.0f; mat2[7] = 0.00f; mat2[11] = 84.00f; mat2[15] = 1.97f;
+ mat2[0] = 1.0f;
+ mat2[4] = 11.24f;
+ mat2[8] = 21.00f;
+ mat2[12] = 21.31f;
+ mat2[1] = 2.0f;
+ mat2[5] = 2.24f;
+ mat2[9] = 8.56f;
+ mat2[13] = 52.03f;
+ mat2[2] = 3.0f;
+ mat2[6] = 51.00f;
+ mat2[10] = 21.00f;
+ mat2[14] = 33.11f;
+ mat2[3] = 4.0f;
+ mat2[7] = 0.00f;
+ mat2[11] = 84.00f;
+ mat2[15] = 1.97f;
MatrixMultiplyC(expected, mat1, mat2);
@@ -294,8 +313,8 @@
// Initialize input data for the example function.
uint8_t A[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 200};
- uint8_t B[] = {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, \
- 30, 31, 50};
+ uint8_t B[] =
+ {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 50};
uint8_t D[ARRAY_SIZE(A)];
uintptr_t A_addr = reinterpret_cast<uintptr_t>(A);
uintptr_t B_addr = reinterpret_cast<uintptr_t>(B);
@@ -322,14 +341,14 @@
}
}
-#define ADD3_DOUBLE_DOTEST(A, B, C) \
- do { \
- simulator.ResetState(); \
- simulator.WriteDRegister(0, A); \
- simulator.WriteDRegister(1, B); \
- simulator.WriteDRegister(2, C); \
- TEST_FUNCTION(add3_double); \
- VIXL_CHECK(regs.dreg(0) == Add3DoubleC(A, B, C)); \
+#define ADD3_DOUBLE_DOTEST(A, B, C) \
+ do { \
+ simulator.ResetState(); \
+ simulator.WriteDRegister(0, A); \
+ simulator.WriteDRegister(1, B); \
+ simulator.WriteDRegister(2, C); \
+ TEST_FUNCTION(add3_double); \
+ VIXL_CHECK(regs.dreg(0) == Add3DoubleC(A, B, C)); \
} while (0)
TEST(add3_double) {
@@ -347,15 +366,15 @@
}
-#define ADD4_DOUBLE_DOTEST(A, B, C, D) \
- do { \
- simulator.ResetState(); \
- simulator.WriteXRegister(0, A); \
- simulator.WriteDRegister(0, B); \
- simulator.WriteXRegister(1, C); \
- simulator.WriteDRegister(1, D); \
- TEST_FUNCTION(add4_double); \
- VIXL_CHECK(regs.dreg(0) == Add4DoubleC(A, B, C, D)); \
+#define ADD4_DOUBLE_DOTEST(A, B, C, D) \
+ do { \
+ simulator.ResetState(); \
+ simulator.WriteXRegister(0, A); \
+ simulator.WriteDRegister(0, B); \
+ simulator.WriteXRegister(1, C); \
+ simulator.WriteDRegister(1, D); \
+ TEST_FUNCTION(add4_double); \
+ VIXL_CHECK(regs.dreg(0) == Add4DoubleC(A, B, C, D)); \
} while (0)
TEST(add4_double) {
@@ -374,14 +393,14 @@
}
-#define SUM_ARRAY_DOTEST(Array) \
- do { \
- simulator.ResetState(); \
- uintptr_t addr = reinterpret_cast<uintptr_t>(Array); \
- simulator.WriteXRegister(0, addr); \
- simulator.WriteXRegister(1, ARRAY_SIZE(Array)); \
- TEST_FUNCTION(sum_array); \
- VIXL_CHECK(regs.xreg(0) == SumArrayC(Array, ARRAY_SIZE(Array))); \
+#define SUM_ARRAY_DOTEST(Array) \
+ do { \
+ simulator.ResetState(); \
+ uintptr_t addr = reinterpret_cast<uintptr_t>(Array); \
+ simulator.WriteXRegister(0, addr); \
+ simulator.WriteXRegister(1, ARRAY_SIZE(Array)); \
+ TEST_FUNCTION(sum_array); \
+ VIXL_CHECK(regs.xreg(0) == SumArrayC(Array, ARRAY_SIZE(Array))); \
} while (0)
TEST(sum_array) {
@@ -392,25 +411,24 @@
GenerateSumArray(&masm);
masm.FinalizeCode();
- uint8_t data1[] = { 4, 9, 13, 3, 2, 6, 5 };
+ uint8_t data1[] = {4, 9, 13, 3, 2, 6, 5};
SUM_ARRAY_DOTEST(data1);
- uint8_t data2[] = { 42 };
+ uint8_t data2[] = {42};
SUM_ARRAY_DOTEST(data2);
uint8_t data3[1000];
- for (unsigned int i = 0; i < ARRAY_SIZE(data3); ++i)
- data3[i] = 255;
+ for (unsigned int i = 0; i < ARRAY_SIZE(data3); ++i) data3[i] = 255;
SUM_ARRAY_DOTEST(data3);
}
-#define ABS_DOTEST(X) \
- do { \
- simulator.ResetState(); \
- simulator.WriteXRegister(0, X); \
- TEST_FUNCTION(func_abs); \
- VIXL_CHECK(regs.xreg(0) == abs(X)); \
+#define ABS_DOTEST(X) \
+ do { \
+ simulator.ResetState(); \
+ simulator.WriteXRegister(0, X); \
+ TEST_FUNCTION(func_abs); \
+ VIXL_CHECK(regs.xreg(0) == abs(X)); \
} while (0)
TEST(abs) {
@@ -436,7 +454,7 @@
GenerateCrc32(&masm);
masm.FinalizeCode();
- const char *msg = "Hello World!";
+ const char* msg = "Hello World!";
uintptr_t msg_addr = reinterpret_cast<uintptr_t>(msg);
size_t msg_size = strlen(msg);
int64_t chksum = INT64_C(0xe3d6e35c);
@@ -490,14 +508,14 @@
}
-#define CHECKBOUNDS_DOTEST(Value, Low, High) \
- do { \
- simulator.ResetState(); \
- simulator.WriteXRegister(0, Value); \
- simulator.WriteXRegister(1, Low); \
- simulator.WriteXRegister(2, High); \
- TEST_FUNCTION(check_bounds); \
- VIXL_CHECK(regs.xreg(0) == ((Low <= Value) && (Value <= High))); \
+#define CHECKBOUNDS_DOTEST(Value, Low, High) \
+ do { \
+ simulator.ResetState(); \
+ simulator.WriteXRegister(0, Value); \
+ simulator.WriteXRegister(1, Low); \
+ simulator.WriteXRegister(2, High); \
+ TEST_FUNCTION(check_bounds); \
+ VIXL_CHECK(regs.xreg(0) == ((Low <= Value) && (Value <= High))); \
} while (0)
TEST(check_bounds) {
@@ -520,12 +538,12 @@
}
-#define GETTING_STARTED_DOTEST(Value) \
- do { \
- simulator.ResetState(); \
- simulator.WriteXRegister(0, Value); \
- TEST_FUNCTION(demo_function); \
- VIXL_CHECK(regs.xreg(0) == (Value & 0x1122334455667788)); \
+#define GETTING_STARTED_DOTEST(Value) \
+ do { \
+ simulator.ResetState(); \
+ simulator.WriteXRegister(0, Value); \
+ TEST_FUNCTION(demo_function); \
+ VIXL_CHECK(regs.xreg(0) == (Value & 0x1122334455667788)); \
} while (0)
TEST(getting_started) {
@@ -566,8 +584,8 @@
TEST(literal_example) {
VIXL_ASSERT(LiteralExample(1, 2) == 3);
- VIXL_ASSERT(
- LiteralExample(INT64_C(0x100000000), 0x1) == INT64_C(0x100000001));
+ VIXL_ASSERT(LiteralExample(INT64_C(0x100000000), 0x1) ==
+ INT64_C(0x100000001));
}
diff --git a/test/aarch64/test-abi.cc b/test/aarch64/test-abi.cc
index aa7e65b..e823c7e 100644
--- a/test/aarch64/test-abi.cc
+++ b/test/aarch64/test-abi.cc
@@ -35,7 +35,7 @@
#ifdef VIXL_HAS_ABI_SUPPORT
-#define TEST(name) TEST_(AARCH64_ABI_##name)
+#define TEST(name) TEST_(AARCH64_ABI_##name)
namespace vixl {
namespace aarch64 {
@@ -51,7 +51,9 @@
VIXL_CHECK(abi.GetReturnGenericOperand<char>().Equals(GenericOperand(w0)));
VIXL_CHECK(abi.GetReturnGenericOperand<int8_t>().Equals(GenericOperand(w0)));
VIXL_CHECK(abi.GetReturnGenericOperand<uint8_t>().Equals(GenericOperand(w0)));
- VIXL_CHECK(abi.GetReturnGenericOperand<short>().Equals(GenericOperand(w0))); // NOLINT(runtime/int)
+ VIXL_CHECK(
+ abi.GetReturnGenericOperand<short>().Equals( // NOLINT(runtime/int)
+ GenericOperand(w0)));
VIXL_CHECK(abi.GetReturnGenericOperand<int16_t>().Equals(GenericOperand(w0)));
VIXL_CHECK(
abi.GetReturnGenericOperand<uint16_t>().Equals(GenericOperand(w0)));
@@ -68,45 +70,46 @@
GenericOperand found(NoReg);
GenericOperand expected(NoReg);
-#define CHECK_NEXT_PARAMETER_REG(type, reg) \
- found = abi.GetNextParameterGenericOperand<type>(); \
- expected = GenericOperand(reg); \
- VIXL_CHECK(found.Equals(expected))
- // Slots on the stack are always 8 bytes.
-#define CHECK_NEXT_PARAMETER_MEM(type, mem_op, size) \
- found = abi.GetNextParameterGenericOperand<type>(); \
- expected = GenericOperand(mem_op, size); \
- VIXL_CHECK(found.Equals(expected))
+#define CHECK_NEXT_PARAMETER_REG(type, reg) \
+ found = abi.GetNextParameterGenericOperand<type>(); \
+ expected = GenericOperand(reg); \
+ VIXL_CHECK(found.Equals(expected))
+// Slots on the stack are always 8 bytes.
+#define CHECK_NEXT_PARAMETER_MEM(type, mem_op, size) \
+ found = abi.GetNextParameterGenericOperand<type>(); \
+ expected = GenericOperand(mem_op, size); \
+ VIXL_CHECK(found.Equals(expected))
abi.Reset();
- CHECK_NEXT_PARAMETER_REG(int, w0);
- CHECK_NEXT_PARAMETER_REG(char, w1);
- CHECK_NEXT_PARAMETER_REG(bool, w2);
- CHECK_NEXT_PARAMETER_REG(float, s0);
- CHECK_NEXT_PARAMETER_REG(double, d1);
- CHECK_NEXT_PARAMETER_REG(double, d2);
- CHECK_NEXT_PARAMETER_REG(float, s3);
- CHECK_NEXT_PARAMETER_REG(int64_t, x3);
+ CHECK_NEXT_PARAMETER_REG(int, w0);
+ CHECK_NEXT_PARAMETER_REG(char, w1);
+ CHECK_NEXT_PARAMETER_REG(bool, w2);
+ CHECK_NEXT_PARAMETER_REG(float, s0);
+ CHECK_NEXT_PARAMETER_REG(double, d1);
+ CHECK_NEXT_PARAMETER_REG(double, d2);
+ CHECK_NEXT_PARAMETER_REG(float, s3);
+ CHECK_NEXT_PARAMETER_REG(int64_t, x3);
CHECK_NEXT_PARAMETER_REG(uint64_t, x4);
- CHECK_NEXT_PARAMETER_REG(void*, x5);
+ CHECK_NEXT_PARAMETER_REG(void*, x5);
CHECK_NEXT_PARAMETER_REG(uint32_t, w6);
typedef short my_type; // NOLINT(runtime/int)
- CHECK_NEXT_PARAMETER_REG(my_type, w7);
- CHECK_NEXT_PARAMETER_MEM(int, MemOperand(sp, 0), kWRegSizeInBytes);
- CHECK_NEXT_PARAMETER_MEM(int, MemOperand(sp, 8), kWRegSizeInBytes);
- CHECK_NEXT_PARAMETER_REG(double, d4);
- CHECK_NEXT_PARAMETER_REG(double, d5);
- CHECK_NEXT_PARAMETER_REG(double, d6);
- CHECK_NEXT_PARAMETER_REG(double, d7);
- CHECK_NEXT_PARAMETER_MEM(double, MemOperand(sp, 16), kDRegSizeInBytes);
- CHECK_NEXT_PARAMETER_MEM(bool, MemOperand(sp, 24), kWRegSizeInBytes);
- CHECK_NEXT_PARAMETER_MEM(short, MemOperand(sp, 32), kWRegSizeInBytes); // NOLINT(runtime/int)
- CHECK_NEXT_PARAMETER_MEM(float, MemOperand(sp, 40), kSRegSizeInBytes);
- CHECK_NEXT_PARAMETER_MEM(float, MemOperand(sp, 48), kSRegSizeInBytes);
+ CHECK_NEXT_PARAMETER_REG(my_type, w7);
+ CHECK_NEXT_PARAMETER_MEM(int, MemOperand(sp, 0), kWRegSizeInBytes);
+ CHECK_NEXT_PARAMETER_MEM(int, MemOperand(sp, 8), kWRegSizeInBytes);
+ CHECK_NEXT_PARAMETER_REG(double, d4);
+ CHECK_NEXT_PARAMETER_REG(double, d5);
+ CHECK_NEXT_PARAMETER_REG(double, d6);
+ CHECK_NEXT_PARAMETER_REG(double, d7);
+ CHECK_NEXT_PARAMETER_MEM(double, MemOperand(sp, 16), kDRegSizeInBytes);
+ CHECK_NEXT_PARAMETER_MEM(bool, MemOperand(sp, 24), kWRegSizeInBytes);
+ CHECK_NEXT_PARAMETER_MEM(short, // NOLINT(runtime/int)
+ MemOperand(sp, 32),
+ kWRegSizeInBytes);
+ CHECK_NEXT_PARAMETER_MEM(float, MemOperand(sp, 40), kSRegSizeInBytes);
+ CHECK_NEXT_PARAMETER_MEM(float, MemOperand(sp, 48), kSRegSizeInBytes);
VIXL_CHECK(abi.GetStackSpaceRequired() == 56);
}
-
-
-}} // namespace vixl::aarch64
+}
+} // namespace vixl::aarch64
#endif // VIXL_ABI_SUPORT
diff --git a/test/aarch64/test-assembler-aarch64.cc b/test/aarch64/test-assembler-aarch64.cc
index 78f0643..50e7800 100644
--- a/test/aarch64/test-assembler-aarch64.cc
+++ b/test/aarch64/test-assembler-aarch64.cc
@@ -94,132 +94,124 @@
#define __ masm.
-#define TEST(name) TEST_(AARCH64_ASM_##name)
+#define TEST(name) TEST_(AARCH64_ASM_##name)
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
// Run tests with the simulator.
-#define SETUP() \
- MacroAssembler masm; \
+#define SETUP() \
+ MacroAssembler masm; \
SETUP_COMMON()
-#define SETUP_CUSTOM(size, pic) \
- byte* buf = new byte[size + CodeBuffer::kDefaultCapacity]; \
- MacroAssembler masm(buf, size + CodeBuffer::kDefaultCapacity, pic); \
+#define SETUP_CUSTOM(size, pic) \
+ byte* buf = new byte[size + CodeBuffer::kDefaultCapacity]; \
+ MacroAssembler masm(buf, size + CodeBuffer::kDefaultCapacity, pic); \
SETUP_COMMON()
-#define SETUP_COMMON() \
- masm.SetGenerateSimulatorCode(true); \
- Decoder simulator_decoder; \
- Simulator* simulator = \
- Test::run_debugger() ? new Debugger(&simulator_decoder) \
- : new Simulator(&simulator_decoder); \
- simulator->SetColouredTrace(Test::coloured_trace()); \
- simulator->SetInstructionStats(Test::instruction_stats()); \
- Disassembler disasm; \
- Decoder disassembler_decoder; \
- disassembler_decoder.AppendVisitor(&disasm); \
+#define SETUP_COMMON() \
+ masm.SetGenerateSimulatorCode(true); \
+ Decoder simulator_decoder; \
+ Simulator* simulator = Test::run_debugger() \
+ ? new Debugger(&simulator_decoder) \
+ : new Simulator(&simulator_decoder); \
+ simulator->SetColouredTrace(Test::coloured_trace()); \
+ simulator->SetInstructionStats(Test::instruction_stats()); \
+ Disassembler disasm; \
+ Decoder disassembler_decoder; \
+ disassembler_decoder.AppendVisitor(&disasm); \
RegisterDump core
-// This is a convenience macro to avoid creating a scope for every assembler
-// function called. It will still assert the buffer hasn't been exceeded.
-#define ALLOW_ASM() \
- CodeBufferCheckScope guard(&masm, masm.GetBuffer()->GetCapacity())
-
-#define START() \
- masm.Reset(); \
- simulator->ResetState(); \
- __ PushCalleeSavedRegisters(); \
- { \
- int trace_parameters = 0; \
- if (Test::trace_reg()) trace_parameters |= LOG_STATE; \
- if (Test::trace_write()) trace_parameters |= LOG_WRITE; \
- if (Test::trace_sim()) trace_parameters |= LOG_DISASM; \
- if (Test::trace_branch()) trace_parameters |= LOG_BRANCH; \
- if (trace_parameters != 0) { \
- __ Trace(static_cast<TraceParameters>(trace_parameters), TRACE_ENABLE); \
- } \
- } \
- if (Test::instruction_stats()) { \
- __ EnableInstrumentation(); \
+#define START() \
+ masm.Reset(); \
+ simulator->ResetState(); \
+ __ PushCalleeSavedRegisters(); \
+ { \
+ int trace_parameters = 0; \
+ if (Test::trace_reg()) trace_parameters |= LOG_STATE; \
+ if (Test::trace_write()) trace_parameters |= LOG_WRITE; \
+ if (Test::trace_sim()) trace_parameters |= LOG_DISASM; \
+ if (Test::trace_branch()) trace_parameters |= LOG_BRANCH; \
+ if (trace_parameters != 0) { \
+ __ Trace(static_cast<TraceParameters>(trace_parameters), TRACE_ENABLE); \
+ } \
+ } \
+ if (Test::instruction_stats()) { \
+ __ EnableInstrumentation(); \
}
-#define END() \
- if (Test::instruction_stats()) { \
- __ DisableInstrumentation(); \
- } \
- __ Trace(LOG_ALL, TRACE_DISABLE); \
- core.Dump(&masm); \
- __ PopCalleeSavedRegisters(); \
- __ Ret(); \
+#define END() \
+ if (Test::instruction_stats()) { \
+ __ DisableInstrumentation(); \
+ } \
+ __ Trace(LOG_ALL, TRACE_DISABLE); \
+ core.Dump(&masm); \
+ __ PopCalleeSavedRegisters(); \
+ __ Ret(); \
masm.FinalizeCode()
-#define RUN() \
- DISASSEMBLE(); \
+#define RUN() \
+ DISASSEMBLE(); \
simulator->RunFrom(masm.GetBuffer()->GetStartAddress<Instruction*>())
#define RUN_CUSTOM() RUN()
#define TEARDOWN() TEARDOWN_COMMON()
-#define TEARDOWN_CUSTOM() \
- delete[] buf; \
+#define TEARDOWN_CUSTOM() \
+ delete[] buf; \
TEARDOWN_COMMON()
-#define TEARDOWN_COMMON() \
- delete simulator;
+#define TEARDOWN_COMMON() delete simulator;
#else // ifdef VIXL_INCLUDE_SIMULATOR_AARCH64.
// Run the test on real hardware or models.
-#define SETUP() \
- MacroAssembler masm; \
+#define SETUP() \
+ MacroAssembler masm; \
SETUP_COMMON()
-#define SETUP_CUSTOM(size, pic) \
- byte *buffer = reinterpret_cast<byte*>( \
- mmap(NULL, size + CodeBuffer::kDefaultCapacity, \
- PROT_READ | PROT_WRITE, \
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)); \
- size_t buffer_size = size + CodeBuffer::kDefaultCapacity; \
- MacroAssembler masm(buffer, buffer_size, pic); \
+#define SETUP_CUSTOM(size, pic) \
+ byte* buffer = \
+ reinterpret_cast<byte*>(mmap(NULL, \
+ size + CodeBuffer::kDefaultCapacity, \
+ PROT_READ | PROT_WRITE, \
+ MAP_PRIVATE | MAP_ANONYMOUS, \
+ -1, \
+ 0)); \
+ size_t buffer_size = size + CodeBuffer::kDefaultCapacity; \
+ MacroAssembler masm(buffer, buffer_size, pic); \
SETUP_COMMON()
-#define SETUP_COMMON() \
- Disassembler disasm; \
- Decoder disassembler_decoder; \
- disassembler_decoder.AppendVisitor(&disasm); \
- masm.SetGenerateSimulatorCode(false); \
- RegisterDump core; \
+#define SETUP_COMMON() \
+ Disassembler disasm; \
+ Decoder disassembler_decoder; \
+ disassembler_decoder.AppendVisitor(&disasm); \
+ masm.SetGenerateSimulatorCode(false); \
+ RegisterDump core; \
CPU::SetUp()
-// This is a convenience macro to avoid creating a scope for every assembler
-// function called. It will still assert the buffer hasn't been exceeded.
-#define ALLOW_ASM() \
- CodeBufferCheckScope guard(&masm, masm.GetBuffer()->GetCapacity())
-
-#define START() \
- masm.Reset(); \
+#define START() \
+ masm.Reset(); \
__ PushCalleeSavedRegisters()
-#define END() \
- core.Dump(&masm); \
- __ PopCalleeSavedRegisters(); \
- __ Ret(); \
+#define END() \
+ core.Dump(&masm); \
+ __ PopCalleeSavedRegisters(); \
+ __ Ret(); \
masm.FinalizeCode()
// Execute the generated code from the memory area.
-#define RUN() \
- DISASSEMBLE(); \
- masm.GetBuffer()->SetExecutable(); \
- ExecuteMemory(masm.GetBuffer()->GetStartAddress<byte*>(), \
- masm.GetSizeOfCodeGenerated()); \
+#define RUN() \
+ DISASSEMBLE(); \
+ masm.GetBuffer()->SetExecutable(); \
+ ExecuteMemory(masm.GetBuffer()->GetStartAddress<byte*>(), \
+ masm.GetSizeOfCodeGenerated()); \
masm.GetBuffer()->SetWritable()
// The generated code was written directly into `buffer`, execute it directly.
-#define RUN_CUSTOM() \
- DISASSEMBLE(); \
- mprotect(buffer, buffer_size, PROT_READ | PROT_EXEC); \
- ExecuteMemory(buffer, buffer_size); \
+#define RUN_CUSTOM() \
+ DISASSEMBLE(); \
+ mprotect(buffer, buffer_size, PROT_READ | PROT_EXEC); \
+ ExecuteMemory(buffer, buffer_size); \
mprotect(buffer, buffer_size, PROT_READ | PROT_WRITE)
#define TEARDOWN()
@@ -228,42 +220,42 @@
#endif // ifdef VIXL_INCLUDE_SIMULATOR_AARCH64.
-#define DISASSEMBLE() \
- if (Test::disassemble()) { \
- Instruction* instruction = \
- masm.GetBuffer()->GetStartAddress<Instruction*>(); \
- Instruction* end = masm.GetBuffer()->GetOffsetAddress<Instruction*>( \
- masm.GetSizeOfCodeGenerated()); \
- while (instruction != end) { \
- disassembler_decoder.Decode(instruction); \
- uint32_t encoding = *reinterpret_cast<uint32_t*>(instruction); \
- printf("%08" PRIx32 "\t%s\n", encoding, disasm.GetOutput()); \
- instruction += kInstructionSize; \
- } \
+#define DISASSEMBLE() \
+ if (Test::disassemble()) { \
+ Instruction* instruction = \
+ masm.GetBuffer()->GetStartAddress<Instruction*>(); \
+ Instruction* end = masm.GetBuffer()->GetOffsetAddress<Instruction*>( \
+ masm.GetSizeOfCodeGenerated()); \
+ while (instruction != end) { \
+ disassembler_decoder.Decode(instruction); \
+ uint32_t encoding = *reinterpret_cast<uint32_t*>(instruction); \
+ printf("%08" PRIx32 "\t%s\n", encoding, disasm.GetOutput()); \
+ instruction += kInstructionSize; \
+ } \
}
-#define ASSERT_EQUAL_NZCV(expected) \
+#define ASSERT_EQUAL_NZCV(expected) \
VIXL_CHECK(EqualNzcv(expected, core.flags_nzcv()))
-#define ASSERT_EQUAL_REGISTERS(expected) \
+#define ASSERT_EQUAL_REGISTERS(expected) \
VIXL_CHECK(EqualRegisters(&expected, &core))
-#define ASSERT_EQUAL_32(expected, result) \
+#define ASSERT_EQUAL_32(expected, result) \
VIXL_CHECK(Equal32(static_cast<uint32_t>(expected), &core, result))
-#define ASSERT_EQUAL_FP32(expected, result) \
+#define ASSERT_EQUAL_FP32(expected, result) \
VIXL_CHECK(EqualFP32(expected, &core, result))
-#define ASSERT_EQUAL_64(expected, result) \
+#define ASSERT_EQUAL_64(expected, result) \
VIXL_CHECK(Equal64(expected, &core, result))
-#define ASSERT_EQUAL_FP64(expected, result) \
+#define ASSERT_EQUAL_FP64(expected, result) \
VIXL_CHECK(EqualFP64(expected, &core, result))
-#define ASSERT_EQUAL_128(expected_h, expected_l, result) \
+#define ASSERT_EQUAL_128(expected_h, expected_l, result) \
VIXL_CHECK(Equal128(expected_h, expected_l, &core, result))
-#define ASSERT_LITERAL_POOL_SIZE(expected) \
+#define ASSERT_LITERAL_POOL_SIZE(expected) \
VIXL_CHECK((expected + kInstructionSize) == (masm.GetLiteralPoolSize()))
@@ -463,7 +455,6 @@
TEST(mov) {
SETUP();
- ALLOW_ASM();
START();
__ Mov(x0, 0xffffffffffffffff);
@@ -473,9 +464,12 @@
__ Mov(x0, 0x0123456789abcdef);
- __ movz(x1, UINT64_C(0xabcd) << 16);
- __ movk(x2, UINT64_C(0xabcd) << 32);
- __ movn(x3, UINT64_C(0xabcd) << 48);
+ {
+ ExactAssemblyScope scope(&masm, 3 * kInstructionSize);
+ __ movz(x1, UINT64_C(0xabcd) << 16);
+ __ movk(x2, UINT64_C(0xabcd) << 32);
+ __ movn(x3, UINT64_C(0xabcd) << 48);
+ }
__ Mov(x4, 0x0123456789abcdef);
__ Mov(x5, x4);
@@ -1698,17 +1692,17 @@
START();
__ Mov(x0, 0x1);
__ Mov(x1, 0x0);
- __ Mov(x22, lr); // Save lr.
+ __ Mov(x22, lr); // Save lr.
__ B(&label_1);
__ B(&label_1);
- __ B(&label_1); // Multiple branches to the same label.
+ __ B(&label_1); // Multiple branches to the same label.
__ Mov(x0, 0x0);
__ Bind(&label_2);
- __ B(&label_3); // Forward branch.
+ __ B(&label_3); // Forward branch.
__ Mov(x0, 0x0);
__ Bind(&label_1);
- __ B(&label_2); // Backward branch.
+ __ B(&label_2); // Backward branch.
__ Mov(x0, 0x0);
__ Bind(&label_3);
__ Bl(&label_4);
@@ -1774,10 +1768,10 @@
Label label_1, label_2, label_3, label_4;
START();
- __ Mov(x0, 0x0); // Set to non-zero to indicate failure.
- __ Adr(x1, &label_3); // Set to zero to indicate success.
+ __ Mov(x0, 0x0); // Set to non-zero to indicate failure.
+ __ Adr(x1, &label_3); // Set to zero to indicate success.
- __ Adr(x2, &label_1); // Multiple forward references to the same label.
+ __ Adr(x2, &label_1); // Multiple forward references to the same label.
__ Adr(x3, &label_1);
__ Adr(x4, &label_1);
@@ -1789,17 +1783,17 @@
__ Br(x2); // label_1, label_3
__ Bind(&label_3);
- __ Adr(x2, &label_3); // Self-reference (offset 0).
+ __ Adr(x2, &label_3); // Self-reference (offset 0).
__ Eor(x1, x1, Operand(x2));
- __ Adr(x2, &label_4); // Simple forward reference.
- __ Br(x2); // label_4
+ __ Adr(x2, &label_4); // Simple forward reference.
+ __ Br(x2); // label_4
__ Bind(&label_1);
- __ Adr(x2, &label_3); // Multiple reverse references to the same label.
+ __ Adr(x2, &label_3); // Multiple reverse references to the same label.
__ Adr(x3, &label_3);
__ Adr(x4, &label_3);
- __ Adr(x5, &label_2); // Simple reverse reference.
- __ Br(x5); // label_2
+ __ Adr(x5, &label_2); // Simple reverse reference.
+ __ Br(x5); // label_2
__ Bind(&label_4);
END();
@@ -2030,7 +2024,6 @@
TEST(branch_cond) {
SETUP();
- ALLOW_ASM();
Label done, wrong;
@@ -2096,13 +2089,19 @@
// The MacroAssembler does not allow al as a branch condition.
Label ok_5;
- __ b(&ok_5, al);
+ {
+ ExactAssemblyScope scope(&masm, kInstructionSize);
+ __ b(&ok_5, al);
+ }
__ Mov(x0, 0x0);
__ Bind(&ok_5);
// The MacroAssembler does not allow nv as a branch condition.
Label ok_6;
- __ b(&ok_6, nv);
+ {
+ ExactAssemblyScope scope(&masm, kInstructionSize);
+ __ b(&ok_6, nv);
+ }
__ Mov(x0, 0x0);
__ Bind(&ok_6);
@@ -2559,7 +2558,7 @@
// This value won't fit in the immediate offset field of ldr/str instructions.
int largeoffset = 0xabcdef;
- int64_t data[3] = { 0x1122334455667788, 0, 0 };
+ int64_t data[3] = {0x1122334455667788, 0, 0};
uint64_t base_addr = reinterpret_cast<uintptr_t>(data);
uint64_t drifted_addr = base_addr - largeoffset;
@@ -2853,11 +2852,10 @@
TEST(load_store_q) {
SETUP();
- uint8_t src[48] = {0x10, 0x32, 0x54, 0x76, 0x98, 0xba, 0xdc, 0xfe,
- 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
- 0x21, 0x43, 0x65, 0x87, 0xa9, 0xcb, 0xed, 0x0f,
- 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
- 0x24, 0x46, 0x68, 0x8a, 0xac, 0xce, 0xe0, 0x02,
+ uint8_t src[48] = {0x10, 0x32, 0x54, 0x76, 0x98, 0xba, 0xdc, 0xfe, 0x01, 0x23,
+ 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x21, 0x43, 0x65, 0x87,
+ 0xa9, 0xcb, 0xed, 0x0f, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc,
+ 0xde, 0xf0, 0x24, 0x46, 0x68, 0x8a, 0xac, 0xce, 0xe0, 0x02,
0x42, 0x64, 0x86, 0xa8, 0xca, 0xec, 0x0e, 0x20};
uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
@@ -3057,11 +3055,20 @@
__ Ld1(v2.V8B(), MemOperand(x17, x23, PostIndex));
__ Ld1(v3.V8B(), v4.V8B(), MemOperand(x18, 16, PostIndex));
__ Ld1(v5.V4H(), v6.V4H(), v7.V4H(), MemOperand(x19, 24, PostIndex));
- __ Ld1(v16.V2S(), v17.V2S(), v18.V2S(), v19.V2S(),
+ __ Ld1(v16.V2S(),
+ v17.V2S(),
+ v18.V2S(),
+ v19.V2S(),
MemOperand(x20, 32, PostIndex));
- __ Ld1(v30.V2S(), v31.V2S(), v0.V2S(), v1.V2S(),
+ __ Ld1(v30.V2S(),
+ v31.V2S(),
+ v0.V2S(),
+ v1.V2S(),
MemOperand(x21, 32, PostIndex));
- __ Ld1(v20.V1D(), v21.V1D(), v22.V1D(), v23.V1D(),
+ __ Ld1(v20.V1D(),
+ v21.V1D(),
+ v22.V1D(),
+ v23.V1D(),
MemOperand(x22, 32, PostIndex));
END();
@@ -3158,9 +3165,15 @@
__ Ld1(v2.V16B(), MemOperand(x17, x22, PostIndex));
__ Ld1(v3.V16B(), v4.V16B(), MemOperand(x18, 32, PostIndex));
__ Ld1(v5.V8H(), v6.V8H(), v7.V8H(), MemOperand(x19, 48, PostIndex));
- __ Ld1(v16.V4S(), v17.V4S(), v18.V4S(), v19.V4S(),
+ __ Ld1(v16.V4S(),
+ v17.V4S(),
+ v18.V4S(),
+ v19.V4S(),
MemOperand(x20, 64, PostIndex));
- __ Ld1(v30.V2D(), v31.V2D(), v0.V2D(), v1.V2D(),
+ __ Ld1(v30.V2D(),
+ v31.V2D(),
+ v0.V2D(),
+ v1.V2D(),
MemOperand(x21, 64, PostIndex));
END();
@@ -3408,7 +3421,6 @@
ASSERT_EQUAL_128(0x232221201f1e1d1c, 0x131211100f0e0d0c, q0);
-
ASSERT_EQUAL_64(src_base + 1, x17);
ASSERT_EQUAL_64(src_base + 1 + 32, x18);
ASSERT_EQUAL_64(src_base + 2 + 32, x19);
@@ -3582,8 +3594,6 @@
ASSERT_EQUAL_128(0x0f0e0d0c0b0a0908, 0x1716151413121110, q15);
-
-
ASSERT_EQUAL_64(src_base + 32, x17);
ASSERT_EQUAL_64(src_base + 32, x18);
ASSERT_EQUAL_64(src_base + 32, x19);
@@ -4233,15 +4243,30 @@
__ Mov(x20, src_base + 3);
__ Mov(x21, src_base + 4);
__ Mov(x22, 1);
- __ Ld4(v2.V8B(), v3.V8B(), v4.V8B(), v5.V8B(),
+ __ Ld4(v2.V8B(),
+ v3.V8B(),
+ v4.V8B(),
+ v5.V8B(),
MemOperand(x17, x22, PostIndex));
- __ Ld4(v6.V8B(), v7.V8B(), v8.V8B(), v9.V8B(),
+ __ Ld4(v6.V8B(),
+ v7.V8B(),
+ v8.V8B(),
+ v9.V8B(),
MemOperand(x18, 32, PostIndex));
- __ Ld4(v10.V4H(), v11.V4H(), v12.V4H(), v13.V4H(),
+ __ Ld4(v10.V4H(),
+ v11.V4H(),
+ v12.V4H(),
+ v13.V4H(),
MemOperand(x19, 32, PostIndex));
- __ Ld4(v14.V2S(), v15.V2S(), v16.V2S(), v17.V2S(),
+ __ Ld4(v14.V2S(),
+ v15.V2S(),
+ v16.V2S(),
+ v17.V2S(),
MemOperand(x20, 32, PostIndex));
- __ Ld4(v30.V2S(), v31.V2S(), v0.V2S(), v1.V2S(),
+ __ Ld4(v30.V2S(),
+ v31.V2S(),
+ v0.V2S(),
+ v1.V2S(),
MemOperand(x21, 32, PostIndex));
END();
@@ -4343,15 +4368,30 @@
__ Mov(x21, src_base + 4);
__ Mov(x22, 1);
- __ Ld4(v2.V16B(), v3.V16B(), v4.V16B(), v5.V16B(),
+ __ Ld4(v2.V16B(),
+ v3.V16B(),
+ v4.V16B(),
+ v5.V16B(),
MemOperand(x17, x22, PostIndex));
- __ Ld4(v6.V16B(), v7.V16B(), v8.V16B(), v9.V16B(),
+ __ Ld4(v6.V16B(),
+ v7.V16B(),
+ v8.V16B(),
+ v9.V16B(),
MemOperand(x18, 64, PostIndex));
- __ Ld4(v10.V8H(), v11.V8H(), v12.V8H(), v13.V8H(),
+ __ Ld4(v10.V8H(),
+ v11.V8H(),
+ v12.V8H(),
+ v13.V8H(),
MemOperand(x19, 64, PostIndex));
- __ Ld4(v14.V4S(), v15.V4S(), v16.V4S(), v17.V4S(),
+ __ Ld4(v14.V4S(),
+ v15.V4S(),
+ v16.V4S(),
+ v17.V4S(),
MemOperand(x20, 64, PostIndex));
- __ Ld4(v30.V2D(), v31.V2D(), v0.V2D(), v1.V2D(),
+ __ Ld4(v30.V2D(),
+ v31.V2D(),
+ v0.V2D(),
+ v1.V2D(),
MemOperand(x21, 64, PostIndex));
END();
@@ -4379,7 +4419,6 @@
ASSERT_EQUAL_128(0x434241403f3e3d3c, 0x232221201f1e1d1c, q1);
-
ASSERT_EQUAL_64(src_base + 1, x17);
ASSERT_EQUAL_64(src_base + 1 + 64, x18);
ASSERT_EQUAL_64(src_base + 2 + 64, x19);
@@ -4497,7 +4536,6 @@
}
-
TEST(neon_ld4_lane_postindex) {
SETUP();
@@ -4512,25 +4550,26 @@
// Test loading whole register by element.
__ Mov(x17, src_base);
for (int i = 15; i >= 0; i--) {
- __ Ld4(v0.B(), v1.B(), v2.B(), v3.B(), i,
- MemOperand(x17, 4, PostIndex));
+ __ Ld4(v0.B(), v1.B(), v2.B(), v3.B(), i, MemOperand(x17, 4, PostIndex));
}
__ Mov(x18, src_base);
for (int i = 7; i >= 0; i--) {
- __ Ld4(v4.H(), v5.H(), v6.H(), v7.H(), i,
- MemOperand(x18, 8, PostIndex));
+ __ Ld4(v4.H(), v5.H(), v6.H(), v7.H(), i, MemOperand(x18, 8, PostIndex));
}
__ Mov(x19, src_base);
for (int i = 3; i >= 0; i--) {
- __ Ld4(v8.S(), v9.S(), v10.S(), v11.S(), i,
- MemOperand(x19, 16, PostIndex));
+ __ Ld4(v8.S(), v9.S(), v10.S(), v11.S(), i, MemOperand(x19, 16, PostIndex));
}
__ Mov(x20, src_base);
for (int i = 1; i >= 0; i--) {
- __ Ld4(v12.D(), v13.D(), v14.D(), v15.D(), i,
+ __ Ld4(v12.D(),
+ v13.D(),
+ v14.D(),
+ v15.D(),
+ i,
MemOperand(x20, 32, PostIndex));
}
@@ -4546,7 +4585,11 @@
__ Ldr(q17, MemOperand(x4, 16, PostIndex));
__ Ldr(q18, MemOperand(x4, 16, PostIndex));
__ Ldr(q19, MemOperand(x4));
- __ Ld4(v16.B(), v17.B(), v18.B(), v19.B(), 4,
+ __ Ld4(v16.B(),
+ v17.B(),
+ v18.B(),
+ v19.B(),
+ 4,
MemOperand(x21, x25, PostIndex));
__ Add(x25, x25, 1);
@@ -4555,7 +4598,11 @@
__ Ldr(q21, MemOperand(x5, 16, PostIndex));
__ Ldr(q22, MemOperand(x5, 16, PostIndex));
__ Ldr(q23, MemOperand(x5));
- __ Ld4(v20.H(), v21.H(), v22.H(), v23.H(), 3,
+ __ Ld4(v20.H(),
+ v21.H(),
+ v22.H(),
+ v23.H(),
+ 3,
MemOperand(x22, x25, PostIndex));
__ Add(x25, x25, 1);
@@ -4564,7 +4611,11 @@
__ Ldr(q25, MemOperand(x6, 16, PostIndex));
__ Ldr(q26, MemOperand(x6, 16, PostIndex));
__ Ldr(q27, MemOperand(x6));
- __ Ld4(v24.S(), v25.S(), v26.S(), v27.S(), 2,
+ __ Ld4(v24.S(),
+ v25.S(),
+ v26.S(),
+ v27.S(),
+ 2,
MemOperand(x23, x25, PostIndex));
__ Add(x25, x25, 1);
@@ -4573,7 +4624,11 @@
__ Ldr(q29, MemOperand(x7, 16, PostIndex));
__ Ldr(q30, MemOperand(x7, 16, PostIndex));
__ Ldr(q31, MemOperand(x7));
- __ Ld4(v28.D(), v29.D(), v30.D(), v31.D(), 1,
+ __ Ld4(v28.D(),
+ v29.D(),
+ v30.D(),
+ v31.D(),
+ 1,
MemOperand(x24, x25, PostIndex));
END();
@@ -4704,19 +4759,40 @@
START();
__ Mov(x17, src_base + 1);
__ Mov(x18, 1);
- __ Ld4r(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(),
+ __ Ld4r(v0.V8B(),
+ v1.V8B(),
+ v2.V8B(),
+ v3.V8B(),
MemOperand(x17, 4, PostIndex));
- __ Ld4r(v4.V16B(), v5.V16B(), v6.V16B(), v7.V16B(),
+ __ Ld4r(v4.V16B(),
+ v5.V16B(),
+ v6.V16B(),
+ v7.V16B(),
MemOperand(x17, x18, PostIndex));
- __ Ld4r(v8.V4H(), v9.V4H(), v10.V4H(), v11.V4H(),
+ __ Ld4r(v8.V4H(),
+ v9.V4H(),
+ v10.V4H(),
+ v11.V4H(),
MemOperand(x17, x18, PostIndex));
- __ Ld4r(v12.V8H(), v13.V8H(), v14.V8H(), v15.V8H(),
+ __ Ld4r(v12.V8H(),
+ v13.V8H(),
+ v14.V8H(),
+ v15.V8H(),
MemOperand(x17, 8, PostIndex));
- __ Ld4r(v16.V2S(), v17.V2S(), v18.V2S(), v19.V2S(),
+ __ Ld4r(v16.V2S(),
+ v17.V2S(),
+ v18.V2S(),
+ v19.V2S(),
MemOperand(x17, x18, PostIndex));
- __ Ld4r(v20.V4S(), v21.V4S(), v22.V4S(), v23.V4S(),
+ __ Ld4r(v20.V4S(),
+ v21.V4S(),
+ v22.V4S(),
+ v23.V4S(),
MemOperand(x17, 16, PostIndex));
- __ Ld4r(v24.V2D(), v25.V2D(), v26.V2D(), v27.V2D(),
+ __ Ld4r(v24.V2D(),
+ v25.V2D(),
+ v26.V2D(),
+ v27.V2D(),
MemOperand(x17, 32, PostIndex));
END();
@@ -5375,12 +5451,18 @@
__ Ldr(d19, MemOperand(x17, x19));
__ Ldr(d20, MemOperand(x17, x18));
- __ St1(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(),
+ __ St1(v0.V2S(),
+ v1.V2S(),
+ v2.V2S(),
+ v3.V2S(),
MemOperand(x17, 32, PostIndex));
__ Ldr(q21, MemOperand(x17, x21));
__ Ldr(q22, MemOperand(x17, x19));
- __ St1(v0.V1D(), v1.V1D(), v2.V1D(), v3.V1D(),
+ __ St1(v0.V1D(),
+ v1.V1D(),
+ v2.V1D(),
+ v3.V1D(),
MemOperand(x17, 32, PostIndex));
__ Ldr(q23, MemOperand(x17, x21));
__ Ldr(q24, MemOperand(x17, x19));
@@ -5486,7 +5568,10 @@
__ Ldr(q20, MemOperand(x17, x19));
__ Ldr(q21, MemOperand(x17, x18));
- __ St1(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(),
+ __ St1(v0.V2D(),
+ v1.V2D(),
+ v2.V2D(),
+ v3.V2D(),
MemOperand(x17, 64, PostIndex));
__ Ldr(q22, MemOperand(x17, x21));
__ Ldr(q23, MemOperand(x17, x20));
@@ -5515,7 +5600,7 @@
TEST(neon_st2_d) {
SETUP();
- uint8_t src[4*16];
+ uint8_t src[4 * 16];
for (unsigned i = 0; i < sizeof(src); i++) {
src[i] = i;
}
@@ -5555,7 +5640,7 @@
TEST(neon_st2_d_postindex) {
SETUP();
- uint8_t src[4*16];
+ uint8_t src[4 * 16];
for (unsigned i = 0; i < sizeof(src); i++) {
src[i] = i;
}
@@ -5593,7 +5678,7 @@
TEST(neon_st2_q) {
SETUP();
- uint8_t src[5*16];
+ uint8_t src[5 * 16];
for (unsigned i = 0; i < sizeof(src); i++) {
src[i] = i;
}
@@ -5634,7 +5719,7 @@
TEST(neon_st2_q_postindex) {
SETUP();
- uint8_t src[5*16];
+ uint8_t src[5 * 16];
for (unsigned i = 0; i < sizeof(src); i++) {
src[i] = i;
}
@@ -5676,7 +5761,7 @@
TEST(neon_st3_d) {
SETUP();
- uint8_t src[3*16];
+ uint8_t src[3 * 16];
for (unsigned i = 0; i < sizeof(src); i++) {
src[i] = i;
}
@@ -5714,7 +5799,7 @@
TEST(neon_st3_d_postindex) {
SETUP();
- uint8_t src[4*16];
+ uint8_t src[4 * 16];
for (unsigned i = 0; i < sizeof(src); i++) {
src[i] = i;
}
@@ -5755,7 +5840,7 @@
TEST(neon_st3_q) {
SETUP();
- uint8_t src[6*16];
+ uint8_t src[6 * 16];
for (unsigned i = 0; i < sizeof(src); i++) {
src[i] = i;
}
@@ -5802,7 +5887,7 @@
TEST(neon_st3_q_postindex) {
SETUP();
- uint8_t src[7*16];
+ uint8_t src[7 * 16];
for (unsigned i = 0; i < sizeof(src); i++) {
src[i] = i;
}
@@ -5849,7 +5934,7 @@
TEST(neon_st4_d) {
SETUP();
- uint8_t src[4*16];
+ uint8_t src[4 * 16];
for (unsigned i = 0; i < sizeof(src); i++) {
src[i] = i;
}
@@ -5892,7 +5977,7 @@
TEST(neon_st4_d_postindex) {
SETUP();
- uint8_t src[5*16];
+ uint8_t src[5 * 16];
for (unsigned i = 0; i < sizeof(src); i++) {
src[i] = i;
}
@@ -5907,12 +5992,17 @@
__ Ldr(q2, MemOperand(x17, 16, PostIndex));
__ Ldr(q3, MemOperand(x17, 16, PostIndex));
- __ St4(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(),
+ __ St4(v0.V8B(),
+ v1.V8B(),
+ v2.V8B(),
+ v3.V8B(),
MemOperand(x18, x22, PostIndex));
- __ St4(v0.V4H(), v1.V4H(), v2.V4H(), v3.V4H(),
+ __ St4(v0.V4H(),
+ v1.V4H(),
+ v2.V4H(),
+ v3.V4H(),
MemOperand(x18, 32, PostIndex));
- __ St4(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(),
- MemOperand(x18));
+ __ St4(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(), MemOperand(x18));
__ Mov(x19, src_base);
@@ -5939,7 +6029,7 @@
TEST(neon_st4_q) {
SETUP();
- uint8_t src[7*16];
+ uint8_t src[7 * 16];
for (unsigned i = 0; i < sizeof(src); i++) {
src[i] = i;
}
@@ -5990,7 +6080,7 @@
TEST(neon_st4_q_postindex) {
SETUP();
- uint8_t src[9*16];
+ uint8_t src[9 * 16];
for (unsigned i = 0; i < sizeof(src); i++) {
src[i] = i;
}
@@ -6005,14 +6095,22 @@
__ Ldr(q2, MemOperand(x17, 16, PostIndex));
__ Ldr(q3, MemOperand(x17, 16, PostIndex));
- __ St4(v0.V16B(), v1.V16B(), v2.V16B(), v3.V16B(),
+ __ St4(v0.V16B(),
+ v1.V16B(),
+ v2.V16B(),
+ v3.V16B(),
MemOperand(x18, x22, PostIndex));
- __ St4(v0.V8H(), v1.V8H(), v2.V8H(), v3.V8H(),
+ __ St4(v0.V8H(),
+ v1.V8H(),
+ v2.V8H(),
+ v3.V8H(),
MemOperand(x18, 64, PostIndex));
- __ St4(v0.V4S(), v1.V4S(), v2.V4S(), v3.V4S(),
+ __ St4(v0.V4S(),
+ v1.V4S(),
+ v2.V4S(),
+ v3.V4S(),
MemOperand(x18, x22, PostIndex));
- __ St4(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(),
- MemOperand(x18));
+ __ St4(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(), MemOperand(x18));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -6141,7 +6239,12 @@
__ Mov(v27, v1);
__ Mov(v28, v2);
__ Mov(v29, v3);
- __ Tbl(v26.V16B(), v26.V16B(), v27.V16B(), v28.V16B(), v29.V16B(), v26.V16B());
+ __ Tbl(v26.V16B(),
+ v26.V16B(),
+ v27.V16B(),
+ v28.V16B(),
+ v29.V16B(),
+ v26.V16B());
END();
RUN();
@@ -6192,7 +6295,12 @@
__ Mov(v27, v1);
__ Mov(v28, v2);
__ Mov(v29, v3);
- __ Tbx(v26.V16B(), v26.V16B(), v27.V16B(), v28.V16B(), v29.V16B(), v26.V16B());
+ __ Tbx(v26.V16B(),
+ v26.V16B(),
+ v27.V16B(),
+ v28.V16B(),
+ v29.V16B(),
+ v26.V16B());
END();
RUN();
@@ -6310,8 +6418,10 @@
TEST(ldp_stp_quad) {
SETUP();
- uint64_t src[4] = {0x0123456789abcdef, 0xaaaaaaaa55555555,
- 0xfedcba9876543210, 0x55555555aaaaaaaa};
+ uint64_t src[4] = {0x0123456789abcdef,
+ 0xaaaaaaaa55555555,
+ 0xfedcba9876543210,
+ 0x55555555aaaaaaaa};
uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
@@ -6343,7 +6453,8 @@
TEST(ldp_stp_offset) {
SETUP();
- uint64_t src[3] = {0x0011223344556677, 0x8899aabbccddeeff,
+ uint64_t src[3] = {0x0011223344556677,
+ 0x8899aabbccddeeff,
0xffeeddccbbaa9988};
uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
@@ -6397,7 +6508,8 @@
TEST(ldp_stp_offset_wide) {
SETUP();
- uint64_t src[3] = {0x0011223344556677, 0x8899aabbccddeeff,
+ uint64_t src[3] = {0x0011223344556677,
+ 0x8899aabbccddeeff,
0xffeeddccbbaa9988};
uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
@@ -6454,8 +6566,10 @@
TEST(ldnp_stnp_offset) {
SETUP();
- uint64_t src[4] = {0x0011223344556677, 0x8899aabbccddeeff,
- 0xffeeddccbbaa9988, 0x7766554433221100};
+ uint64_t src[4] = {0x0011223344556677,
+ 0x8899aabbccddeeff,
+ 0xffeeddccbbaa9988,
+ 0x7766554433221100};
uint64_t dst[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
@@ -6626,7 +6740,8 @@
TEST(ldp_stp_preindex) {
SETUP();
- uint64_t src[3] = {0x0011223344556677, 0x8899aabbccddeeff,
+ uint64_t src[3] = {0x0011223344556677,
+ 0x8899aabbccddeeff,
0xffeeddccbbaa9988};
uint64_t dst[5] = {0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
@@ -6680,7 +6795,8 @@
TEST(ldp_stp_preindex_wide) {
SETUP();
- uint64_t src[3] = {0x0011223344556677, 0x8899aabbccddeeff,
+ uint64_t src[3] = {0x0011223344556677,
+ 0x8899aabbccddeeff,
0xffeeddccbbaa9988};
uint64_t dst[5] = {0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
@@ -6697,7 +6813,7 @@
__ Mov(x19, x24);
__ Mov(x24, src_base - base_offset + 4);
__ Ldp(w2, w3, MemOperand(x24, base_offset - 4, PreIndex));
- __ Stp(w2, w3, MemOperand(x25, 4 - base_offset , PreIndex));
+ __ Stp(w2, w3, MemOperand(x25, 4 - base_offset, PreIndex));
__ Mov(x20, x25);
__ Mov(x25, dst_base + base_offset + 4);
__ Mov(x24, src_base - base_offset);
@@ -6742,8 +6858,10 @@
TEST(ldp_stp_postindex) {
SETUP();
- uint64_t src[4] = {0x0011223344556677, 0x8899aabbccddeeff,
- 0xffeeddccbbaa9988, 0x7766554433221100};
+ uint64_t src[4] = {0x0011223344556677,
+ 0x8899aabbccddeeff,
+ 0xffeeddccbbaa9988,
+ 0x7766554433221100};
uint64_t dst[5] = {0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
@@ -6796,8 +6914,10 @@
TEST(ldp_stp_postindex_wide) {
SETUP();
- uint64_t src[4] = {0x0011223344556677, 0x8899aabbccddeeff,
- 0xffeeddccbbaa9988, 0x7766554433221100};
+ uint64_t src[4] = {0x0011223344556677,
+ 0x8899aabbccddeeff,
+ 0xffeeddccbbaa9988,
+ 0x7766554433221100};
uint64_t dst[5] = {0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
@@ -6922,8 +7042,7 @@
TEST(ldur_stur_fp) {
SETUP();
- int64_t src[3] = {0x0123456789abcdef, 0x0123456789abcdef,
- 0x0123456789abcdef};
+ int64_t src[3] = {0x0123456789abcdef, 0x0123456789abcdef, 0x0123456789abcdef};
int64_t dst[5] = {0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
@@ -7050,10 +7169,12 @@
TEST(ldr_literal_values_q) {
SETUP();
- static const uint64_t kHalfValues[] = {
- 0x8000000000000000, 0x7fffffffffffffff, 0x0000000000000000,
- 0xffffffffffffffff, 0x00ff00ff00ff00ff, 0x1234567890abcdef
- };
+ static const uint64_t kHalfValues[] = {0x8000000000000000,
+ 0x7fffffffffffffff,
+ 0x0000000000000000,
+ 0xffffffffffffffff,
+ 0x00ff00ff00ff00ff,
+ 0x1234567890abcdef};
const int card = sizeof(kHalfValues) / sizeof(kHalfValues[0]);
const Register& ref_low64 = x1;
const Register& ref_high64 = x2;
@@ -7091,9 +7212,9 @@
void LoadIntValueHelper(T values[], int card) {
SETUP();
- const bool is_32bits = (sizeof(T) == 4);
- const Register& tgt1 = is_32bits ? w1 : x1;
- const Register& tgt2 = is_32bits ? w2 : x2;
+ const bool is_32bit = (sizeof(T) == 4);
+ Register tgt1 = is_32bit ? Register(w1) : Register(x1);
+ Register tgt2 = is_32bit ? Register(w2) : Register(x2);
START();
__ Mov(x0, 0);
@@ -7117,20 +7238,25 @@
TEST(ldr_literal_values_x) {
- static const uint64_t kValues[] = {
- 0x8000000000000000, 0x7fffffffffffffff, 0x0000000000000000,
- 0xffffffffffffffff, 0x00ff00ff00ff00ff, 0x1234567890abcdef
- };
+ static const uint64_t kValues[] = {0x8000000000000000,
+ 0x7fffffffffffffff,
+ 0x0000000000000000,
+ 0xffffffffffffffff,
+ 0x00ff00ff00ff00ff,
+ 0x1234567890abcdef};
LoadIntValueHelper(kValues, sizeof(kValues) / sizeof(kValues[0]));
}
TEST(ldr_literal_values_w) {
- static const uint32_t kValues[] = {
- 0x80000000, 0x7fffffff, 0x00000000, 0xffffffff, 0x00ff00ff, 0x12345678,
- 0x90abcdef
- };
+ static const uint32_t kValues[] = {0x80000000,
+ 0x7fffffff,
+ 0x00000000,
+ 0xffffffff,
+ 0x00ff00ff,
+ 0x12345678,
+ 0x90abcdef};
LoadIntValueHelper(kValues, sizeof(kValues) / sizeof(kValues[0]));
}
@@ -7142,16 +7268,16 @@
const bool is_32bits = (sizeof(T) == 4);
const FPRegister& fp_tgt = is_32bits ? s2 : d2;
- const Register& tgt1 = is_32bits ? w1 : x1;
- const Register& tgt2 = is_32bits ? w2 : x2;
+ const Register& tgt1 = is_32bits ? Register(w1) : Register(x1);
+ const Register& tgt2 = is_32bits ? Register(w2) : Register(x2);
START();
__ Mov(x0, 0);
// If one of the values differ then x0 will be one.
for (int i = 0; i < card; ++i) {
- __ Mov(tgt1, is_32bits ? FloatToRawbits(values[i])
- : DoubleToRawbits(values[i]));
+ __ Mov(tgt1,
+ is_32bits ? FloatToRawbits(values[i]) : DoubleToRawbits(values[i]));
__ Ldr(fp_tgt, values[i]);
__ Fmov(tgt2, fp_tgt);
__ Cmp(tgt1, tgt2);
@@ -7168,18 +7294,14 @@
}
TEST(ldr_literal_values_d) {
- static const double kValues[] = {
- -0.0, 0.0, -1.0, 1.0, -1e10, 1e10
- };
+ static const double kValues[] = {-0.0, 0.0, -1.0, 1.0, -1e10, 1e10};
LoadFPValueHelper(kValues, sizeof(kValues) / sizeof(kValues[0]));
}
TEST(ldr_literal_values_s) {
- static const float kValues[] = {
- -0.0, 0.0, -1.0, 1.0, -1e10, 1e10
- };
+ static const float kValues[] = {-0.0, 0.0, -1.0, 1.0, -1e10, 1e10};
LoadFPValueHelper(kValues, sizeof(kValues) / sizeof(kValues[0]));
}
@@ -7187,16 +7309,19 @@
TEST(ldr_literal_custom) {
SETUP();
- ALLOW_ASM();
Label end_of_pool_before;
Label end_of_pool_after;
+
+ const size_t kSizeOfPoolInBytes = 44;
+
Literal<uint64_t> before_x(0x1234567890abcdef);
Literal<uint32_t> before_w(0xfedcba09);
Literal<uint32_t> before_sx(0x80000000);
Literal<uint64_t> before_q(0x1234000056780000, 0xabcd0000ef000000);
Literal<double> before_d(1.234);
Literal<float> before_s(2.5);
+
Literal<uint64_t> after_x(0x1234567890abcdef);
Literal<uint32_t> after_w(0xfedcba09);
Literal<uint32_t> after_sx(0x80000000);
@@ -7208,36 +7333,45 @@
// Manually generate a pool.
__ B(&end_of_pool_before);
- __ place(&before_x);
- __ place(&before_w);
- __ place(&before_sx);
- __ place(&before_q);
- __ place(&before_d);
- __ place(&before_s);
+ {
+ ExactAssemblyScope scope(&masm, kSizeOfPoolInBytes);
+ __ place(&before_x);
+ __ place(&before_w);
+ __ place(&before_sx);
+ __ place(&before_q);
+ __ place(&before_d);
+ __ place(&before_s);
+ }
__ Bind(&end_of_pool_before);
- __ ldr(x2, &before_x);
- __ ldr(w3, &before_w);
- __ ldrsw(x5, &before_sx);
- __ ldr(q11, &before_q);
- __ ldr(d13, &before_d);
- __ ldr(s25, &before_s);
+ {
+ ExactAssemblyScope scope(&masm, 12 * kInstructionSize);
+ __ ldr(x2, &before_x);
+ __ ldr(w3, &before_w);
+ __ ldrsw(x5, &before_sx);
+ __ ldr(q11, &before_q);
+ __ ldr(d13, &before_d);
+ __ ldr(s25, &before_s);
- __ ldr(x6, &after_x);
- __ ldr(w7, &after_w);
- __ ldrsw(x8, &after_sx);
- __ ldr(q18, &after_q);
- __ ldr(d14, &after_d);
- __ ldr(s26, &after_s);
+ __ ldr(x6, &after_x);
+ __ ldr(w7, &after_w);
+ __ ldrsw(x8, &after_sx);
+ __ ldr(q18, &after_q);
+ __ ldr(d14, &after_d);
+ __ ldr(s26, &after_s);
+ }
// Manually generate a pool.
__ B(&end_of_pool_after);
- __ place(&after_x);
- __ place(&after_w);
- __ place(&after_sx);
- __ place(&after_q);
- __ place(&after_d);
- __ place(&after_s);
+ {
+ ExactAssemblyScope scope(&masm, kSizeOfPoolInBytes);
+ __ place(&after_x);
+ __ place(&after_w);
+ __ place(&after_sx);
+ __ place(&after_q);
+ __ place(&after_d);
+ __ place(&after_s);
+ }
__ Bind(&end_of_pool_after);
END();
@@ -7264,15 +7398,18 @@
TEST(ldr_literal_custom_shared) {
SETUP();
- ALLOW_ASM();
Label end_of_pool_before;
Label end_of_pool_after;
+
+ const size_t kSizeOfPoolInBytes = 40;
+
Literal<uint64_t> before_x(0x1234567890abcdef);
Literal<uint32_t> before_w(0xfedcba09);
Literal<uint64_t> before_q(0x1234000056780000, 0xabcd0000ef000000);
Literal<double> before_d(1.234);
Literal<float> before_s(2.5);
+
Literal<uint64_t> after_x(0x1234567890abcdef);
Literal<uint32_t> after_w(0xfedcba09);
Literal<uint64_t> after_q(0x1234000056780000, 0xabcd0000ef000000);
@@ -7283,25 +7420,29 @@
// Manually generate a pool.
__ B(&end_of_pool_before);
- __ place(&before_x);
- __ place(&before_w);
- __ place(&before_q);
- __ place(&before_d);
- __ place(&before_s);
+ {
+ ExactAssemblyScope scope(&masm, kSizeOfPoolInBytes);
+ __ place(&before_x);
+ __ place(&before_w);
+ __ place(&before_q);
+ __ place(&before_d);
+ __ place(&before_s);
+ }
__ Bind(&end_of_pool_before);
// Load the entries several times to test that literals can be shared.
for (int i = 0; i < 50; i++) {
+ ExactAssemblyScope scope(&masm, 12 * kInstructionSize);
__ ldr(x2, &before_x);
__ ldr(w3, &before_w);
- __ ldrsw(x5, &before_w); // Re-use before_w.
+ __ ldrsw(x5, &before_w); // Re-use before_w.
__ ldr(q11, &before_q);
__ ldr(d13, &before_d);
__ ldr(s25, &before_s);
__ ldr(x6, &after_x);
__ ldr(w7, &after_w);
- __ ldrsw(x8, &after_w); // Re-use after_w.
+ __ ldrsw(x8, &after_w); // Re-use after_w.
__ ldr(q18, &after_q);
__ ldr(d14, &after_d);
__ ldr(s26, &after_s);
@@ -7309,11 +7450,14 @@
// Manually generate a pool.
__ B(&end_of_pool_after);
- __ place(&after_x);
- __ place(&after_w);
- __ place(&after_q);
- __ place(&after_d);
- __ place(&after_s);
+ {
+ ExactAssemblyScope scope(&masm, kSizeOfPoolInBytes);
+ __ place(&after_x);
+ __ place(&after_w);
+ __ place(&after_q);
+ __ place(&after_d);
+ __ place(&after_s);
+ }
__ Bind(&end_of_pool_after);
END();
@@ -7417,13 +7561,13 @@
TEST(prfm_literal_imm19) {
SETUP();
- ALLOW_ASM();
START();
for (int i = 0; i < (1 << ImmPrefetchOperation_width); i++) {
// Unallocated prefetch operations are ignored, so test all of them.
PrefetchOperation op = static_cast<PrefetchOperation>(i);
+ ExactAssemblyScope scope(&masm, 7 * kInstructionSize);
// The address used in prfm doesn't have to be valid.
__ prfm(op, INT64_C(0));
__ prfm(op, 1);
@@ -7442,7 +7586,6 @@
TEST(prfm_literal) {
SETUP();
- ALLOW_ASM();
Label end_of_pool_before;
Label end_of_pool_after;
@@ -7453,21 +7596,27 @@
// Manually generate a pool.
__ B(&end_of_pool_before);
- __ place(&before);
+ {
+ ExactAssemblyScope scope(&masm, before.GetSize());
+ __ place(&before);
+ }
__ Bind(&end_of_pool_before);
for (int i = 0; i < (1 << ImmPrefetchOperation_width); i++) {
// Unallocated prefetch operations are ignored, so test all of them.
PrefetchOperation op = static_cast<PrefetchOperation>(i);
- CodeBufferCheckScope guard(&masm, 2 * kInstructionSize);
+ ExactAssemblyScope guard(&masm, 2 * kInstructionSize);
__ prfm(op, &before);
__ prfm(op, &after);
}
// Manually generate a pool.
__ B(&end_of_pool_after);
- __ place(&after);
+ {
+ ExactAssemblyScope scope(&masm, after.GetSize());
+ __ place(&after);
+ }
__ Bind(&end_of_pool_after);
END();
@@ -7502,10 +7651,12 @@
TEST(load_prfm_literal) {
// Test literals shared between both prfm and ldr.
SETUP();
- ALLOW_ASM();
Label end_of_pool_before;
Label end_of_pool_after;
+
+ const size_t kSizeOfPoolInBytes = 28;
+
Literal<uint64_t> before_x(0x1234567890abcdef);
Literal<uint32_t> before_w(0xfedcba09);
Literal<uint32_t> before_sx(0x80000000);
@@ -7521,16 +7672,20 @@
// Manually generate a pool.
__ B(&end_of_pool_before);
- __ place(&before_x);
- __ place(&before_w);
- __ place(&before_sx);
- __ place(&before_d);
- __ place(&before_s);
+ {
+ ExactAssemblyScope scope(&masm, kSizeOfPoolInBytes);
+ __ place(&before_x);
+ __ place(&before_w);
+ __ place(&before_sx);
+ __ place(&before_d);
+ __ place(&before_s);
+ }
__ Bind(&end_of_pool_before);
for (int i = 0; i < (1 << ImmPrefetchOperation_width); i++) {
// Unallocated prefetch operations are ignored, so test all of them.
PrefetchOperation op = static_cast<PrefetchOperation>(i);
+ ExactAssemblyScope scope(&masm, 10 * kInstructionSize);
__ prfm(op, &before_x);
__ prfm(op, &before_w);
@@ -7545,25 +7700,31 @@
__ prfm(op, &after_s);
}
- __ ldr(x2, &before_x);
- __ ldr(w3, &before_w);
- __ ldrsw(x5, &before_sx);
- __ ldr(d13, &before_d);
- __ ldr(s25, &before_s);
+ {
+ ExactAssemblyScope scope(&masm, 10 * kInstructionSize);
+ __ ldr(x2, &before_x);
+ __ ldr(w3, &before_w);
+ __ ldrsw(x5, &before_sx);
+ __ ldr(d13, &before_d);
+ __ ldr(s25, &before_s);
- __ ldr(x6, &after_x);
- __ ldr(w7, &after_w);
- __ ldrsw(x8, &after_sx);
- __ ldr(d14, &after_d);
- __ ldr(s26, &after_s);
+ __ ldr(x6, &after_x);
+ __ ldr(w7, &after_w);
+ __ ldrsw(x8, &after_sx);
+ __ ldr(d14, &after_d);
+ __ ldr(s26, &after_s);
+ }
// Manually generate a pool.
__ B(&end_of_pool_after);
- __ place(&after_x);
- __ place(&after_w);
- __ place(&after_sx);
- __ place(&after_d);
- __ place(&after_s);
+ {
+ ExactAssemblyScope scope(&masm, kSizeOfPoolInBytes);
+ __ place(&after_x);
+ __ place(&after_w);
+ __ place(&after_sx);
+ __ place(&after_d);
+ __ place(&after_s);
+ }
__ Bind(&end_of_pool_after);
END();
@@ -7954,9 +8115,9 @@
}
-template<typename T, typename Op>
-static void AdcsSbcsHelper(Op op, T left, T right, int carry,
- T expected, StatusFlags expected_flags) {
+template <typename T, typename Op>
+static void AdcsSbcsHelper(
+ Op op, T left, T right, int carry, T expected, StatusFlags expected_flags) {
int reg_size = sizeof(T) * 8;
Register left_reg(0, reg_size);
Register right_reg(1, reg_size);
@@ -7986,10 +8147,14 @@
TEST(adcs_sbcs_x) {
uint64_t inputs[] = {
- 0x0000000000000000, 0x0000000000000001,
- 0x7ffffffffffffffe, 0x7fffffffffffffff,
- 0x8000000000000000, 0x8000000000000001,
- 0xfffffffffffffffe, 0xffffffffffffffff,
+ 0x0000000000000000,
+ 0x0000000000000001,
+ 0x7ffffffffffffffe,
+ 0x7fffffffffffffff,
+ 0x8000000000000000,
+ 0x8000000000000001,
+ 0xfffffffffffffffe,
+ 0xffffffffffffffff,
};
static const size_t input_count = sizeof(inputs) / sizeof(inputs[0]);
@@ -8000,157 +8165,171 @@
StatusFlags carry1_flags;
};
- static const Expected expected_adcs_x[input_count][input_count] = {
- {{0x0000000000000000, ZFlag, 0x0000000000000001, NoFlag},
- {0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag},
- {0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag},
- {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
- {0x8000000000000000, NFlag, 0x8000000000000001, NFlag},
- {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
- {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag}},
- {{0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag},
- {0x0000000000000002, NoFlag, 0x0000000000000003, NoFlag},
- {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
- {0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
- {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
- {0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag}},
- {{0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag},
- {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
- {0xfffffffffffffffc, NVFlag, 0xfffffffffffffffd, NVFlag},
- {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
- {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
- {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag}},
- {{0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
- {0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
- {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
- {0xfffffffffffffffe, NVFlag, 0xffffffffffffffff, NVFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
- {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
- {0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag}},
- {{0x8000000000000000, NFlag, 0x8000000000000001, NFlag},
- {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
- {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0x0000000000000000, ZCVFlag, 0x0000000000000001, CVFlag},
- {0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
- {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
- {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag}},
- {{0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
- {0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
- {0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
- {0x0000000000000002, CVFlag, 0x0000000000000003, CVFlag},
- {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
- {0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag}},
- {{0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
- {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
- {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
- {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
- {0xfffffffffffffffc, NCFlag, 0xfffffffffffffffd, NCFlag},
- {0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag}},
- {{0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
- {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
- {0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag},
- {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
- {0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag},
- {0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag},
- {0xfffffffffffffffe, NCFlag, 0xffffffffffffffff, NCFlag}}
- };
+ static const Expected expected_adcs_x[input_count][input_count] =
+ {{{0x0000000000000000, ZFlag, 0x0000000000000001, NoFlag},
+ {0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag},
+ {0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag},
+ {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
+ {0x8000000000000000, NFlag, 0x8000000000000001, NFlag},
+ {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
+ {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag}},
+ {{0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag},
+ {0x0000000000000002, NoFlag, 0x0000000000000003, NoFlag},
+ {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
+ {0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
+ {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
+ {0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag}},
+ {{0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag},
+ {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
+ {0xfffffffffffffffc, NVFlag, 0xfffffffffffffffd, NVFlag},
+ {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
+ {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
+ {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag}},
+ {{0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
+ {0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
+ {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
+ {0xfffffffffffffffe, NVFlag, 0xffffffffffffffff, NVFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
+ {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
+ {0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag}},
+ {{0x8000000000000000, NFlag, 0x8000000000000001, NFlag},
+ {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
+ {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0x0000000000000000, ZCVFlag, 0x0000000000000001, CVFlag},
+ {0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
+ {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
+ {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag}},
+ {{0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
+ {0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
+ {0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
+ {0x0000000000000002, CVFlag, 0x0000000000000003, CVFlag},
+ {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
+ {0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag}},
+ {{0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
+ {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
+ {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
+ {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
+ {0xfffffffffffffffc, NCFlag, 0xfffffffffffffffd, NCFlag},
+ {0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag}},
+ {{0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
+ {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
+ {0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag},
+ {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
+ {0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag},
+ {0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag},
+ {0xfffffffffffffffe, NCFlag, 0xffffffffffffffff, NCFlag}}};
- static const Expected expected_sbcs_x[input_count][input_count] = {
- {{0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
- {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
- {0x8000000000000000, NFlag, 0x8000000000000001, NFlag},
- {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
- {0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag},
- {0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag},
- {0x0000000000000000, ZFlag, 0x0000000000000001, NoFlag}},
- {{0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
- {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
- {0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
- {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
- {0x0000000000000002, NoFlag, 0x0000000000000003, NoFlag},
- {0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag}},
- {{0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
- {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
- {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
- {0xfffffffffffffffc, NVFlag, 0xfffffffffffffffd, NVFlag},
- {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
- {0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag}},
- {{0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag},
- {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
- {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0xfffffffffffffffe, NVFlag, 0xffffffffffffffff, NVFlag},
- {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
- {0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
- {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag}},
- {{0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
- {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
- {0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
- {0x0000000000000000, ZCVFlag, 0x0000000000000001, CVFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
- {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
- {0x8000000000000000, NFlag, 0x8000000000000001, NFlag}},
- {{0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag},
- {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
- {0x0000000000000002, CVFlag, 0x0000000000000003, CVFlag},
- {0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
- {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
- {0x8000000000000001, NFlag, 0x8000000000000002, NFlag}},
- {{0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag},
- {0xfffffffffffffffc, NCFlag, 0xfffffffffffffffd, NCFlag},
- {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
- {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
- {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
- {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag}},
- {{0xfffffffffffffffe, NCFlag, 0xffffffffffffffff, NCFlag},
- {0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag},
- {0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag},
- {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
- {0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag},
- {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
- {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag}}
- };
+ static const Expected expected_sbcs_x[input_count][input_count] =
+ {{{0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
+ {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
+ {0x8000000000000000, NFlag, 0x8000000000000001, NFlag},
+ {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
+ {0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag},
+ {0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag},
+ {0x0000000000000000, ZFlag, 0x0000000000000001, NoFlag}},
+ {{0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
+ {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
+ {0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
+ {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
+ {0x0000000000000002, NoFlag, 0x0000000000000003, NoFlag},
+ {0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag}},
+ {{0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
+ {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
+ {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
+ {0xfffffffffffffffc, NVFlag, 0xfffffffffffffffd, NVFlag},
+ {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
+ {0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag}},
+ {{0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag},
+ {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
+ {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0xfffffffffffffffe, NVFlag, 0xffffffffffffffff, NVFlag},
+ {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
+ {0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
+ {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag}},
+ {{0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
+ {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
+ {0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
+ {0x0000000000000000, ZCVFlag, 0x0000000000000001, CVFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
+ {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
+ {0x8000000000000000, NFlag, 0x8000000000000001, NFlag}},
+ {{0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag},
+ {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
+ {0x0000000000000002, CVFlag, 0x0000000000000003, CVFlag},
+ {0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
+ {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
+ {0x8000000000000001, NFlag, 0x8000000000000002, NFlag}},
+ {{0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag},
+ {0xfffffffffffffffc, NCFlag, 0xfffffffffffffffd, NCFlag},
+ {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
+ {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
+ {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
+ {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag}},
+ {{0xfffffffffffffffe, NCFlag, 0xffffffffffffffff, NCFlag},
+ {0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag},
+ {0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag},
+ {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
+ {0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag},
+ {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
+ {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag}}};
for (size_t left = 0; left < input_count; left++) {
for (size_t right = 0; right < input_count; right++) {
- const Expected & expected = expected_adcs_x[left][right];
- AdcsSbcsHelper(&MacroAssembler::Adcs, inputs[left], inputs[right], 0,
- expected.carry0_result, expected.carry0_flags);
- AdcsSbcsHelper(&MacroAssembler::Adcs, inputs[left], inputs[right], 1,
- expected.carry1_result, expected.carry1_flags);
+ const Expected& expected = expected_adcs_x[left][right];
+ AdcsSbcsHelper(&MacroAssembler::Adcs,
+ inputs[left],
+ inputs[right],
+ 0,
+ expected.carry0_result,
+ expected.carry0_flags);
+ AdcsSbcsHelper(&MacroAssembler::Adcs,
+ inputs[left],
+ inputs[right],
+ 1,
+ expected.carry1_result,
+ expected.carry1_flags);
}
}
for (size_t left = 0; left < input_count; left++) {
for (size_t right = 0; right < input_count; right++) {
- const Expected & expected = expected_sbcs_x[left][right];
- AdcsSbcsHelper(&MacroAssembler::Sbcs, inputs[left], inputs[right], 0,
- expected.carry0_result, expected.carry0_flags);
- AdcsSbcsHelper(&MacroAssembler::Sbcs, inputs[left], inputs[right], 1,
- expected.carry1_result, expected.carry1_flags);
+ const Expected& expected = expected_sbcs_x[left][right];
+ AdcsSbcsHelper(&MacroAssembler::Sbcs,
+ inputs[left],
+ inputs[right],
+ 0,
+ expected.carry0_result,
+ expected.carry0_flags);
+ AdcsSbcsHelper(&MacroAssembler::Sbcs,
+ inputs[left],
+ inputs[right],
+ 1,
+ expected.carry1_result,
+ expected.carry1_flags);
}
}
}
@@ -8158,8 +8337,14 @@
TEST(adcs_sbcs_w) {
uint32_t inputs[] = {
- 0x00000000, 0x00000001, 0x7ffffffe, 0x7fffffff,
- 0x80000000, 0x80000001, 0xfffffffe, 0xffffffff,
+ 0x00000000,
+ 0x00000001,
+ 0x7ffffffe,
+ 0x7fffffff,
+ 0x80000000,
+ 0x80000001,
+ 0xfffffffe,
+ 0xffffffff,
};
static const size_t input_count = sizeof(inputs) / sizeof(inputs[0]);
@@ -8170,157 +8355,171 @@
StatusFlags carry1_flags;
};
- static const Expected expected_adcs_w[input_count][input_count] = {
- {{0x00000000, ZFlag, 0x00000001, NoFlag},
- {0x00000001, NoFlag, 0x00000002, NoFlag},
- {0x7ffffffe, NoFlag, 0x7fffffff, NoFlag},
- {0x7fffffff, NoFlag, 0x80000000, NVFlag},
- {0x80000000, NFlag, 0x80000001, NFlag},
- {0x80000001, NFlag, 0x80000002, NFlag},
- {0xfffffffe, NFlag, 0xffffffff, NFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag}},
- {{0x00000001, NoFlag, 0x00000002, NoFlag},
- {0x00000002, NoFlag, 0x00000003, NoFlag},
- {0x7fffffff, NoFlag, 0x80000000, NVFlag},
- {0x80000000, NVFlag, 0x80000001, NVFlag},
- {0x80000001, NFlag, 0x80000002, NFlag},
- {0x80000002, NFlag, 0x80000003, NFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0x00000000, ZCFlag, 0x00000001, CFlag}},
- {{0x7ffffffe, NoFlag, 0x7fffffff, NoFlag},
- {0x7fffffff, NoFlag, 0x80000000, NVFlag},
- {0xfffffffc, NVFlag, 0xfffffffd, NVFlag},
- {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
- {0xfffffffe, NFlag, 0xffffffff, NFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
- {0x7ffffffd, CFlag, 0x7ffffffe, CFlag}},
- {{0x7fffffff, NoFlag, 0x80000000, NVFlag},
- {0x80000000, NVFlag, 0x80000001, NVFlag},
- {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
- {0xfffffffe, NVFlag, 0xffffffff, NVFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0x00000000, ZCFlag, 0x00000001, CFlag},
- {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
- {0x7ffffffe, CFlag, 0x7fffffff, CFlag}},
- {{0x80000000, NFlag, 0x80000001, NFlag},
- {0x80000001, NFlag, 0x80000002, NFlag},
- {0xfffffffe, NFlag, 0xffffffff, NFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0x00000000, ZCVFlag, 0x00000001, CVFlag},
- {0x00000001, CVFlag, 0x00000002, CVFlag},
- {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
- {0x7fffffff, CVFlag, 0x80000000, NCFlag}},
- {{0x80000001, NFlag, 0x80000002, NFlag},
- {0x80000002, NFlag, 0x80000003, NFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0x00000000, ZCFlag, 0x00000001, CFlag},
- {0x00000001, CVFlag, 0x00000002, CVFlag},
- {0x00000002, CVFlag, 0x00000003, CVFlag},
- {0x7fffffff, CVFlag, 0x80000000, NCFlag},
- {0x80000000, NCFlag, 0x80000001, NCFlag}},
- {{0xfffffffe, NFlag, 0xffffffff, NFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
- {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
- {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
- {0x7fffffff, CVFlag, 0x80000000, NCFlag},
- {0xfffffffc, NCFlag, 0xfffffffd, NCFlag},
- {0xfffffffd, NCFlag, 0xfffffffe, NCFlag}},
- {{0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0x00000000, ZCFlag, 0x00000001, CFlag},
- {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
- {0x7ffffffe, CFlag, 0x7fffffff, CFlag},
- {0x7fffffff, CVFlag, 0x80000000, NCFlag},
- {0x80000000, NCFlag, 0x80000001, NCFlag},
- {0xfffffffd, NCFlag, 0xfffffffe, NCFlag},
- {0xfffffffe, NCFlag, 0xffffffff, NCFlag}}
- };
+ static const Expected expected_adcs_w[input_count][input_count] =
+ {{{0x00000000, ZFlag, 0x00000001, NoFlag},
+ {0x00000001, NoFlag, 0x00000002, NoFlag},
+ {0x7ffffffe, NoFlag, 0x7fffffff, NoFlag},
+ {0x7fffffff, NoFlag, 0x80000000, NVFlag},
+ {0x80000000, NFlag, 0x80000001, NFlag},
+ {0x80000001, NFlag, 0x80000002, NFlag},
+ {0xfffffffe, NFlag, 0xffffffff, NFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag}},
+ {{0x00000001, NoFlag, 0x00000002, NoFlag},
+ {0x00000002, NoFlag, 0x00000003, NoFlag},
+ {0x7fffffff, NoFlag, 0x80000000, NVFlag},
+ {0x80000000, NVFlag, 0x80000001, NVFlag},
+ {0x80000001, NFlag, 0x80000002, NFlag},
+ {0x80000002, NFlag, 0x80000003, NFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0x00000000, ZCFlag, 0x00000001, CFlag}},
+ {{0x7ffffffe, NoFlag, 0x7fffffff, NoFlag},
+ {0x7fffffff, NoFlag, 0x80000000, NVFlag},
+ {0xfffffffc, NVFlag, 0xfffffffd, NVFlag},
+ {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
+ {0xfffffffe, NFlag, 0xffffffff, NFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
+ {0x7ffffffd, CFlag, 0x7ffffffe, CFlag}},
+ {{0x7fffffff, NoFlag, 0x80000000, NVFlag},
+ {0x80000000, NVFlag, 0x80000001, NVFlag},
+ {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
+ {0xfffffffe, NVFlag, 0xffffffff, NVFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0x00000000, ZCFlag, 0x00000001, CFlag},
+ {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
+ {0x7ffffffe, CFlag, 0x7fffffff, CFlag}},
+ {{0x80000000, NFlag, 0x80000001, NFlag},
+ {0x80000001, NFlag, 0x80000002, NFlag},
+ {0xfffffffe, NFlag, 0xffffffff, NFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0x00000000, ZCVFlag, 0x00000001, CVFlag},
+ {0x00000001, CVFlag, 0x00000002, CVFlag},
+ {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
+ {0x7fffffff, CVFlag, 0x80000000, NCFlag}},
+ {{0x80000001, NFlag, 0x80000002, NFlag},
+ {0x80000002, NFlag, 0x80000003, NFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0x00000000, ZCFlag, 0x00000001, CFlag},
+ {0x00000001, CVFlag, 0x00000002, CVFlag},
+ {0x00000002, CVFlag, 0x00000003, CVFlag},
+ {0x7fffffff, CVFlag, 0x80000000, NCFlag},
+ {0x80000000, NCFlag, 0x80000001, NCFlag}},
+ {{0xfffffffe, NFlag, 0xffffffff, NFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
+ {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
+ {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
+ {0x7fffffff, CVFlag, 0x80000000, NCFlag},
+ {0xfffffffc, NCFlag, 0xfffffffd, NCFlag},
+ {0xfffffffd, NCFlag, 0xfffffffe, NCFlag}},
+ {{0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0x00000000, ZCFlag, 0x00000001, CFlag},
+ {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
+ {0x7ffffffe, CFlag, 0x7fffffff, CFlag},
+ {0x7fffffff, CVFlag, 0x80000000, NCFlag},
+ {0x80000000, NCFlag, 0x80000001, NCFlag},
+ {0xfffffffd, NCFlag, 0xfffffffe, NCFlag},
+ {0xfffffffe, NCFlag, 0xffffffff, NCFlag}}};
- static const Expected expected_sbcs_w[input_count][input_count] = {
- {{0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0xfffffffe, NFlag, 0xffffffff, NFlag},
- {0x80000001, NFlag, 0x80000002, NFlag},
- {0x80000000, NFlag, 0x80000001, NFlag},
- {0x7fffffff, NoFlag, 0x80000000, NVFlag},
- {0x7ffffffe, NoFlag, 0x7fffffff, NoFlag},
- {0x00000001, NoFlag, 0x00000002, NoFlag},
- {0x00000000, ZFlag, 0x00000001, NoFlag}},
- {{0x00000000, ZCFlag, 0x00000001, CFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0x80000002, NFlag, 0x80000003, NFlag},
- {0x80000001, NFlag, 0x80000002, NFlag},
- {0x80000000, NVFlag, 0x80000001, NVFlag},
- {0x7fffffff, NoFlag, 0x80000000, NVFlag},
- {0x00000002, NoFlag, 0x00000003, NoFlag},
- {0x00000001, NoFlag, 0x00000002, NoFlag}},
- {{0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
- {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0xfffffffe, NFlag, 0xffffffff, NFlag},
- {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
- {0xfffffffc, NVFlag, 0xfffffffd, NVFlag},
- {0x7fffffff, NoFlag, 0x80000000, NVFlag},
- {0x7ffffffe, NoFlag, 0x7fffffff, NoFlag}},
- {{0x7ffffffe, CFlag, 0x7fffffff, CFlag},
- {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
- {0x00000000, ZCFlag, 0x00000001, CFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0xfffffffe, NVFlag, 0xffffffff, NVFlag},
- {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
- {0x80000000, NVFlag, 0x80000001, NVFlag},
- {0x7fffffff, NoFlag, 0x80000000, NVFlag}},
- {{0x7fffffff, CVFlag, 0x80000000, NCFlag},
- {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
- {0x00000001, CVFlag, 0x00000002, CVFlag},
- {0x00000000, ZCVFlag, 0x00000001, CVFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0xfffffffe, NFlag, 0xffffffff, NFlag},
- {0x80000001, NFlag, 0x80000002, NFlag},
- {0x80000000, NFlag, 0x80000001, NFlag}},
- {{0x80000000, NCFlag, 0x80000001, NCFlag},
- {0x7fffffff, CVFlag, 0x80000000, NCFlag},
- {0x00000002, CVFlag, 0x00000003, CVFlag},
- {0x00000001, CVFlag, 0x00000002, CVFlag},
- {0x00000000, ZCFlag, 0x00000001, CFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0x80000002, NFlag, 0x80000003, NFlag},
- {0x80000001, NFlag, 0x80000002, NFlag}},
- {{0xfffffffd, NCFlag, 0xfffffffe, NCFlag},
- {0xfffffffc, NCFlag, 0xfffffffd, NCFlag},
- {0x7fffffff, CVFlag, 0x80000000, NCFlag},
- {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
- {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
- {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0xfffffffe, NFlag, 0xffffffff, NFlag}},
- {{0xfffffffe, NCFlag, 0xffffffff, NCFlag},
- {0xfffffffd, NCFlag, 0xfffffffe, NCFlag},
- {0x80000000, NCFlag, 0x80000001, NCFlag},
- {0x7fffffff, CVFlag, 0x80000000, NCFlag},
- {0x7ffffffe, CFlag, 0x7fffffff, CFlag},
- {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
- {0x00000000, ZCFlag, 0x00000001, CFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag}}
- };
+ static const Expected expected_sbcs_w[input_count][input_count] =
+ {{{0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0xfffffffe, NFlag, 0xffffffff, NFlag},
+ {0x80000001, NFlag, 0x80000002, NFlag},
+ {0x80000000, NFlag, 0x80000001, NFlag},
+ {0x7fffffff, NoFlag, 0x80000000, NVFlag},
+ {0x7ffffffe, NoFlag, 0x7fffffff, NoFlag},
+ {0x00000001, NoFlag, 0x00000002, NoFlag},
+ {0x00000000, ZFlag, 0x00000001, NoFlag}},
+ {{0x00000000, ZCFlag, 0x00000001, CFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0x80000002, NFlag, 0x80000003, NFlag},
+ {0x80000001, NFlag, 0x80000002, NFlag},
+ {0x80000000, NVFlag, 0x80000001, NVFlag},
+ {0x7fffffff, NoFlag, 0x80000000, NVFlag},
+ {0x00000002, NoFlag, 0x00000003, NoFlag},
+ {0x00000001, NoFlag, 0x00000002, NoFlag}},
+ {{0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
+ {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0xfffffffe, NFlag, 0xffffffff, NFlag},
+ {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
+ {0xfffffffc, NVFlag, 0xfffffffd, NVFlag},
+ {0x7fffffff, NoFlag, 0x80000000, NVFlag},
+ {0x7ffffffe, NoFlag, 0x7fffffff, NoFlag}},
+ {{0x7ffffffe, CFlag, 0x7fffffff, CFlag},
+ {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
+ {0x00000000, ZCFlag, 0x00000001, CFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0xfffffffe, NVFlag, 0xffffffff, NVFlag},
+ {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
+ {0x80000000, NVFlag, 0x80000001, NVFlag},
+ {0x7fffffff, NoFlag, 0x80000000, NVFlag}},
+ {{0x7fffffff, CVFlag, 0x80000000, NCFlag},
+ {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
+ {0x00000001, CVFlag, 0x00000002, CVFlag},
+ {0x00000000, ZCVFlag, 0x00000001, CVFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0xfffffffe, NFlag, 0xffffffff, NFlag},
+ {0x80000001, NFlag, 0x80000002, NFlag},
+ {0x80000000, NFlag, 0x80000001, NFlag}},
+ {{0x80000000, NCFlag, 0x80000001, NCFlag},
+ {0x7fffffff, CVFlag, 0x80000000, NCFlag},
+ {0x00000002, CVFlag, 0x00000003, CVFlag},
+ {0x00000001, CVFlag, 0x00000002, CVFlag},
+ {0x00000000, ZCFlag, 0x00000001, CFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0x80000002, NFlag, 0x80000003, NFlag},
+ {0x80000001, NFlag, 0x80000002, NFlag}},
+ {{0xfffffffd, NCFlag, 0xfffffffe, NCFlag},
+ {0xfffffffc, NCFlag, 0xfffffffd, NCFlag},
+ {0x7fffffff, CVFlag, 0x80000000, NCFlag},
+ {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
+ {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
+ {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0xfffffffe, NFlag, 0xffffffff, NFlag}},
+ {{0xfffffffe, NCFlag, 0xffffffff, NCFlag},
+ {0xfffffffd, NCFlag, 0xfffffffe, NCFlag},
+ {0x80000000, NCFlag, 0x80000001, NCFlag},
+ {0x7fffffff, CVFlag, 0x80000000, NCFlag},
+ {0x7ffffffe, CFlag, 0x7fffffff, CFlag},
+ {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
+ {0x00000000, ZCFlag, 0x00000001, CFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag}}};
for (size_t left = 0; left < input_count; left++) {
for (size_t right = 0; right < input_count; right++) {
- const Expected & expected = expected_adcs_w[left][right];
- AdcsSbcsHelper(&MacroAssembler::Adcs, inputs[left], inputs[right], 0,
- expected.carry0_result, expected.carry0_flags);
- AdcsSbcsHelper(&MacroAssembler::Adcs, inputs[left], inputs[right], 1,
- expected.carry1_result, expected.carry1_flags);
+ const Expected& expected = expected_adcs_w[left][right];
+ AdcsSbcsHelper(&MacroAssembler::Adcs,
+ inputs[left],
+ inputs[right],
+ 0,
+ expected.carry0_result,
+ expected.carry0_flags);
+ AdcsSbcsHelper(&MacroAssembler::Adcs,
+ inputs[left],
+ inputs[right],
+ 1,
+ expected.carry1_result,
+ expected.carry1_flags);
}
}
for (size_t left = 0; left < input_count; left++) {
for (size_t right = 0; right < input_count; right++) {
- const Expected & expected = expected_sbcs_w[left][right];
- AdcsSbcsHelper(&MacroAssembler::Sbcs, inputs[left], inputs[right], 0,
- expected.carry0_result, expected.carry0_flags);
- AdcsSbcsHelper(&MacroAssembler::Sbcs, inputs[left], inputs[right], 1,
- expected.carry1_result, expected.carry1_flags);
+ const Expected& expected = expected_sbcs_w[left][right];
+ AdcsSbcsHelper(&MacroAssembler::Sbcs,
+ inputs[left],
+ inputs[right],
+ 0,
+ expected.carry0_result,
+ expected.carry0_flags);
+ AdcsSbcsHelper(&MacroAssembler::Sbcs,
+ inputs[left],
+ inputs[right],
+ 1,
+ expected.carry1_result,
+ expected.carry1_flags);
}
}
}
@@ -8794,7 +8993,6 @@
TEST(ccmp) {
SETUP();
- ALLOW_ASM();
START();
__ Mov(w16, 0);
@@ -8816,11 +9014,17 @@
__ Mrs(x3, NZCV);
// The MacroAssembler does not allow al as a condition.
- __ ccmp(x16, x16, NZCVFlag, al);
+ {
+ ExactAssemblyScope scope(&masm, kInstructionSize);
+ __ ccmp(x16, x16, NZCVFlag, al);
+ }
__ Mrs(x4, NZCV);
// The MacroAssembler does not allow nv as a condition.
- __ ccmp(x16, x16, NZCVFlag, nv);
+ {
+ ExactAssemblyScope scope(&masm, kInstructionSize);
+ __ ccmp(x16, x16, NZCVFlag, nv);
+ }
__ Mrs(x5, NZCV);
END();
@@ -8907,7 +9111,6 @@
TEST(csel_reg) {
SETUP();
- ALLOW_ASM();
START();
__ Mov(x16, 0);
@@ -8921,8 +9124,11 @@
__ Csinc(w3, w24, w25, pl);
// The MacroAssembler does not allow al or nv as a condition.
- __ csel(w13, w24, w25, al);
- __ csel(x14, x24, x25, nv);
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ csel(w13, w24, w25, al);
+ __ csel(x14, x24, x25, nv);
+ }
__ Cmp(x16, Operand(1));
__ Csinv(x4, x24, x25, gt);
@@ -8937,8 +9143,11 @@
__ Cneg(x12, x24, ne);
// The MacroAssembler does not allow al or nv as a condition.
- __ csel(w15, w24, w25, al);
- __ csel(x17, x24, x25, nv);
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ csel(w15, w24, w25, al);
+ __ csel(x17, x24, x25, nv);
+ }
END();
@@ -9055,7 +9264,6 @@
TEST(lslv) {
SETUP();
- ALLOW_ASM();
uint64_t value = 0x0123456789abcdef;
int shift[] = {1, 3, 5, 9, 17, 33};
@@ -9070,7 +9278,10 @@
__ Mov(w6, shift[5]);
// The MacroAssembler does not allow zr as an argument.
- __ lslv(x0, x0, xzr);
+ {
+ ExactAssemblyScope scope(&masm, kInstructionSize);
+ __ lslv(x0, x0, xzr);
+ }
__ Lsl(x16, x0, x1);
__ Lsl(x17, x0, x2);
@@ -9109,7 +9320,6 @@
TEST(lsrv) {
SETUP();
- ALLOW_ASM();
uint64_t value = 0x0123456789abcdef;
int shift[] = {1, 3, 5, 9, 17, 33};
@@ -9124,7 +9334,10 @@
__ Mov(w6, shift[5]);
// The MacroAssembler does not allow zr as an argument.
- __ lsrv(x0, x0, xzr);
+ {
+ ExactAssemblyScope scope(&masm, kInstructionSize);
+ __ lsrv(x0, x0, xzr);
+ }
__ Lsr(x16, x0, x1);
__ Lsr(x17, x0, x2);
@@ -9165,7 +9378,6 @@
TEST(asrv) {
SETUP();
- ALLOW_ASM();
int64_t value = 0xfedcba98fedcba98;
int shift[] = {1, 3, 5, 9, 17, 33};
@@ -9180,7 +9392,10 @@
__ Mov(w6, shift[5]);
// The MacroAssembler does not allow zr as an argument.
- __ asrv(x0, x0, xzr);
+ {
+ ExactAssemblyScope scope(&masm, kInstructionSize);
+ __ asrv(x0, x0, xzr);
+ }
__ Asr(x16, x0, x1);
__ Asr(x17, x0, x2);
@@ -9221,7 +9436,6 @@
TEST(rorv) {
SETUP();
- ALLOW_ASM();
uint64_t value = 0x0123456789abcdef;
int shift[] = {4, 8, 12, 16, 24, 36};
@@ -9236,7 +9450,10 @@
__ Mov(w6, shift[5]);
// The MacroAssembler does not allow zr as an argument.
- __ rorv(x0, x0, xzr);
+ {
+ ExactAssemblyScope scope(&masm, kInstructionSize);
+ __ rorv(x0, x0, xzr);
+ }
__ Ror(x16, x0, x1);
__ Ror(x17, x0, x2);
@@ -9275,7 +9492,6 @@
TEST(bfm) {
SETUP();
- ALLOW_ASM();
START();
__ Mov(x1, 0x0123456789abcdef);
@@ -9287,7 +9503,6 @@
__ Mov(w20, 0x88888888);
__ Mov(w21, 0x88888888);
- // There are no macro instruction for bfm.
__ Bfm(x10, x1, 16, 31);
__ Bfm(x11, x1, 32, 15);
@@ -9317,13 +9532,11 @@
TEST(sbfm) {
SETUP();
- ALLOW_ASM();
START();
__ Mov(x1, 0x0123456789abcdef);
__ Mov(x2, 0xfedcba9876543210);
- // There are no macro instruction for sbfm.
__ Sbfm(x10, x1, 16, 31);
__ Sbfm(x11, x1, 32, 15);
__ Sbfm(x12, x1, 32, 47);
@@ -9381,7 +9594,6 @@
TEST(ubfm) {
SETUP();
- ALLOW_ASM();
START();
__ Mov(x1, 0x0123456789abcdef);
@@ -9390,7 +9602,6 @@
__ Mov(x10, 0x8888888888888888);
__ Mov(x11, 0x8888888888888888);
- // There are no macro instruction for ubfm.
__ Ubfm(x10, x1, 16, 31);
__ Ubfm(x11, x1, 32, 15);
__ Ubfm(x12, x1, 32, 47);
@@ -9706,9 +9917,13 @@
}
-static void FmaddFmsubHelper(double n, double m, double a,
- double fmadd, double fmsub,
- double fnmadd, double fnmsub) {
+static void FmaddFmsubHelper(double n,
+ double m,
+ double a,
+ double fmadd,
+ double fmsub,
+ double fnmadd,
+ double fnmsub) {
SETUP();
START();
@@ -9742,28 +9957,40 @@
// Check the sign of exact zeroes.
// n m a fmadd fmsub fnmadd fnmsub
- FmaddFmsubHelper(-0.0, +0.0, -0.0, -0.0, +0.0, +0.0, +0.0);
- FmaddFmsubHelper(+0.0, +0.0, -0.0, +0.0, -0.0, +0.0, +0.0);
- FmaddFmsubHelper(+0.0, +0.0, +0.0, +0.0, +0.0, -0.0, +0.0);
- FmaddFmsubHelper(-0.0, +0.0, +0.0, +0.0, +0.0, +0.0, -0.0);
- FmaddFmsubHelper(+0.0, -0.0, -0.0, -0.0, +0.0, +0.0, +0.0);
- FmaddFmsubHelper(-0.0, -0.0, -0.0, +0.0, -0.0, +0.0, +0.0);
- FmaddFmsubHelper(-0.0, -0.0, +0.0, +0.0, +0.0, -0.0, +0.0);
- FmaddFmsubHelper(+0.0, -0.0, +0.0, +0.0, +0.0, +0.0, -0.0);
+ FmaddFmsubHelper(-0.0, +0.0, -0.0, -0.0, +0.0, +0.0, +0.0);
+ FmaddFmsubHelper(+0.0, +0.0, -0.0, +0.0, -0.0, +0.0, +0.0);
+ FmaddFmsubHelper(+0.0, +0.0, +0.0, +0.0, +0.0, -0.0, +0.0);
+ FmaddFmsubHelper(-0.0, +0.0, +0.0, +0.0, +0.0, +0.0, -0.0);
+ FmaddFmsubHelper(+0.0, -0.0, -0.0, -0.0, +0.0, +0.0, +0.0);
+ FmaddFmsubHelper(-0.0, -0.0, -0.0, +0.0, -0.0, +0.0, +0.0);
+ FmaddFmsubHelper(-0.0, -0.0, +0.0, +0.0, +0.0, -0.0, +0.0);
+ FmaddFmsubHelper(+0.0, -0.0, +0.0, +0.0, +0.0, +0.0, -0.0);
// Check NaN generation.
- FmaddFmsubHelper(kFP64PositiveInfinity, 0.0, 42.0,
- kFP64DefaultNaN, kFP64DefaultNaN,
- kFP64DefaultNaN, kFP64DefaultNaN);
- FmaddFmsubHelper(0.0, kFP64PositiveInfinity, 42.0,
- kFP64DefaultNaN, kFP64DefaultNaN,
- kFP64DefaultNaN, kFP64DefaultNaN);
- FmaddFmsubHelper(kFP64PositiveInfinity, 1.0, kFP64PositiveInfinity,
- kFP64PositiveInfinity, // inf + ( inf * 1) = inf
- kFP64DefaultNaN, // inf + (-inf * 1) = NaN
- kFP64NegativeInfinity, // -inf + (-inf * 1) = -inf
- kFP64DefaultNaN); // -inf + ( inf * 1) = NaN
- FmaddFmsubHelper(kFP64NegativeInfinity, 1.0, kFP64PositiveInfinity,
+ FmaddFmsubHelper(kFP64PositiveInfinity,
+ 0.0,
+ 42.0,
+ kFP64DefaultNaN,
+ kFP64DefaultNaN,
+ kFP64DefaultNaN,
+ kFP64DefaultNaN);
+ FmaddFmsubHelper(0.0,
+ kFP64PositiveInfinity,
+ 42.0,
+ kFP64DefaultNaN,
+ kFP64DefaultNaN,
+ kFP64DefaultNaN,
+ kFP64DefaultNaN);
+ FmaddFmsubHelper(kFP64PositiveInfinity,
+ 1.0,
+ kFP64PositiveInfinity,
+ kFP64PositiveInfinity, // inf + ( inf * 1) = inf
+ kFP64DefaultNaN, // inf + (-inf * 1) = NaN
+ kFP64NegativeInfinity, // -inf + (-inf * 1) = -inf
+ kFP64DefaultNaN); // -inf + ( inf * 1) = NaN
+ FmaddFmsubHelper(kFP64NegativeInfinity,
+ 1.0,
+ kFP64PositiveInfinity,
kFP64DefaultNaN, // inf + (-inf * 1) = NaN
kFP64PositiveInfinity, // inf + ( inf * 1) = inf
kFP64DefaultNaN, // -inf + ( inf * 1) = NaN
@@ -9771,9 +9998,13 @@
}
-static void FmaddFmsubHelper(float n, float m, float a,
- float fmadd, float fmsub,
- float fnmadd, float fnmsub) {
+static void FmaddFmsubHelper(float n,
+ float m,
+ float a,
+ float fmadd,
+ float fmsub,
+ float fnmadd,
+ float fnmsub) {
SETUP();
START();
@@ -9817,18 +10048,30 @@
FmaddFmsubHelper(+0.0f, -0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);
// Check NaN generation.
- FmaddFmsubHelper(kFP32PositiveInfinity, 0.0f, 42.0f,
- kFP32DefaultNaN, kFP32DefaultNaN,
- kFP32DefaultNaN, kFP32DefaultNaN);
- FmaddFmsubHelper(0.0f, kFP32PositiveInfinity, 42.0f,
- kFP32DefaultNaN, kFP32DefaultNaN,
- kFP32DefaultNaN, kFP32DefaultNaN);
- FmaddFmsubHelper(kFP32PositiveInfinity, 1.0f, kFP32PositiveInfinity,
- kFP32PositiveInfinity, // inf + ( inf * 1) = inf
- kFP32DefaultNaN, // inf + (-inf * 1) = NaN
- kFP32NegativeInfinity, // -inf + (-inf * 1) = -inf
- kFP32DefaultNaN); // -inf + ( inf * 1) = NaN
- FmaddFmsubHelper(kFP32NegativeInfinity, 1.0f, kFP32PositiveInfinity,
+ FmaddFmsubHelper(kFP32PositiveInfinity,
+ 0.0f,
+ 42.0f,
+ kFP32DefaultNaN,
+ kFP32DefaultNaN,
+ kFP32DefaultNaN,
+ kFP32DefaultNaN);
+ FmaddFmsubHelper(0.0f,
+ kFP32PositiveInfinity,
+ 42.0f,
+ kFP32DefaultNaN,
+ kFP32DefaultNaN,
+ kFP32DefaultNaN,
+ kFP32DefaultNaN);
+ FmaddFmsubHelper(kFP32PositiveInfinity,
+ 1.0f,
+ kFP32PositiveInfinity,
+ kFP32PositiveInfinity, // inf + ( inf * 1) = inf
+ kFP32DefaultNaN, // inf + (-inf * 1) = NaN
+ kFP32NegativeInfinity, // -inf + (-inf * 1) = -inf
+ kFP32DefaultNaN); // -inf + ( inf * 1) = NaN
+ FmaddFmsubHelper(kFP32NegativeInfinity,
+ 1.0f,
+ kFP32PositiveInfinity,
kFP32DefaultNaN, // inf + (-inf * 1) = NaN
kFP32PositiveInfinity, // inf + ( inf * 1) = inf
kFP32DefaultNaN, // -inf + ( inf * 1) = NaN
@@ -9903,18 +10146,34 @@
FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
// A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
- FmaddFmsubHelper(0, kFP64PositiveInfinity, qa,
- kFP64DefaultNaN, kFP64DefaultNaN,
- kFP64DefaultNaN, kFP64DefaultNaN);
- FmaddFmsubHelper(kFP64PositiveInfinity, 0, qa,
- kFP64DefaultNaN, kFP64DefaultNaN,
- kFP64DefaultNaN, kFP64DefaultNaN);
- FmaddFmsubHelper(0, kFP64NegativeInfinity, qa,
- kFP64DefaultNaN, kFP64DefaultNaN,
- kFP64DefaultNaN, kFP64DefaultNaN);
- FmaddFmsubHelper(kFP64NegativeInfinity, 0, qa,
- kFP64DefaultNaN, kFP64DefaultNaN,
- kFP64DefaultNaN, kFP64DefaultNaN);
+ FmaddFmsubHelper(0,
+ kFP64PositiveInfinity,
+ qa,
+ kFP64DefaultNaN,
+ kFP64DefaultNaN,
+ kFP64DefaultNaN,
+ kFP64DefaultNaN);
+ FmaddFmsubHelper(kFP64PositiveInfinity,
+ 0,
+ qa,
+ kFP64DefaultNaN,
+ kFP64DefaultNaN,
+ kFP64DefaultNaN,
+ kFP64DefaultNaN);
+ FmaddFmsubHelper(0,
+ kFP64NegativeInfinity,
+ qa,
+ kFP64DefaultNaN,
+ kFP64DefaultNaN,
+ kFP64DefaultNaN,
+ kFP64DefaultNaN);
+ FmaddFmsubHelper(kFP64NegativeInfinity,
+ 0,
+ qa,
+ kFP64DefaultNaN,
+ kFP64DefaultNaN,
+ kFP64DefaultNaN,
+ kFP64DefaultNaN);
}
@@ -9985,18 +10244,34 @@
FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
// A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
- FmaddFmsubHelper(0, kFP32PositiveInfinity, qa,
- kFP32DefaultNaN, kFP32DefaultNaN,
- kFP32DefaultNaN, kFP32DefaultNaN);
- FmaddFmsubHelper(kFP32PositiveInfinity, 0, qa,
- kFP32DefaultNaN, kFP32DefaultNaN,
- kFP32DefaultNaN, kFP32DefaultNaN);
- FmaddFmsubHelper(0, kFP32NegativeInfinity, qa,
- kFP32DefaultNaN, kFP32DefaultNaN,
- kFP32DefaultNaN, kFP32DefaultNaN);
- FmaddFmsubHelper(kFP32NegativeInfinity, 0, qa,
- kFP32DefaultNaN, kFP32DefaultNaN,
- kFP32DefaultNaN, kFP32DefaultNaN);
+ FmaddFmsubHelper(0,
+ kFP32PositiveInfinity,
+ qa,
+ kFP32DefaultNaN,
+ kFP32DefaultNaN,
+ kFP32DefaultNaN,
+ kFP32DefaultNaN);
+ FmaddFmsubHelper(kFP32PositiveInfinity,
+ 0,
+ qa,
+ kFP32DefaultNaN,
+ kFP32DefaultNaN,
+ kFP32DefaultNaN,
+ kFP32DefaultNaN);
+ FmaddFmsubHelper(0,
+ kFP32NegativeInfinity,
+ qa,
+ kFP32DefaultNaN,
+ kFP32DefaultNaN,
+ kFP32DefaultNaN,
+ kFP32DefaultNaN);
+ FmaddFmsubHelper(kFP32NegativeInfinity,
+ 0,
+ qa,
+ kFP32DefaultNaN,
+ kFP32DefaultNaN,
+ kFP32DefaultNaN,
+ kFP32DefaultNaN);
}
@@ -10090,8 +10365,7 @@
}
}
- if ((n == 0.0) && (m == 0.0) &&
- (copysign(1.0, n) != copysign(1.0, m))) {
+ if ((n == 0.0) && (m == 0.0) && (copysign(1.0, n) != copysign(1.0, m))) {
return min ? -0.0 : 0.0;
}
@@ -10132,8 +10406,7 @@
}
}
- if ((n == 0.0) && (m == 0.0) &&
- (copysign(1.0, n) != copysign(1.0, m))) {
+ if ((n == 0.0) && (m == 0.0) && (copysign(1.0, n) != copysign(1.0, m))) {
return min ? -0.0 : 0.0;
}
@@ -10141,8 +10414,8 @@
}
-static void FminFmaxDoubleHelper(double n, double m, double min, double max,
- double minnm, double maxnm) {
+static void FminFmaxDoubleHelper(
+ double n, double m, double min, double max, double minnm, double maxnm) {
SETUP();
START();
@@ -10181,33 +10454,52 @@
// Bootstrap tests.
FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0);
FminFmaxDoubleHelper(0, 1, 0, 1, 0, 1);
- FminFmaxDoubleHelper(kFP64PositiveInfinity, kFP64NegativeInfinity,
- kFP64NegativeInfinity, kFP64PositiveInfinity,
- kFP64NegativeInfinity, kFP64PositiveInfinity);
- FminFmaxDoubleHelper(snan, 0,
- snan_processed, snan_processed,
- snan_processed, snan_processed);
- FminFmaxDoubleHelper(0, snan,
- snan_processed, snan_processed,
- snan_processed, snan_processed);
- FminFmaxDoubleHelper(qnan, 0,
- qnan_processed, qnan_processed,
- 0, 0);
- FminFmaxDoubleHelper(0, qnan,
- qnan_processed, qnan_processed,
- 0, 0);
- FminFmaxDoubleHelper(qnan, snan,
- snan_processed, snan_processed,
- snan_processed, snan_processed);
- FminFmaxDoubleHelper(snan, qnan,
- snan_processed, snan_processed,
- snan_processed, snan_processed);
+ FminFmaxDoubleHelper(kFP64PositiveInfinity,
+ kFP64NegativeInfinity,
+ kFP64NegativeInfinity,
+ kFP64PositiveInfinity,
+ kFP64NegativeInfinity,
+ kFP64PositiveInfinity);
+ FminFmaxDoubleHelper(snan,
+ 0,
+ snan_processed,
+ snan_processed,
+ snan_processed,
+ snan_processed);
+ FminFmaxDoubleHelper(0,
+ snan,
+ snan_processed,
+ snan_processed,
+ snan_processed,
+ snan_processed);
+ FminFmaxDoubleHelper(qnan, 0, qnan_processed, qnan_processed, 0, 0);
+ FminFmaxDoubleHelper(0, qnan, qnan_processed, qnan_processed, 0, 0);
+ FminFmaxDoubleHelper(qnan,
+ snan,
+ snan_processed,
+ snan_processed,
+ snan_processed,
+ snan_processed);
+ FminFmaxDoubleHelper(snan,
+ qnan,
+ snan_processed,
+ snan_processed,
+ snan_processed,
+ snan_processed);
// Iterate over all combinations of inputs.
- double inputs[] = { DBL_MAX, DBL_MIN, 1.0, 0.0,
- -DBL_MAX, -DBL_MIN, -1.0, -0.0,
- kFP64PositiveInfinity, kFP64NegativeInfinity,
- kFP64QuietNaN, kFP64SignallingNaN };
+ double inputs[] = {DBL_MAX,
+ DBL_MIN,
+ 1.0,
+ 0.0,
+ -DBL_MAX,
+ -DBL_MIN,
+ -1.0,
+ -0.0,
+ kFP64PositiveInfinity,
+ kFP64NegativeInfinity,
+ kFP64QuietNaN,
+ kFP64SignallingNaN};
const int count = sizeof(inputs) / sizeof(inputs[0]);
@@ -10215,7 +10507,8 @@
double n = inputs[in];
for (int im = 0; im < count; im++) {
double m = inputs[im];
- FminFmaxDoubleHelper(n, m,
+ FminFmaxDoubleHelper(n,
+ m,
MinMaxHelper(n, m, true),
MinMaxHelper(n, m, false),
MinMaxHelper(n, m, true, kFP64PositiveInfinity),
@@ -10225,8 +10518,8 @@
}
-static void FminFmaxFloatHelper(float n, float m, float min, float max,
- float minnm, float maxnm) {
+static void FminFmaxFloatHelper(
+ float n, float m, float min, float max, float minnm, float maxnm) {
SETUP();
START();
@@ -10265,33 +10558,52 @@
// Bootstrap tests.
FminFmaxFloatHelper(0, 0, 0, 0, 0, 0);
FminFmaxFloatHelper(0, 1, 0, 1, 0, 1);
- FminFmaxFloatHelper(kFP32PositiveInfinity, kFP32NegativeInfinity,
- kFP32NegativeInfinity, kFP32PositiveInfinity,
- kFP32NegativeInfinity, kFP32PositiveInfinity);
- FminFmaxFloatHelper(snan, 0,
- snan_processed, snan_processed,
- snan_processed, snan_processed);
- FminFmaxFloatHelper(0, snan,
- snan_processed, snan_processed,
- snan_processed, snan_processed);
- FminFmaxFloatHelper(qnan, 0,
- qnan_processed, qnan_processed,
- 0, 0);
- FminFmaxFloatHelper(0, qnan,
- qnan_processed, qnan_processed,
- 0, 0);
- FminFmaxFloatHelper(qnan, snan,
- snan_processed, snan_processed,
- snan_processed, snan_processed);
- FminFmaxFloatHelper(snan, qnan,
- snan_processed, snan_processed,
- snan_processed, snan_processed);
+ FminFmaxFloatHelper(kFP32PositiveInfinity,
+ kFP32NegativeInfinity,
+ kFP32NegativeInfinity,
+ kFP32PositiveInfinity,
+ kFP32NegativeInfinity,
+ kFP32PositiveInfinity);
+ FminFmaxFloatHelper(snan,
+ 0,
+ snan_processed,
+ snan_processed,
+ snan_processed,
+ snan_processed);
+ FminFmaxFloatHelper(0,
+ snan,
+ snan_processed,
+ snan_processed,
+ snan_processed,
+ snan_processed);
+ FminFmaxFloatHelper(qnan, 0, qnan_processed, qnan_processed, 0, 0);
+ FminFmaxFloatHelper(0, qnan, qnan_processed, qnan_processed, 0, 0);
+ FminFmaxFloatHelper(qnan,
+ snan,
+ snan_processed,
+ snan_processed,
+ snan_processed,
+ snan_processed);
+ FminFmaxFloatHelper(snan,
+ qnan,
+ snan_processed,
+ snan_processed,
+ snan_processed,
+ snan_processed);
// Iterate over all combinations of inputs.
- float inputs[] = { FLT_MAX, FLT_MIN, 1.0, 0.0,
- -FLT_MAX, -FLT_MIN, -1.0, -0.0,
- kFP32PositiveInfinity, kFP32NegativeInfinity,
- kFP32QuietNaN, kFP32SignallingNaN };
+ float inputs[] = {FLT_MAX,
+ FLT_MIN,
+ 1.0,
+ 0.0,
+ -FLT_MAX,
+ -FLT_MIN,
+ -1.0,
+ -0.0,
+ kFP32PositiveInfinity,
+ kFP32NegativeInfinity,
+ kFP32QuietNaN,
+ kFP32SignallingNaN};
const int count = sizeof(inputs) / sizeof(inputs[0]);
@@ -10299,7 +10611,8 @@
float n = inputs[in];
for (int im = 0; im < count; im++) {
float m = inputs[im];
- FminFmaxFloatHelper(n, m,
+ FminFmaxFloatHelper(n,
+ m,
MinMaxHelper(n, m, true),
MinMaxHelper(n, m, false),
MinMaxHelper(n, m, true, kFP32PositiveInfinity),
@@ -10311,7 +10624,6 @@
TEST(fccmp) {
SETUP();
- ALLOW_ASM();
START();
__ Fmov(s16, 0.0);
@@ -10357,10 +10669,16 @@
__ Mrs(x7, NZCV);
// The Macro Assembler does not allow al or nv as condition.
- __ fccmp(s16, s16, NFlag, al);
+ {
+ ExactAssemblyScope scope(&masm, kInstructionSize);
+ __ fccmp(s16, s16, NFlag, al);
+ }
__ Mrs(x8, NZCV);
- __ fccmp(d18, d18, NFlag, nv);
+ {
+ ExactAssemblyScope scope(&masm, kInstructionSize);
+ __ fccmp(d18, d18, NFlag, nv);
+ }
__ Mrs(x9, NZCV);
__ Cmp(x20, 0);
@@ -10502,7 +10820,6 @@
TEST(fcsel) {
SETUP();
- ALLOW_ASM();
START();
__ Mov(x16, 0);
@@ -10517,8 +10834,11 @@
__ Fcsel(d2, d18, d19, eq);
__ Fcsel(d3, d18, d19, ne);
// The Macro Assembler does not allow al or nv as condition.
- __ fcsel(s4, s16, s17, al);
- __ fcsel(d5, d18, d19, nv);
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ fcsel(s4, s16, s17, al);
+ __ fcsel(d5, d18, d19, nv);
+ }
END();
RUN();
@@ -11301,8 +11621,8 @@
__ Fmov(s26, -0.0);
__ Fmov(s27, FLT_MAX);
__ Fmov(s28, FLT_MIN);
- __ Fmov(s29, RawbitsToFloat(0x7fc12345)); // Quiet NaN.
- __ Fmov(s30, RawbitsToFloat(0x7f812345)); // Signalling NaN.
+ __ Fmov(s29, RawbitsToFloat(0x7fc12345)); // Quiet NaN.
+ __ Fmov(s30, RawbitsToFloat(0x7f812345)); // Signalling NaN.
__ Fcvt(d0, s16);
__ Fcvt(d1, s17);
@@ -11370,8 +11690,8 @@
__ Fmov(d26, -0.0);
__ Fmov(d27, FLT_MAX);
__ Fmov(d28, FLT_MIN);
- __ Fmov(d29, RawbitsToDouble(0x7ff82468a0000000)); // Quiet NaN.
- __ Fmov(d30, RawbitsToDouble(0x7ff02468a0000000)); // Signalling NaN.
+ __ Fmov(d29, RawbitsToDouble(0x7ff82468a0000000)); // Quiet NaN.
+ __ Fmov(d30, RawbitsToDouble(0x7ff02468a0000000)); // Signalling NaN.
__ Fcvt(s0, d16);
__ Fcvt(s1, d17);
@@ -11548,15 +11868,15 @@
__ Fmov(s19, -2.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
- __ Fmov(s22, 0x7fffff8000000000); // Largest float < INT64_MAX.
- __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(s22, 0x7fffff8000000000); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
__ Fmov(d24, 1.1);
__ Fmov(d25, 2.5);
__ Fmov(d26, -2.5);
__ Fmov(d27, kFP64PositiveInfinity);
__ Fmov(d28, kFP64NegativeInfinity);
- __ Fmov(d29, 0x7ffffffffffffc00); // Largest double < INT64_MAX.
- __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+ __ Fmov(d29, 0x7ffffffffffffc00); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
__ Fcvtas(w0, s0);
__ Fcvtas(w1, s1);
@@ -11736,8 +12056,8 @@
__ Fmov(s3, -1.5);
__ Fmov(s4, kFP32PositiveInfinity);
__ Fmov(s5, kFP32NegativeInfinity);
- __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
- __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
__ Fmov(d8, 1.0);
__ Fmov(d9, 1.1);
__ Fmov(d10, 1.5);
@@ -11751,15 +12071,15 @@
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
- __ Fmov(s22, 0x7fffff8000000000); // Largest float < INT64_MAX.
- __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(s22, 0x7fffff8000000000); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
__ Fmov(d24, 1.1);
__ Fmov(d25, 1.5);
__ Fmov(d26, -1.5);
__ Fmov(d27, kFP64PositiveInfinity);
__ Fmov(d28, kFP64NegativeInfinity);
- __ Fmov(d29, 0x7ffffffffffffc00); // Largest double < INT64_MAX.
- __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+ __ Fmov(d29, 0x7ffffffffffffc00); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
__ Fcvtms(w0, s0);
__ Fcvtms(w1, s1);
@@ -11840,8 +12160,8 @@
__ Fmov(s3, -1.5);
__ Fmov(s4, kFP32PositiveInfinity);
__ Fmov(s5, kFP32NegativeInfinity);
- __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
- __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
__ Fmov(d8, 1.0);
__ Fmov(d9, 1.1);
__ Fmov(d10, 1.5);
@@ -11855,15 +12175,15 @@
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
- __ Fmov(s22, 0x7fffff8000000000); // Largest float < INT64_MAX.
- __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(s22, 0x7fffff8000000000); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
__ Fmov(d24, 1.1);
__ Fmov(d25, 1.5);
__ Fmov(d26, -1.5);
__ Fmov(d27, kFP64PositiveInfinity);
__ Fmov(d28, kFP64NegativeInfinity);
- __ Fmov(d29, 0x7ffffffffffffc00); // Largest double < INT64_MAX.
- __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+ __ Fmov(d29, 0x7ffffffffffffc00); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
__ Fcvtmu(w0, s0);
__ Fcvtmu(w1, s1);
@@ -11942,8 +12262,8 @@
__ Fmov(s3, -1.5);
__ Fmov(s4, kFP32PositiveInfinity);
__ Fmov(s5, kFP32NegativeInfinity);
- __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
- __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
__ Fmov(d8, 1.0);
__ Fmov(d9, 1.1);
__ Fmov(d10, 1.5);
@@ -11957,15 +12277,15 @@
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
- __ Fmov(s22, 0x7fffff8000000000); // Largest float < INT64_MAX.
- __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(s22, 0x7fffff8000000000); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
__ Fmov(d24, 1.1);
__ Fmov(d25, 1.5);
__ Fmov(d26, -1.5);
__ Fmov(d27, kFP64PositiveInfinity);
__ Fmov(d28, kFP64NegativeInfinity);
- __ Fmov(d29, 0x7ffffffffffffc00); // Largest double < INT64_MAX.
- __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+ __ Fmov(d29, 0x7ffffffffffffc00); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
__ Fcvtns(w0, s0);
__ Fcvtns(w1, s1);
@@ -12145,8 +12465,8 @@
__ Fmov(s3, -1.5);
__ Fmov(s4, kFP32PositiveInfinity);
__ Fmov(s5, kFP32NegativeInfinity);
- __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
- __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
__ Fmov(d8, 1.0);
__ Fmov(d9, 1.1);
__ Fmov(d10, 1.5);
@@ -12160,15 +12480,15 @@
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
- __ Fmov(s22, 0x7fffff8000000000); // Largest float < INT64_MAX.
- __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(s22, 0x7fffff8000000000); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
__ Fmov(d24, 1.1);
__ Fmov(d25, 1.5);
__ Fmov(d26, -1.5);
__ Fmov(d27, kFP64PositiveInfinity);
__ Fmov(d28, kFP64NegativeInfinity);
- __ Fmov(d29, 0x7ffffffffffffc00); // Largest double < INT64_MAX.
- __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+ __ Fmov(d29, 0x7ffffffffffffc00); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
__ Fcvtzs(w0, s0);
__ Fcvtzs(w1, s1);
@@ -12248,8 +12568,8 @@
__ Fmov(s3, -1.5);
__ Fmov(s4, kFP32PositiveInfinity);
__ Fmov(s5, kFP32NegativeInfinity);
- __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
- __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
__ Fmov(d8, 1.0);
__ Fmov(d9, 1.1);
__ Fmov(d10, 1.5);
@@ -12263,15 +12583,15 @@
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
- __ Fmov(s22, 0x7fffff8000000000); // Largest float < INT64_MAX.
- __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(s22, 0x7fffff8000000000); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
__ Fmov(d24, 1.1);
__ Fmov(d25, 1.5);
__ Fmov(d26, -1.5);
__ Fmov(d27, kFP64PositiveInfinity);
__ Fmov(d28, kFP64NegativeInfinity);
- __ Fmov(d29, 0x7ffffffffffffc00); // Largest double < INT64_MAX.
- __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+ __ Fmov(d29, 0x7ffffffffffffc00); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
__ Fcvtzu(w0, s0);
__ Fcvtzu(w1, s1);
@@ -12903,7 +13223,6 @@
TEST(zero_dest) {
SETUP();
- ALLOW_ASM();
RegisterDump before;
START();
@@ -12914,39 +13233,42 @@
__ Mov(x0, 0);
__ Mov(x1, literal_base);
for (unsigned i = 2; i < x30.GetCode(); i++) {
- __ Add(Register::GetXRegFromCode(i), Register::GetXRegFromCode(i-1), x1);
+ __ Add(Register::GetXRegFromCode(i), Register::GetXRegFromCode(i - 1), x1);
}
before.Dump(&masm);
// All of these instructions should be NOPs in these forms, but have
// alternate forms which can write into the stack pointer.
- __ add(xzr, x0, x1);
- __ add(xzr, x1, xzr);
- __ add(xzr, xzr, x1);
+ {
+ ExactAssemblyScope scope(&masm, 3 * 7 * kInstructionSize);
+ __ add(xzr, x0, x1);
+ __ add(xzr, x1, xzr);
+ __ add(xzr, xzr, x1);
- __ and_(xzr, x0, x2);
- __ and_(xzr, x2, xzr);
- __ and_(xzr, xzr, x2);
+ __ and_(xzr, x0, x2);
+ __ and_(xzr, x2, xzr);
+ __ and_(xzr, xzr, x2);
- __ bic(xzr, x0, x3);
- __ bic(xzr, x3, xzr);
- __ bic(xzr, xzr, x3);
+ __ bic(xzr, x0, x3);
+ __ bic(xzr, x3, xzr);
+ __ bic(xzr, xzr, x3);
- __ eon(xzr, x0, x4);
- __ eon(xzr, x4, xzr);
- __ eon(xzr, xzr, x4);
+ __ eon(xzr, x0, x4);
+ __ eon(xzr, x4, xzr);
+ __ eon(xzr, xzr, x4);
- __ eor(xzr, x0, x5);
- __ eor(xzr, x5, xzr);
- __ eor(xzr, xzr, x5);
+ __ eor(xzr, x0, x5);
+ __ eor(xzr, x5, xzr);
+ __ eor(xzr, xzr, x5);
- __ orr(xzr, x0, x6);
- __ orr(xzr, x6, xzr);
- __ orr(xzr, xzr, x6);
+ __ orr(xzr, x0, x6);
+ __ orr(xzr, x6, xzr);
+ __ orr(xzr, xzr, x6);
- __ sub(xzr, x0, x7);
- __ sub(xzr, x7, xzr);
- __ sub(xzr, xzr, x7);
+ __ sub(xzr, x0, x7);
+ __ sub(xzr, x7, xzr);
+ __ sub(xzr, xzr, x7);
+ }
// Swap the saved stack pointer with the real one. If sp was written
// during the test, it will show up in x30. This is done because the test
@@ -12970,7 +13292,6 @@
TEST(zero_dest_setflags) {
SETUP();
- ALLOW_ASM();
RegisterDump before;
START();
@@ -12981,37 +13302,49 @@
__ Mov(x0, 0);
__ Mov(x1, literal_base);
for (int i = 2; i < 30; i++) {
- __ Add(Register::GetXRegFromCode(i), Register::GetXRegFromCode(i-1), x1);
+ __ Add(Register::GetXRegFromCode(i), Register::GetXRegFromCode(i - 1), x1);
}
before.Dump(&masm);
// All of these instructions should only write to the flags in these forms,
// but have alternate forms which can write into the stack pointer.
- __ adds(xzr, x0, Operand(x1, UXTX));
- __ adds(xzr, x1, Operand(xzr, UXTX));
- __ adds(xzr, x1, 1234);
- __ adds(xzr, x0, x1);
- __ adds(xzr, x1, xzr);
- __ adds(xzr, xzr, x1);
+ {
+ ExactAssemblyScope scope(&masm, 6 * kInstructionSize);
+ __ adds(xzr, x0, Operand(x1, UXTX));
+ __ adds(xzr, x1, Operand(xzr, UXTX));
+ __ adds(xzr, x1, 1234);
+ __ adds(xzr, x0, x1);
+ __ adds(xzr, x1, xzr);
+ __ adds(xzr, xzr, x1);
+ }
- __ ands(xzr, x2, ~0xf);
- __ ands(xzr, xzr, ~0xf);
- __ ands(xzr, x0, x2);
- __ ands(xzr, x2, xzr);
- __ ands(xzr, xzr, x2);
+ {
+ ExactAssemblyScope scope(&masm, 5 * kInstructionSize);
+ __ ands(xzr, x2, ~0xf);
+ __ ands(xzr, xzr, ~0xf);
+ __ ands(xzr, x0, x2);
+ __ ands(xzr, x2, xzr);
+ __ ands(xzr, xzr, x2);
+ }
- __ bics(xzr, x3, ~0xf);
- __ bics(xzr, xzr, ~0xf);
- __ bics(xzr, x0, x3);
- __ bics(xzr, x3, xzr);
- __ bics(xzr, xzr, x3);
+ {
+ ExactAssemblyScope scope(&masm, 5 * kInstructionSize);
+ __ bics(xzr, x3, ~0xf);
+ __ bics(xzr, xzr, ~0xf);
+ __ bics(xzr, x0, x3);
+ __ bics(xzr, x3, xzr);
+ __ bics(xzr, xzr, x3);
+ }
- __ subs(xzr, x0, Operand(x3, UXTX));
- __ subs(xzr, x3, Operand(xzr, UXTX));
- __ subs(xzr, x3, 1234);
- __ subs(xzr, x0, x3);
- __ subs(xzr, x3, xzr);
- __ subs(xzr, xzr, x3);
+ {
+ ExactAssemblyScope scope(&masm, 6 * kInstructionSize);
+ __ subs(xzr, x0, Operand(x3, UXTX));
+ __ subs(xzr, x3, Operand(xzr, UXTX));
+ __ subs(xzr, x3, 1234);
+ __ subs(xzr, x0, x3);
+ __ subs(xzr, x3, xzr);
+ __ subs(xzr, xzr, x3);
+ }
// Swap the saved stack pointer with the real one. If sp was written
// during the test, it will show up in x30. This is done because the test
@@ -13264,8 +13597,8 @@
uint64_t x0_expected = literal_base * 1;
uint64_t x1_expected = literal_base * 2;
uint64_t x4_expected = (x0_expected << 32) | (x0_expected >> 32);
- uint64_t x5_expected = ((x1_expected << 16) & 0xffff0000) |
- ((x1_expected >> 16) & 0x0000ffff);
+ uint64_t x5_expected =
+ ((x1_expected << 16) & 0xffff0000) | ((x1_expected >> 16) & 0x0000ffff);
ASSERT_EQUAL_64(x0_expected, x0);
ASSERT_EQUAL_64(x1_expected, x1);
@@ -13311,13 +13644,13 @@
__ Mov(x4, __ StackPointer());
__ SetStackPointer(x4);
- __ Poke(wzr, 0); // Clobber the space we're about to drop.
+ __ Poke(wzr, 0); // Clobber the space we're about to drop.
__ Drop(4);
__ Peek(x6, 0);
__ Claim(8);
__ Peek(w7, 10);
__ Poke(x3, 28);
- __ Poke(xzr, 0); // Clobber the space we're about to drop.
+ __ Poke(xzr, 0); // Clobber the space we're about to drop.
__ Drop(8);
__ Poke(x2, 12);
__ Push(w0);
@@ -13336,8 +13669,8 @@
uint64_t x2_expected = literal_base * 3;
uint64_t x3_expected = literal_base * 4;
uint64_t x6_expected = (x1_expected << 32) | (x0_expected >> 32);
- uint64_t x7_expected = ((x1_expected << 16) & 0xffff0000) |
- ((x0_expected >> 48) & 0x0000ffff);
+ uint64_t x7_expected =
+ ((x1_expected << 16) & 0xffff0000) | ((x0_expected >> 48) & 0x0000ffff);
ASSERT_EQUAL_64(x0_expected, x0);
ASSERT_EQUAL_64(x1_expected, x1);
@@ -13429,9 +13762,9 @@
ASSERT_EQUAL_FP64(RawbitsToDouble(4 * base_d), d12);
ASSERT_EQUAL_FP64(RawbitsToDouble(1 * base_d), d13);
ASSERT_EQUAL_FP64(RawbitsToDouble(2 * base_d), d14);
- ASSERT_EQUAL_FP64(
- RawbitsToDouble((base_d >> kSRegSize) | ((2 * base_d) << kSRegSize)),
- d15);
+ ASSERT_EQUAL_FP64(RawbitsToDouble((base_d >> kSRegSize) |
+ ((2 * base_d) << kSRegSize)),
+ d15);
ASSERT_EQUAL_FP64(RawbitsToDouble(2 * base_d), d14);
ASSERT_EQUAL_FP32(RawbitsToFloat((4 * base_d) & kSRegMask), s16);
ASSERT_EQUAL_FP32(RawbitsToFloat((4 * base_d) >> kSRegSize), s17);
@@ -13450,7 +13783,7 @@
// * The value is not formed from repeating fixed-size smaller values, so it
// can be used to detect endianness-related errors.
uint64_t high_base = UINT32_C(0x01000010);
- uint64_t low_base = UINT32_C(0x00100101);
+ uint64_t low_base = UINT32_C(0x00100101);
uint64_t base = (high_base << 32) | low_base;
uint64_t array[21];
memset(array, 0, sizeof(array));
@@ -13597,8 +13930,8 @@
// Work out which registers to use, based on reg_size.
Register r[kNumberOfRegisters];
Register x[kNumberOfRegisters];
- RegList list = PopulateRegisterArray(NULL, x, r, reg_size, reg_count,
- allowed);
+ RegList list =
+ PopulateRegisterArray(NULL, x, r, reg_size, reg_count, allowed);
// Acquire all temps from the MacroAssembler. They are used arbitrarily below.
UseScratchRegisterScope temps(&masm);
@@ -13632,14 +13965,22 @@
case PushPopByFour:
// Push high-numbered registers first (to the highest addresses).
for (i = reg_count; i >= 4; i -= 4) {
- __ Push(r[i-1], r[i-2], r[i-3], r[i-4]);
+ __ Push(r[i - 1], r[i - 2], r[i - 3], r[i - 4]);
}
// Finish off the leftovers.
switch (i) {
- case 3: __ Push(r[2], r[1], r[0]); break;
- case 2: __ Push(r[1], r[0]); break;
- case 1: __ Push(r[0]); break;
- default: VIXL_ASSERT(i == 0); break;
+ case 3:
+ __ Push(r[2], r[1], r[0]);
+ break;
+ case 2:
+ __ Push(r[1], r[0]);
+ break;
+ case 1:
+ __ Push(r[0]);
+ break;
+ default:
+ VIXL_ASSERT(i == 0);
+ break;
}
break;
case PushPopRegList:
@@ -13653,15 +13994,23 @@
switch (pop_method) {
case PushPopByFour:
// Pop low-numbered registers first (from the lowest addresses).
- for (i = 0; i <= (reg_count-4); i += 4) {
- __ Pop(r[i], r[i+1], r[i+2], r[i+3]);
+ for (i = 0; i <= (reg_count - 4); i += 4) {
+ __ Pop(r[i], r[i + 1], r[i + 2], r[i + 3]);
}
// Finish off the leftovers.
switch (reg_count - i) {
- case 3: __ Pop(r[i], r[i+1], r[i+2]); break;
- case 2: __ Pop(r[i], r[i+1]); break;
- case 1: __ Pop(r[i]); break;
- default: VIXL_ASSERT(i == reg_count); break;
+ case 3:
+ __ Pop(r[i], r[i + 1], r[i + 2]);
+ break;
+ case 2:
+ __ Pop(r[i], r[i + 1]);
+ break;
+ case 1:
+ __ Pop(r[i]);
+ break;
+ default:
+ VIXL_ASSERT(i == reg_count);
+ break;
}
break;
case PushPopRegList:
@@ -13683,7 +14032,7 @@
// Check that the register contents were preserved.
// Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
// that the upper word was properly cleared by Pop.
- literal_base &= (0xffffffffffffffff >> (64-reg_size));
+ literal_base &= (0xffffffffffffffff >> (64 - reg_size));
for (int i = 0; i < reg_count; i++) {
if (x[i].Is(xzr)) {
ASSERT_EQUAL_64(0, x[i]);
@@ -13699,24 +14048,48 @@
TEST(push_pop_xreg_simple_32) {
for (int claim = 0; claim <= 8; claim++) {
for (int count = 0; count <= 8; count++) {
- PushPopXRegSimpleHelper(count, claim, kWRegSize,
- PushPopByFour, PushPopByFour);
- PushPopXRegSimpleHelper(count, claim, kWRegSize,
- PushPopByFour, PushPopRegList);
- PushPopXRegSimpleHelper(count, claim, kWRegSize,
- PushPopRegList, PushPopByFour);
- PushPopXRegSimpleHelper(count, claim, kWRegSize,
- PushPopRegList, PushPopRegList);
+ PushPopXRegSimpleHelper(count,
+ claim,
+ kWRegSize,
+ PushPopByFour,
+ PushPopByFour);
+ PushPopXRegSimpleHelper(count,
+ claim,
+ kWRegSize,
+ PushPopByFour,
+ PushPopRegList);
+ PushPopXRegSimpleHelper(count,
+ claim,
+ kWRegSize,
+ PushPopRegList,
+ PushPopByFour);
+ PushPopXRegSimpleHelper(count,
+ claim,
+ kWRegSize,
+ PushPopRegList,
+ PushPopRegList);
}
// Test with the maximum number of registers.
PushPopXRegSimpleHelper(kPushPopXRegMaxRegCount,
- claim, kWRegSize, PushPopByFour, PushPopByFour);
+ claim,
+ kWRegSize,
+ PushPopByFour,
+ PushPopByFour);
PushPopXRegSimpleHelper(kPushPopXRegMaxRegCount,
- claim, kWRegSize, PushPopByFour, PushPopRegList);
+ claim,
+ kWRegSize,
+ PushPopByFour,
+ PushPopRegList);
PushPopXRegSimpleHelper(kPushPopXRegMaxRegCount,
- claim, kWRegSize, PushPopRegList, PushPopByFour);
+ claim,
+ kWRegSize,
+ PushPopRegList,
+ PushPopByFour);
PushPopXRegSimpleHelper(kPushPopXRegMaxRegCount,
- claim, kWRegSize, PushPopRegList, PushPopRegList);
+ claim,
+ kWRegSize,
+ PushPopRegList,
+ PushPopRegList);
}
}
@@ -13724,24 +14097,48 @@
TEST(push_pop_xreg_simple_64) {
for (int claim = 0; claim <= 8; claim++) {
for (int count = 0; count <= 8; count++) {
- PushPopXRegSimpleHelper(count, claim, kXRegSize,
- PushPopByFour, PushPopByFour);
- PushPopXRegSimpleHelper(count, claim, kXRegSize,
- PushPopByFour, PushPopRegList);
- PushPopXRegSimpleHelper(count, claim, kXRegSize,
- PushPopRegList, PushPopByFour);
- PushPopXRegSimpleHelper(count, claim, kXRegSize,
- PushPopRegList, PushPopRegList);
+ PushPopXRegSimpleHelper(count,
+ claim,
+ kXRegSize,
+ PushPopByFour,
+ PushPopByFour);
+ PushPopXRegSimpleHelper(count,
+ claim,
+ kXRegSize,
+ PushPopByFour,
+ PushPopRegList);
+ PushPopXRegSimpleHelper(count,
+ claim,
+ kXRegSize,
+ PushPopRegList,
+ PushPopByFour);
+ PushPopXRegSimpleHelper(count,
+ claim,
+ kXRegSize,
+ PushPopRegList,
+ PushPopRegList);
}
// Test with the maximum number of registers.
PushPopXRegSimpleHelper(kPushPopXRegMaxRegCount,
- claim, kXRegSize, PushPopByFour, PushPopByFour);
+ claim,
+ kXRegSize,
+ PushPopByFour,
+ PushPopByFour);
PushPopXRegSimpleHelper(kPushPopXRegMaxRegCount,
- claim, kXRegSize, PushPopByFour, PushPopRegList);
+ claim,
+ kXRegSize,
+ PushPopByFour,
+ PushPopRegList);
PushPopXRegSimpleHelper(kPushPopXRegMaxRegCount,
- claim, kXRegSize, PushPopRegList, PushPopByFour);
+ claim,
+ kXRegSize,
+ PushPopRegList,
+ PushPopByFour);
PushPopXRegSimpleHelper(kPushPopXRegMaxRegCount,
- claim, kXRegSize, PushPopRegList, PushPopRegList);
+ claim,
+ kXRegSize,
+ PushPopRegList,
+ PushPopRegList);
}
}
@@ -13777,8 +14174,8 @@
// Work out which registers to use, based on reg_size.
FPRegister v[kNumberOfRegisters];
FPRegister d[kNumberOfRegisters];
- RegList list = PopulateFPRegisterArray(NULL, d, v, reg_size, reg_count,
- allowed);
+ RegList list =
+ PopulateFPRegisterArray(NULL, d, v, reg_size, reg_count, allowed);
// Arbitrarily pick a register to use as a stack pointer.
const Register& stack_pointer = x10;
@@ -13821,14 +14218,22 @@
case PushPopByFour:
// Push high-numbered registers first (to the highest addresses).
for (i = reg_count; i >= 4; i -= 4) {
- __ Push(v[i-1], v[i-2], v[i-3], v[i-4]);
+ __ Push(v[i - 1], v[i - 2], v[i - 3], v[i - 4]);
}
// Finish off the leftovers.
switch (i) {
- case 3: __ Push(v[2], v[1], v[0]); break;
- case 2: __ Push(v[1], v[0]); break;
- case 1: __ Push(v[0]); break;
- default: VIXL_ASSERT(i == 0); break;
+ case 3:
+ __ Push(v[2], v[1], v[0]);
+ break;
+ case 2:
+ __ Push(v[1], v[0]);
+ break;
+ case 1:
+ __ Push(v[0]);
+ break;
+ default:
+ VIXL_ASSERT(i == 0);
+ break;
}
break;
case PushPopRegList:
@@ -13842,15 +14247,23 @@
switch (pop_method) {
case PushPopByFour:
// Pop low-numbered registers first (from the lowest addresses).
- for (i = 0; i <= (reg_count-4); i += 4) {
- __ Pop(v[i], v[i+1], v[i+2], v[i+3]);
+ for (i = 0; i <= (reg_count - 4); i += 4) {
+ __ Pop(v[i], v[i + 1], v[i + 2], v[i + 3]);
}
// Finish off the leftovers.
switch (reg_count - i) {
- case 3: __ Pop(v[i], v[i+1], v[i+2]); break;
- case 2: __ Pop(v[i], v[i+1]); break;
- case 1: __ Pop(v[i]); break;
- default: VIXL_ASSERT(i == reg_count); break;
+ case 3:
+ __ Pop(v[i], v[i + 1], v[i + 2]);
+ break;
+ case 2:
+ __ Pop(v[i], v[i + 1]);
+ break;
+ case 1:
+ __ Pop(v[i]);
+ break;
+ default:
+ VIXL_ASSERT(i == reg_count);
+ break;
}
break;
case PushPopRegList:
@@ -13872,7 +14285,7 @@
// Check that the register contents were preserved.
// Always use ASSERT_EQUAL_FP64, even when testing S registers, so we can
// test that the upper word was properly cleared by Pop.
- literal_base &= (0xffffffffffffffff >> (64-reg_size));
+ literal_base &= (0xffffffffffffffff >> (64 - reg_size));
for (int i = 0; i < reg_count; i++) {
uint64_t literal = literal_base * i;
double expected;
@@ -13887,24 +14300,48 @@
TEST(push_pop_fp_xreg_simple_32) {
for (int claim = 0; claim <= 8; claim++) {
for (int count = 0; count <= 8; count++) {
- PushPopFPXRegSimpleHelper(count, claim, kSRegSize,
- PushPopByFour, PushPopByFour);
- PushPopFPXRegSimpleHelper(count, claim, kSRegSize,
- PushPopByFour, PushPopRegList);
- PushPopFPXRegSimpleHelper(count, claim, kSRegSize,
- PushPopRegList, PushPopByFour);
- PushPopFPXRegSimpleHelper(count, claim, kSRegSize,
- PushPopRegList, PushPopRegList);
+ PushPopFPXRegSimpleHelper(count,
+ claim,
+ kSRegSize,
+ PushPopByFour,
+ PushPopByFour);
+ PushPopFPXRegSimpleHelper(count,
+ claim,
+ kSRegSize,
+ PushPopByFour,
+ PushPopRegList);
+ PushPopFPXRegSimpleHelper(count,
+ claim,
+ kSRegSize,
+ PushPopRegList,
+ PushPopByFour);
+ PushPopFPXRegSimpleHelper(count,
+ claim,
+ kSRegSize,
+ PushPopRegList,
+ PushPopRegList);
}
// Test with the maximum number of registers.
- PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount, claim, kSRegSize,
- PushPopByFour, PushPopByFour);
- PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount, claim, kSRegSize,
- PushPopByFour, PushPopRegList);
- PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount, claim, kSRegSize,
- PushPopRegList, PushPopByFour);
- PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount, claim, kSRegSize,
- PushPopRegList, PushPopRegList);
+ PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount,
+ claim,
+ kSRegSize,
+ PushPopByFour,
+ PushPopByFour);
+ PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount,
+ claim,
+ kSRegSize,
+ PushPopByFour,
+ PushPopRegList);
+ PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount,
+ claim,
+ kSRegSize,
+ PushPopRegList,
+ PushPopByFour);
+ PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount,
+ claim,
+ kSRegSize,
+ PushPopRegList,
+ PushPopRegList);
}
}
@@ -13912,24 +14349,48 @@
TEST(push_pop_fp_xreg_simple_64) {
for (int claim = 0; claim <= 8; claim++) {
for (int count = 0; count <= 8; count++) {
- PushPopFPXRegSimpleHelper(count, claim, kDRegSize,
- PushPopByFour, PushPopByFour);
- PushPopFPXRegSimpleHelper(count, claim, kDRegSize,
- PushPopByFour, PushPopRegList);
- PushPopFPXRegSimpleHelper(count, claim, kDRegSize,
- PushPopRegList, PushPopByFour);
- PushPopFPXRegSimpleHelper(count, claim, kDRegSize,
- PushPopRegList, PushPopRegList);
+ PushPopFPXRegSimpleHelper(count,
+ claim,
+ kDRegSize,
+ PushPopByFour,
+ PushPopByFour);
+ PushPopFPXRegSimpleHelper(count,
+ claim,
+ kDRegSize,
+ PushPopByFour,
+ PushPopRegList);
+ PushPopFPXRegSimpleHelper(count,
+ claim,
+ kDRegSize,
+ PushPopRegList,
+ PushPopByFour);
+ PushPopFPXRegSimpleHelper(count,
+ claim,
+ kDRegSize,
+ PushPopRegList,
+ PushPopRegList);
}
// Test with the maximum number of registers.
- PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount, claim, kDRegSize,
- PushPopByFour, PushPopByFour);
- PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount, claim, kDRegSize,
- PushPopByFour, PushPopRegList);
- PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount, claim, kDRegSize,
- PushPopRegList, PushPopByFour);
- PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount, claim, kDRegSize,
- PushPopRegList, PushPopRegList);
+ PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount,
+ claim,
+ kDRegSize,
+ PushPopByFour,
+ PushPopByFour);
+ PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount,
+ claim,
+ kDRegSize,
+ PushPopByFour,
+ PushPopRegList);
+ PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount,
+ claim,
+ kDRegSize,
+ PushPopRegList,
+ PushPopByFour);
+ PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount,
+ claim,
+ kDRegSize,
+ PushPopRegList,
+ PushPopRegList);
}
}
@@ -14012,7 +14473,7 @@
// Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
// that the upper word was properly cleared by Pop.
- literal_base &= (0xffffffffffffffff >> (64-reg_size));
+ literal_base &= (0xffffffffffffffff >> (64 - reg_size));
ASSERT_EQUAL_64(literal_base * 3, x[9]);
ASSERT_EQUAL_64(literal_base * 2, x[8]);
@@ -14178,7 +14639,7 @@
// If popping an even number of registers, the first one will be X-sized.
// Otherwise, the first one will be W-sized.
bool next_is_64 = !(reg_count & 1);
- for (int i = reg_count-1; i >= 0; i--) {
+ for (int i = reg_count - 1; i >= 0; i--) {
if (next_is_64) {
__ Pop(x[i]);
active_w_slots -= 2;
@@ -14491,8 +14952,8 @@
SETUP();
START();
- char const * test_plain_string = "Printf with no arguments.\n";
- char const * test_substring = "'This is a substring.'";
+ char const* test_plain_string = "Printf with no arguments.\n";
+ char const* test_substring = "'This is a substring.'";
RegisterDump before;
// Initialize x29 to the value of the stack pointer. We will use x29 as a
@@ -14535,14 +14996,18 @@
// Check that we don't clobber any registers.
before.Dump(&masm);
- __ Printf(test_plain_string); // NOLINT(runtime/printf)
+ __ Printf(test_plain_string); // NOLINT(runtime/printf)
__ Printf("x0: %" PRId64 ", x1: 0x%08" PRIx64 "\n", x0, x1);
- __ Printf("w5: %" PRId32 ", x5: %" PRId64"\n", w5, x5);
+ __ Printf("w5: %" PRId32 ", x5: %" PRId64 "\n", w5, x5);
__ Printf("d0: %f\n", d0);
__ Printf("Test %%s: %s\n", x2);
- __ Printf("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
+ __ Printf("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32
+ "\n"
"x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
- w3, w4, x5, x6);
+ w3,
+ w4,
+ x5,
+ x6);
__ Printf("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
__ Printf("0x%" PRIx32 ", 0x%" PRIx64 "\n", w28, x28);
__ Printf("%g\n", d10);
@@ -14550,7 +15015,8 @@
// Print the stack pointer (sp).
__ Printf("StackPointer(sp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
- __ StackPointer(), __ StackPointer().W());
+ __ StackPointer(),
+ __ StackPointer().W());
// Test with a different stack pointer.
const Register old_stack_pointer = __ StackPointer();
@@ -14558,7 +15024,8 @@
__ SetStackPointer(x29);
// Print the stack pointer (not sp).
__ Printf("StackPointer(not sp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
- __ StackPointer(), __ StackPointer().W());
+ __ StackPointer(),
+ __ StackPointer().W());
__ Mov(old_stack_pointer, __ StackPointer());
__ SetStackPointer(old_stack_pointer);
@@ -14567,9 +15034,15 @@
// Mixed argument types.
__ Printf("w3: %" PRIu32 ", s1: %f, x5: %" PRIu64 ", d3: %f\n",
- w3, s1, x5, d3);
+ w3,
+ s1,
+ x5,
+ d3);
__ Printf("s1: %f, d3: %f, w3: %" PRId32 ", x5: %" PRId64 "\n",
- s1, d3, w3, x5);
+ s1,
+ d3,
+ w3,
+ x5);
END();
RUN();
@@ -14588,8 +15061,8 @@
SETUP();
START();
- char const * test_plain_string = "Printf with no arguments.\n";
- char const * test_substring = "'This is a substring.'";
+ char const* test_plain_string = "Printf with no arguments.\n";
+ char const* test_substring = "'This is a substring.'";
__ PrintfNoPreserve(test_plain_string);
__ Mov(x19, x0);
@@ -14597,7 +15070,7 @@
// Test simple integer arguments.
__ Mov(x0, 1234);
__ Mov(x1, 0x1234);
- __ PrintfNoPreserve("x0: %" PRId64", x1: 0x%08" PRIx64 "\n", x0, x1);
+ __ PrintfNoPreserve("x0: %" PRId64 ", x1: 0x%08" PRIx64 "\n", x0, x1);
__ Mov(x20, x0);
// Test simple floating-point arguments.
@@ -14615,9 +15088,13 @@
__ Mov(w4, 0xffffffff);
__ Mov(x5, 0xffffffffffffffff);
__ Mov(x6, 0xffffffffffffffff);
- __ PrintfNoPreserve("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
+ __ PrintfNoPreserve("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32
+ "\n"
"x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
- w3, w4, x5, x6);
+ w3,
+ w4,
+ x5,
+ x6);
__ Mov(x23, x0);
__ Fmov(s1, 1.234);
@@ -14641,9 +15118,10 @@
__ Mov(x29, old_stack_pointer);
__ SetStackPointer(x29);
// Print the stack pointer (not sp).
- __ PrintfNoPreserve(
- "StackPointer(not sp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
- __ StackPointer(), __ StackPointer().W());
+ __ PrintfNoPreserve("StackPointer(not sp): 0x%016" PRIx64 ", 0x%08" PRIx32
+ "\n",
+ __ StackPointer(),
+ __ StackPointer().W());
__ Mov(x27, x0);
__ Mov(old_stack_pointer, __ StackPointer());
__ SetStackPointer(old_stack_pointer);
@@ -14661,7 +15139,10 @@
__ Mov(x5, 0xffffffffffffffff);
__ Fmov(d3, 3.456);
__ PrintfNoPreserve("w3: %" PRIu32 ", s1: %f, x5: %" PRIu64 ", d3: %f\n",
- w3, s1, x5, d3);
+ w3,
+ s1,
+ x5,
+ d3);
__ Mov(x29, x0);
END();
@@ -15439,12 +15920,12 @@
// As above, but get suitably-aligned values for ldxp and stxp.
uint32_t wp_data[] = {0, 0, 0, 0, 0};
- uint32_t * wp = AlignUp(wp_data + 1, kWRegSizeInBytes * 2) - 1;
- wp[1] = 0x12345678; // wp[1] is 64-bit-aligned.
+ uint32_t* wp = AlignUp(wp_data + 1, kWRegSizeInBytes * 2) - 1;
+ wp[1] = 0x12345678; // wp[1] is 64-bit-aligned.
wp[2] = 0x87654321;
uint64_t xp_data[] = {0, 0, 0, 0, 0};
- uint64_t * xp = AlignUp(xp_data + 1, kXRegSizeInBytes * 2) - 1;
- xp[1] = 0x123456789abcdef0; // xp[1] is 128-bit-aligned.
+ uint64_t* xp = AlignUp(xp_data + 1, kXRegSizeInBytes * 2) - 1;
+ xp[1] = 0x123456789abcdef0; // xp[1] is 128-bit-aligned.
xp[2] = 0x0fedcba987654321;
SETUP();
@@ -15540,12 +16021,12 @@
// As above, but get suitably-aligned values for ldxp and stxp.
uint32_t wp_data[] = {0, 0, 0, 0, 0};
- uint32_t * wp = AlignUp(wp_data + 1, kWRegSizeInBytes * 2) - 1;
- wp[1] = 0x12345678; // wp[1] is 64-bit-aligned.
+ uint32_t* wp = AlignUp(wp_data + 1, kWRegSizeInBytes * 2) - 1;
+ wp[1] = 0x12345678; // wp[1] is 64-bit-aligned.
wp[2] = 0x87654321;
uint64_t xp_data[] = {0, 0, 0, 0, 0};
- uint64_t * xp = AlignUp(xp_data + 1, kXRegSizeInBytes * 2) - 1;
- xp[1] = 0x123456789abcdef0; // xp[1] is 128-bit-aligned.
+ uint64_t* xp = AlignUp(xp_data + 1, kXRegSizeInBytes * 2) - 1;
+ xp[1] = 0x123456789abcdef0; // xp[1] is 128-bit-aligned.
xp[2] = 0x0fedcba987654321;
SETUP();
@@ -15634,7 +16115,7 @@
TEST(clrex) {
// This data should never be written.
uint64_t data[] = {0, 0, 0};
- uint64_t * data_aligned = AlignUp(data, kXRegSizeInBytes * 2);
+ uint64_t* data_aligned = AlignUp(data, kXRegSizeInBytes * 2);
SETUP();
START();
@@ -15738,7 +16219,7 @@
// Check that the simulator occasionally makes store-exclusive fail.
TEST(ldxr_stxr_fail) {
uint64_t data[] = {0, 0, 0};
- uint64_t * data_aligned = AlignUp(data, kXRegSizeInBytes * 2);
+ uint64_t* data_aligned = AlignUp(data, kXRegSizeInBytes * 2);
// Impose a hard limit on the number of attempts, so the test cannot hang.
static const uint64_t kWatchdog = 10000;
@@ -15822,7 +16303,7 @@
// Check that the simulator occasionally makes store-exclusive fail.
TEST(ldaxr_stlxr_fail) {
uint64_t data[] = {0, 0, 0};
- uint64_t * data_aligned = AlignUp(data, kXRegSizeInBytes * 2);
+ uint64_t* data_aligned = AlignUp(data, kXRegSizeInBytes * 2);
// Impose a hard limit on the number of attempts, so the test cannot hang.
static const uint64_t kWatchdog = 10000;
@@ -15903,7 +16384,7 @@
TEST(load_store_tagged_immediate_offset) {
- uint64_t tags[] = { 0x00, 0x1, 0x55, 0xff };
+ uint64_t tags[] = {0x00, 0x1, 0x55, 0xff};
int tag_count = sizeof(tags) / sizeof(tags[0]);
const int kMaxDataLength = 160;
@@ -15927,7 +16408,6 @@
memset(dst, 0, kMaxDataLength);
SETUP();
- ALLOW_ASM();
START();
__ Mov(x0, src_tagged);
@@ -15936,94 +16416,160 @@
int offset = 0;
// Scaled-immediate offsets.
- __ ldp(q0, q1, MemOperand(x0, offset));
- __ stp(q0, q1, MemOperand(x1, offset));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldp(q0, q1, MemOperand(x0, offset));
+ __ stp(q0, q1, MemOperand(x1, offset));
+ }
offset += 2 * kQRegSizeInBytes;
- __ ldp(x2, x3, MemOperand(x0, offset));
- __ stp(x2, x3, MemOperand(x1, offset));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldp(x2, x3, MemOperand(x0, offset));
+ __ stp(x2, x3, MemOperand(x1, offset));
+ }
offset += 2 * kXRegSizeInBytes;
- __ ldpsw(x2, x3, MemOperand(x0, offset));
- __ stp(w2, w3, MemOperand(x1, offset));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldpsw(x2, x3, MemOperand(x0, offset));
+ __ stp(w2, w3, MemOperand(x1, offset));
+ }
offset += 2 * kWRegSizeInBytes;
- __ ldp(d0, d1, MemOperand(x0, offset));
- __ stp(d0, d1, MemOperand(x1, offset));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldp(d0, d1, MemOperand(x0, offset));
+ __ stp(d0, d1, MemOperand(x1, offset));
+ }
offset += 2 * kDRegSizeInBytes;
- __ ldp(w2, w3, MemOperand(x0, offset));
- __ stp(w2, w3, MemOperand(x1, offset));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldp(w2, w3, MemOperand(x0, offset));
+ __ stp(w2, w3, MemOperand(x1, offset));
+ }
offset += 2 * kWRegSizeInBytes;
- __ ldp(s0, s1, MemOperand(x0, offset));
- __ stp(s0, s1, MemOperand(x1, offset));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldp(s0, s1, MemOperand(x0, offset));
+ __ stp(s0, s1, MemOperand(x1, offset));
+ }
offset += 2 * kSRegSizeInBytes;
- __ ldr(x2, MemOperand(x0, offset), RequireScaledOffset);
- __ str(x2, MemOperand(x1, offset), RequireScaledOffset);
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldr(x2, MemOperand(x0, offset), RequireScaledOffset);
+ __ str(x2, MemOperand(x1, offset), RequireScaledOffset);
+ }
offset += kXRegSizeInBytes;
- __ ldr(d0, MemOperand(x0, offset), RequireScaledOffset);
- __ str(d0, MemOperand(x1, offset), RequireScaledOffset);
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldr(d0, MemOperand(x0, offset), RequireScaledOffset);
+ __ str(d0, MemOperand(x1, offset), RequireScaledOffset);
+ }
offset += kDRegSizeInBytes;
- __ ldr(w2, MemOperand(x0, offset), RequireScaledOffset);
- __ str(w2, MemOperand(x1, offset), RequireScaledOffset);
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldr(w2, MemOperand(x0, offset), RequireScaledOffset);
+ __ str(w2, MemOperand(x1, offset), RequireScaledOffset);
+ }
offset += kWRegSizeInBytes;
- __ ldr(s0, MemOperand(x0, offset), RequireScaledOffset);
- __ str(s0, MemOperand(x1, offset), RequireScaledOffset);
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldr(s0, MemOperand(x0, offset), RequireScaledOffset);
+ __ str(s0, MemOperand(x1, offset), RequireScaledOffset);
+ }
offset += kSRegSizeInBytes;
- __ ldrh(w2, MemOperand(x0, offset), RequireScaledOffset);
- __ strh(w2, MemOperand(x1, offset), RequireScaledOffset);
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldrh(w2, MemOperand(x0, offset), RequireScaledOffset);
+ __ strh(w2, MemOperand(x1, offset), RequireScaledOffset);
+ }
offset += 2;
- __ ldrsh(w2, MemOperand(x0, offset), RequireScaledOffset);
- __ strh(w2, MemOperand(x1, offset), RequireScaledOffset);
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldrsh(w2, MemOperand(x0, offset), RequireScaledOffset);
+ __ strh(w2, MemOperand(x1, offset), RequireScaledOffset);
+ }
offset += 2;
- __ ldrb(w2, MemOperand(x0, offset), RequireScaledOffset);
- __ strb(w2, MemOperand(x1, offset), RequireScaledOffset);
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldrb(w2, MemOperand(x0, offset), RequireScaledOffset);
+ __ strb(w2, MemOperand(x1, offset), RequireScaledOffset);
+ }
offset += 1;
- __ ldrsb(w2, MemOperand(x0, offset), RequireScaledOffset);
- __ strb(w2, MemOperand(x1, offset), RequireScaledOffset);
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldrsb(w2, MemOperand(x0, offset), RequireScaledOffset);
+ __ strb(w2, MemOperand(x1, offset), RequireScaledOffset);
+ }
offset += 1;
// Unscaled-immediate offsets.
- __ ldur(x2, MemOperand(x0, offset), RequireUnscaledOffset);
- __ stur(x2, MemOperand(x1, offset), RequireUnscaledOffset);
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldur(x2, MemOperand(x0, offset), RequireUnscaledOffset);
+ __ stur(x2, MemOperand(x1, offset), RequireUnscaledOffset);
+ }
offset += kXRegSizeInBytes;
- __ ldur(d0, MemOperand(x0, offset), RequireUnscaledOffset);
- __ stur(d0, MemOperand(x1, offset), RequireUnscaledOffset);
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldur(d0, MemOperand(x0, offset), RequireUnscaledOffset);
+ __ stur(d0, MemOperand(x1, offset), RequireUnscaledOffset);
+ }
offset += kDRegSizeInBytes;
- __ ldur(w2, MemOperand(x0, offset), RequireUnscaledOffset);
- __ stur(w2, MemOperand(x1, offset), RequireUnscaledOffset);
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldur(w2, MemOperand(x0, offset), RequireUnscaledOffset);
+ __ stur(w2, MemOperand(x1, offset), RequireUnscaledOffset);
+ }
offset += kWRegSizeInBytes;
- __ ldur(s0, MemOperand(x0, offset), RequireUnscaledOffset);
- __ stur(s0, MemOperand(x1, offset), RequireUnscaledOffset);
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldur(s0, MemOperand(x0, offset), RequireUnscaledOffset);
+ __ stur(s0, MemOperand(x1, offset), RequireUnscaledOffset);
+ }
offset += kSRegSizeInBytes;
- __ ldurh(w2, MemOperand(x0, offset), RequireUnscaledOffset);
- __ sturh(w2, MemOperand(x1, offset), RequireUnscaledOffset);
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldurh(w2, MemOperand(x0, offset), RequireUnscaledOffset);
+ __ sturh(w2, MemOperand(x1, offset), RequireUnscaledOffset);
+ }
offset += 2;
- __ ldursh(w2, MemOperand(x0, offset), RequireUnscaledOffset);
- __ sturh(w2, MemOperand(x1, offset), RequireUnscaledOffset);
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldursh(w2, MemOperand(x0, offset), RequireUnscaledOffset);
+ __ sturh(w2, MemOperand(x1, offset), RequireUnscaledOffset);
+ }
offset += 2;
- __ ldurb(w2, MemOperand(x0, offset), RequireUnscaledOffset);
- __ sturb(w2, MemOperand(x1, offset), RequireUnscaledOffset);
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldurb(w2, MemOperand(x0, offset), RequireUnscaledOffset);
+ __ sturb(w2, MemOperand(x1, offset), RequireUnscaledOffset);
+ }
offset += 1;
- __ ldursb(w2, MemOperand(x0, offset), RequireUnscaledOffset);
- __ sturb(w2, MemOperand(x1, offset), RequireUnscaledOffset);
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldursb(w2, MemOperand(x0, offset), RequireUnscaledOffset);
+ __ sturb(w2, MemOperand(x1, offset), RequireUnscaledOffset);
+ }
offset += 1;
// Extract the tag (so we can test that it was preserved correctly).
@@ -16049,7 +16595,7 @@
TEST(load_store_tagged_immediate_preindex) {
- uint64_t tags[] = { 0x00, 0x1, 0x55, 0xff };
+ uint64_t tags[] = {0x00, 0x1, 0x55, 0xff};
int tag_count = sizeof(tags) / sizeof(tags[0]);
const int kMaxDataLength = 128;
@@ -16075,7 +16621,6 @@
}
SETUP();
- ALLOW_ASM();
START();
// Each MemOperand must apply a pre-index equal to the size of the
@@ -16088,73 +16633,115 @@
__ Mov(x0, src_tagged - preindex);
__ Mov(x1, dst_tagged - preindex);
- __ ldp(q0, q1, MemOperand(x0, preindex, PreIndex));
- __ stp(q0, q1, MemOperand(x1, preindex, PreIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldp(q0, q1, MemOperand(x0, preindex, PreIndex));
+ __ stp(q0, q1, MemOperand(x1, preindex, PreIndex));
+ }
preindex = 2 * kQRegSizeInBytes;
data_length = preindex;
- __ ldp(x2, x3, MemOperand(x0, preindex, PreIndex));
- __ stp(x2, x3, MemOperand(x1, preindex, PreIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldp(x2, x3, MemOperand(x0, preindex, PreIndex));
+ __ stp(x2, x3, MemOperand(x1, preindex, PreIndex));
+ }
preindex = 2 * kXRegSizeInBytes;
data_length += preindex;
- __ ldpsw(x2, x3, MemOperand(x0, preindex, PreIndex));
- __ stp(w2, w3, MemOperand(x1, preindex, PreIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldpsw(x2, x3, MemOperand(x0, preindex, PreIndex));
+ __ stp(w2, w3, MemOperand(x1, preindex, PreIndex));
+ }
preindex = 2 * kWRegSizeInBytes;
data_length += preindex;
- __ ldp(d0, d1, MemOperand(x0, preindex, PreIndex));
- __ stp(d0, d1, MemOperand(x1, preindex, PreIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldp(d0, d1, MemOperand(x0, preindex, PreIndex));
+ __ stp(d0, d1, MemOperand(x1, preindex, PreIndex));
+ }
preindex = 2 * kDRegSizeInBytes;
data_length += preindex;
- __ ldp(w2, w3, MemOperand(x0, preindex, PreIndex));
- __ stp(w2, w3, MemOperand(x1, preindex, PreIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldp(w2, w3, MemOperand(x0, preindex, PreIndex));
+ __ stp(w2, w3, MemOperand(x1, preindex, PreIndex));
+ }
preindex = 2 * kWRegSizeInBytes;
data_length += preindex;
- __ ldp(s0, s1, MemOperand(x0, preindex, PreIndex));
- __ stp(s0, s1, MemOperand(x1, preindex, PreIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldp(s0, s1, MemOperand(x0, preindex, PreIndex));
+ __ stp(s0, s1, MemOperand(x1, preindex, PreIndex));
+ }
preindex = 2 * kSRegSizeInBytes;
data_length += preindex;
- __ ldr(x2, MemOperand(x0, preindex, PreIndex));
- __ str(x2, MemOperand(x1, preindex, PreIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldr(x2, MemOperand(x0, preindex, PreIndex));
+ __ str(x2, MemOperand(x1, preindex, PreIndex));
+ }
preindex = kXRegSizeInBytes;
data_length += preindex;
- __ ldr(d0, MemOperand(x0, preindex, PreIndex));
- __ str(d0, MemOperand(x1, preindex, PreIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldr(d0, MemOperand(x0, preindex, PreIndex));
+ __ str(d0, MemOperand(x1, preindex, PreIndex));
+ }
preindex = kDRegSizeInBytes;
data_length += preindex;
- __ ldr(w2, MemOperand(x0, preindex, PreIndex));
- __ str(w2, MemOperand(x1, preindex, PreIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldr(w2, MemOperand(x0, preindex, PreIndex));
+ __ str(w2, MemOperand(x1, preindex, PreIndex));
+ }
preindex = kWRegSizeInBytes;
data_length += preindex;
- __ ldr(s0, MemOperand(x0, preindex, PreIndex));
- __ str(s0, MemOperand(x1, preindex, PreIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldr(s0, MemOperand(x0, preindex, PreIndex));
+ __ str(s0, MemOperand(x1, preindex, PreIndex));
+ }
preindex = kSRegSizeInBytes;
data_length += preindex;
- __ ldrh(w2, MemOperand(x0, preindex, PreIndex));
- __ strh(w2, MemOperand(x1, preindex, PreIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldrh(w2, MemOperand(x0, preindex, PreIndex));
+ __ strh(w2, MemOperand(x1, preindex, PreIndex));
+ }
preindex = 2;
data_length += preindex;
- __ ldrsh(w2, MemOperand(x0, preindex, PreIndex));
- __ strh(w2, MemOperand(x1, preindex, PreIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldrsh(w2, MemOperand(x0, preindex, PreIndex));
+ __ strh(w2, MemOperand(x1, preindex, PreIndex));
+ }
preindex = 2;
data_length += preindex;
- __ ldrb(w2, MemOperand(x0, preindex, PreIndex));
- __ strb(w2, MemOperand(x1, preindex, PreIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldrb(w2, MemOperand(x0, preindex, PreIndex));
+ __ strb(w2, MemOperand(x1, preindex, PreIndex));
+ }
preindex = 1;
data_length += preindex;
- __ ldrsb(w2, MemOperand(x0, preindex, PreIndex));
- __ strb(w2, MemOperand(x1, preindex, PreIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldrsb(w2, MemOperand(x0, preindex, PreIndex));
+ __ strb(w2, MemOperand(x1, preindex, PreIndex));
+ }
preindex = 1;
data_length += preindex;
@@ -16179,7 +16766,7 @@
TEST(load_store_tagged_immediate_postindex) {
- uint64_t tags[] = { 0x00, 0x1, 0x55, 0xff };
+ uint64_t tags[] = {0x00, 0x1, 0x55, 0xff};
int tag_count = sizeof(tags) / sizeof(tags[0]);
const int kMaxDataLength = 128;
@@ -16205,7 +16792,6 @@
}
SETUP();
- ALLOW_ASM();
START();
int postindex = 2 * kXRegSizeInBytes;
@@ -16214,73 +16800,115 @@
__ Mov(x0, src_tagged);
__ Mov(x1, dst_tagged);
- __ ldp(x2, x3, MemOperand(x0, postindex, PostIndex));
- __ stp(x2, x3, MemOperand(x1, postindex, PostIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldp(x2, x3, MemOperand(x0, postindex, PostIndex));
+ __ stp(x2, x3, MemOperand(x1, postindex, PostIndex));
+ }
data_length = postindex;
postindex = 2 * kQRegSizeInBytes;
- __ ldp(q0, q1, MemOperand(x0, postindex, PostIndex));
- __ stp(q0, q1, MemOperand(x1, postindex, PostIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldp(q0, q1, MemOperand(x0, postindex, PostIndex));
+ __ stp(q0, q1, MemOperand(x1, postindex, PostIndex));
+ }
data_length += postindex;
postindex = 2 * kWRegSizeInBytes;
- __ ldpsw(x2, x3, MemOperand(x0, postindex, PostIndex));
- __ stp(w2, w3, MemOperand(x1, postindex, PostIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldpsw(x2, x3, MemOperand(x0, postindex, PostIndex));
+ __ stp(w2, w3, MemOperand(x1, postindex, PostIndex));
+ }
data_length += postindex;
postindex = 2 * kDRegSizeInBytes;
- __ ldp(d0, d1, MemOperand(x0, postindex, PostIndex));
- __ stp(d0, d1, MemOperand(x1, postindex, PostIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldp(d0, d1, MemOperand(x0, postindex, PostIndex));
+ __ stp(d0, d1, MemOperand(x1, postindex, PostIndex));
+ }
data_length += postindex;
postindex = 2 * kWRegSizeInBytes;
- __ ldp(w2, w3, MemOperand(x0, postindex, PostIndex));
- __ stp(w2, w3, MemOperand(x1, postindex, PostIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldp(w2, w3, MemOperand(x0, postindex, PostIndex));
+ __ stp(w2, w3, MemOperand(x1, postindex, PostIndex));
+ }
data_length += postindex;
postindex = 2 * kSRegSizeInBytes;
- __ ldp(s0, s1, MemOperand(x0, postindex, PostIndex));
- __ stp(s0, s1, MemOperand(x1, postindex, PostIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldp(s0, s1, MemOperand(x0, postindex, PostIndex));
+ __ stp(s0, s1, MemOperand(x1, postindex, PostIndex));
+ }
data_length += postindex;
postindex = kXRegSizeInBytes;
- __ ldr(x2, MemOperand(x0, postindex, PostIndex));
- __ str(x2, MemOperand(x1, postindex, PostIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldr(x2, MemOperand(x0, postindex, PostIndex));
+ __ str(x2, MemOperand(x1, postindex, PostIndex));
+ }
data_length += postindex;
postindex = kDRegSizeInBytes;
- __ ldr(d0, MemOperand(x0, postindex, PostIndex));
- __ str(d0, MemOperand(x1, postindex, PostIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldr(d0, MemOperand(x0, postindex, PostIndex));
+ __ str(d0, MemOperand(x1, postindex, PostIndex));
+ }
data_length += postindex;
postindex = kWRegSizeInBytes;
- __ ldr(w2, MemOperand(x0, postindex, PostIndex));
- __ str(w2, MemOperand(x1, postindex, PostIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldr(w2, MemOperand(x0, postindex, PostIndex));
+ __ str(w2, MemOperand(x1, postindex, PostIndex));
+ }
data_length += postindex;
postindex = kSRegSizeInBytes;
- __ ldr(s0, MemOperand(x0, postindex, PostIndex));
- __ str(s0, MemOperand(x1, postindex, PostIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldr(s0, MemOperand(x0, postindex, PostIndex));
+ __ str(s0, MemOperand(x1, postindex, PostIndex));
+ }
data_length += postindex;
postindex = 2;
- __ ldrh(w2, MemOperand(x0, postindex, PostIndex));
- __ strh(w2, MemOperand(x1, postindex, PostIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldrh(w2, MemOperand(x0, postindex, PostIndex));
+ __ strh(w2, MemOperand(x1, postindex, PostIndex));
+ }
data_length += postindex;
postindex = 2;
- __ ldrsh(w2, MemOperand(x0, postindex, PostIndex));
- __ strh(w2, MemOperand(x1, postindex, PostIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldrsh(w2, MemOperand(x0, postindex, PostIndex));
+ __ strh(w2, MemOperand(x1, postindex, PostIndex));
+ }
data_length += postindex;
postindex = 1;
- __ ldrb(w2, MemOperand(x0, postindex, PostIndex));
- __ strb(w2, MemOperand(x1, postindex, PostIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldrb(w2, MemOperand(x0, postindex, PostIndex));
+ __ strb(w2, MemOperand(x1, postindex, PostIndex));
+ }
data_length += postindex;
postindex = 1;
- __ ldrsb(w2, MemOperand(x0, postindex, PostIndex));
- __ strb(w2, MemOperand(x1, postindex, PostIndex));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldrsb(w2, MemOperand(x0, postindex, PostIndex));
+ __ strb(w2, MemOperand(x1, postindex, PostIndex));
+ }
data_length += postindex;
VIXL_ASSERT(kMaxDataLength >= data_length);
@@ -16304,7 +16932,7 @@
TEST(load_store_tagged_register_offset) {
- uint64_t tags[] = { 0x00, 0x1, 0x55, 0xff };
+ uint64_t tags[] = {0x00, 0x1, 0x55, 0xff};
int tag_count = sizeof(tags) / sizeof(tags[0]);
const int kMaxDataLength = 128;
@@ -16335,50 +16963,73 @@
}
SETUP();
- ALLOW_ASM();
START();
__ Mov(x0, src_tagged);
__ Mov(x1, dst_tagged);
__ Mov(x10, offset_base + data_length);
- __ ldr(x2, MemOperand(x0, x10));
- __ str(x2, MemOperand(x1, x10));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldr(x2, MemOperand(x0, x10));
+ __ str(x2, MemOperand(x1, x10));
+ }
data_length += kXRegSizeInBytes;
__ Mov(x10, offset_base + data_length);
- __ ldr(d0, MemOperand(x0, x10));
- __ str(d0, MemOperand(x1, x10));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldr(d0, MemOperand(x0, x10));
+ __ str(d0, MemOperand(x1, x10));
+ }
data_length += kDRegSizeInBytes;
__ Mov(x10, offset_base + data_length);
- __ ldr(w2, MemOperand(x0, x10));
- __ str(w2, MemOperand(x1, x10));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldr(w2, MemOperand(x0, x10));
+ __ str(w2, MemOperand(x1, x10));
+ }
data_length += kWRegSizeInBytes;
__ Mov(x10, offset_base + data_length);
- __ ldr(s0, MemOperand(x0, x10));
- __ str(s0, MemOperand(x1, x10));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldr(s0, MemOperand(x0, x10));
+ __ str(s0, MemOperand(x1, x10));
+ }
data_length += kSRegSizeInBytes;
__ Mov(x10, offset_base + data_length);
- __ ldrh(w2, MemOperand(x0, x10));
- __ strh(w2, MemOperand(x1, x10));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldrh(w2, MemOperand(x0, x10));
+ __ strh(w2, MemOperand(x1, x10));
+ }
data_length += 2;
__ Mov(x10, offset_base + data_length);
- __ ldrsh(w2, MemOperand(x0, x10));
- __ strh(w2, MemOperand(x1, x10));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldrsh(w2, MemOperand(x0, x10));
+ __ strh(w2, MemOperand(x1, x10));
+ }
data_length += 2;
__ Mov(x10, offset_base + data_length);
- __ ldrb(w2, MemOperand(x0, x10));
- __ strb(w2, MemOperand(x1, x10));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldrb(w2, MemOperand(x0, x10));
+ __ strb(w2, MemOperand(x1, x10));
+ }
data_length += 1;
__ Mov(x10, offset_base + data_length);
- __ ldrsb(w2, MemOperand(x0, x10));
- __ strb(w2, MemOperand(x1, x10));
+ {
+ ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
+ __ ldrsb(w2, MemOperand(x0, x10));
+ __ strb(w2, MemOperand(x1, x10));
+ }
data_length += 1;
VIXL_ASSERT(kMaxDataLength >= data_length);
@@ -16404,8 +17055,8 @@
TEST(load_store_tagged_register_postindex) {
- uint64_t src[] = { 0x0706050403020100, 0x0f0e0d0c0b0a0908 };
- uint64_t tags[] = { 0x00, 0x1, 0x55, 0xff };
+ uint64_t src[] = {0x0706050403020100, 0x0f0e0d0c0b0a0908};
+ uint64_t tags[] = {0x00, 0x1, 0x55, 0xff};
int tag_count = sizeof(tags) / sizeof(tags[0]);
for (int j = 0; j < tag_count; j++) {
@@ -16546,8 +17197,8 @@
START();
- __ Movi(v0.V2D(), 0xff00aa5500ff55aa, 0xff00aa5500ff55aa);
- __ Movi(v1.V2D(), 0x000055aaff55ff00, 0xaa55ff55555500ff);
+ __ Movi(v0.V2D(), 0xff00aa5500ff55aa, 0xff00aa5500ff55aa);
+ __ Movi(v1.V2D(), 0x000055aaff55ff00, 0xaa55ff55555500ff);
__ Addp(v16.V16B(), v0.V16B(), v1.V16B());
END();
@@ -16763,8 +17414,8 @@
START();
- __ Movi(v0.V2D(), 0xff00aa5500ff55aa, 0xff00aa5500ff55aa);
- __ Movi(v1.V2D(), 0x000055aaff55ff00, 0xaa55ff55555500ff);
+ __ Movi(v0.V2D(), 0xff00aa5500ff55aa, 0xff00aa5500ff55aa);
+ __ Movi(v1.V2D(), 0x000055aaff55ff00, 0xaa55ff55555500ff);
__ Movi(v16.V2D(), 0x0102030405060708, 0x090a0b0c0d0e0f00);
__ Movi(v17.V2D(), 0x0102030405060708, 0x090a0b0c0d0e0f00);
@@ -16782,14 +17433,13 @@
}
-
TEST(neon_3same_absdiff) {
SETUP();
START();
- __ Movi(v0.V2D(), 0xff00aa5500ff55aa, 0xff00aa5500ff55aa);
- __ Movi(v1.V2D(), 0x000055aaff55ff00, 0xaa55ff55555500ff);
+ __ Movi(v0.V2D(), 0xff00aa5500ff55aa, 0xff00aa5500ff55aa);
+ __ Movi(v1.V2D(), 0x000055aaff55ff00, 0xaa55ff55555500ff);
__ Movi(v16.V2D(), 0x0102030405060708, 0x090a0b0c0d0e0f00);
__ Movi(v17.V2D(), 0x0102030405060708, 0x090a0b0c0d0e0f00);
@@ -16814,8 +17464,8 @@
START();
- __ Movi(v0.V2D(), 0xff00aa5500ff55aa, 0xff00aa5500ff55aa);
- __ Movi(v1.V2D(), 0x000155aaff55ff00, 0xaa55ff55555500ff);
+ __ Movi(v0.V2D(), 0xff00aa5500ff55aa, 0xff00aa5500ff55aa);
+ __ Movi(v1.V2D(), 0x000155aaff55ff00, 0xaa55ff55555500ff);
__ Mul(v16.V4H(), v0.V4H(), v1.H(), 0);
@@ -16869,13 +17519,13 @@
START();
- __ Movi(v0.V2D(), 0xaa55ff55555500ff, 0xff00aa5500ff55aa);
- __ Movi(v1.V2D(), 0x000155aaff55ff00, 0xaa55ff55555500ff);
+ __ Movi(v0.V2D(), 0xaa55ff55555500ff, 0xff00aa5500ff55aa);
+ __ Movi(v1.V2D(), 0x000155aaff55ff00, 0xaa55ff55555500ff);
- __ Smull(v16.V4S(), v0.V4H(), v1.H(), 7);
+ __ Smull(v16.V4S(), v0.V4H(), v1.H(), 7);
__ Smull2(v17.V4S(), v0.V8H(), v1.H(), 0);
- __ Umull(v18.V4S(), v0.V4H(), v1.H(), 7);
+ __ Umull(v18.V4S(), v0.V4H(), v1.H(), 7);
__ Umull2(v19.V4S(), v0.V8H(), v1.H(), 0);
__ Movi(v20.V2D(), 0x0000000100000002, 0x0000000200000001);
@@ -16883,9 +17533,9 @@
__ Movi(v22.V2D(), 0x0000000100000002, 0x0000000200000001);
__ Movi(v23.V2D(), 0x0000000100000002, 0x0000000200000001);
- __ Smlal(v20.V4S(), v0.V4H(), v1.H(), 7);
+ __ Smlal(v20.V4S(), v0.V4H(), v1.H(), 7);
__ Smlal2(v21.V4S(), v0.V8H(), v1.H(), 0);
- __ Umlal(v22.V4S(), v0.V4H(), v1.H(), 7);
+ __ Umlal(v22.V4S(), v0.V4H(), v1.H(), 7);
__ Umlal2(v23.V4S(), v0.V8H(), v1.H(), 0);
__ Movi(v24.V2D(), 0xffffff00ffffaa55, 0x000000ff000055aa);
@@ -16893,9 +17543,9 @@
__ Movi(v26.V2D(), 0x0000ff000000aa55, 0x000000ff000055aa);
__ Movi(v27.V2D(), 0x00a9aaab00fe55ab, 0x0054ffab0000fe01);
- __ Smlsl(v24.V4S(), v0.V4H(), v1.H(), 7);
+ __ Smlsl(v24.V4S(), v0.V4H(), v1.H(), 7);
__ Smlsl2(v25.V4S(), v0.V8H(), v1.H(), 0);
- __ Umlsl(v26.V4S(), v0.V4H(), v1.H(), 7);
+ __ Umlsl(v26.V4S(), v0.V4H(), v1.H(), 7);
__ Umlsl2(v27.V4S(), v0.V8H(), v1.H(), 0);
END();
@@ -16974,8 +17624,8 @@
START();
- __ Movi(v0.V2D(), 0xff00aa5500ff55ab, 0xff00aa5500ff55aa);
- __ Movi(v1.V2D(), 0x000055aaff55ff00, 0xaa55ff55555500ff);
+ __ Movi(v0.V2D(), 0xff00aa5500ff55ab, 0xff00aa5500ff55aa);
+ __ Movi(v1.V2D(), 0x000055aaff55ff00, 0xaa55ff55555500ff);
__ Movi(v16.V2D(), 0x0102030405060708, 0x090a0b0c0d0e0f00);
__ Movi(v17.V2D(), 0x0102030405060708, 0x090a0b0c0d0e0f00);
__ Movi(v18.V2D(), 0x0102030405060708, 0x090a0b0c0d0e0f00);
@@ -17002,14 +17652,14 @@
START();
- __ Movi(v0.V2D(), 0x7fff7fff80008000, 0x80007fff7fff8000);
- __ Movi(v1.V2D(), 0x80007fff7fff8000, 0x7fff7fff80008000);
- __ Movi(v2.V2D(), 0x800000007fffffff, 0x7fffffff80000000);
- __ Movi(v3.V2D(), 0x8000000080000000, 0x8000000080000000);
+ __ Movi(v0.V2D(), 0x7fff7fff80008000, 0x80007fff7fff8000);
+ __ Movi(v1.V2D(), 0x80007fff7fff8000, 0x7fff7fff80008000);
+ __ Movi(v2.V2D(), 0x800000007fffffff, 0x7fffffff80000000);
+ __ Movi(v3.V2D(), 0x8000000080000000, 0x8000000080000000);
- __ Sqdmull(v16.V4S(), v0.V4H(), v1.V4H());
+ __ Sqdmull(v16.V4S(), v0.V4H(), v1.V4H());
__ Sqdmull2(v17.V4S(), v0.V8H(), v1.V8H());
- __ Sqdmull(v18.V2D(), v2.V2S(), v3.V2S());
+ __ Sqdmull(v18.V2D(), v2.V2S(), v3.V2S());
__ Sqdmull2(v19.V2D(), v2.V4S(), v3.V4S());
__ Sqdmull(s20, h0, h1);
__ Sqdmull(d21, s2, s3);
@@ -17032,10 +17682,10 @@
START();
- __ Movi(v0.V2D(), 0x7fff7fff80008000, 0x80007fff7fff8000);
- __ Movi(v1.V2D(), 0x80007fff7fff8000, 0x7fff7fff80008000);
- __ Movi(v2.V2D(), 0x800000007fffffff, 0x7fffffff80000000);
- __ Movi(v3.V2D(), 0x8000000080000000, 0x8000000080000000);
+ __ Movi(v0.V2D(), 0x7fff7fff80008000, 0x80007fff7fff8000);
+ __ Movi(v1.V2D(), 0x80007fff7fff8000, 0x7fff7fff80008000);
+ __ Movi(v2.V2D(), 0x800000007fffffff, 0x7fffffff80000000);
+ __ Movi(v3.V2D(), 0x8000000080000000, 0x8000000080000000);
__ Movi(v16.V2D(), 0xffffffff00000001, 0x8fffffff00000001);
__ Movi(v17.V2D(), 0x00000001ffffffff, 0x00000001ffffffff);
@@ -17044,9 +17694,9 @@
__ Movi(v20.V2D(), 0, 0x00000001);
__ Movi(v21.V2D(), 0, 0x00000001);
- __ Sqdmlal(v16.V4S(), v0.V4H(), v1.V4H());
+ __ Sqdmlal(v16.V4S(), v0.V4H(), v1.V4H());
__ Sqdmlal2(v17.V4S(), v0.V8H(), v1.V8H());
- __ Sqdmlal(v18.V2D(), v2.V2S(), v3.V2S());
+ __ Sqdmlal(v18.V2D(), v2.V2S(), v3.V2S());
__ Sqdmlal2(v19.V2D(), v2.V4S(), v3.V4S());
__ Sqdmlal(s20, h0, h1);
__ Sqdmlal(d21, s2, s3);
@@ -17069,10 +17719,10 @@
START();
- __ Movi(v0.V2D(), 0x7fff7fff80008000, 0x80007fff7fff8000);
- __ Movi(v1.V2D(), 0x80007fff7fff8000, 0x7fff7fff80008000);
- __ Movi(v2.V2D(), 0x800000007fffffff, 0x7fffffff80000000);
- __ Movi(v3.V2D(), 0x8000000080000000, 0x8000000080000000);
+ __ Movi(v0.V2D(), 0x7fff7fff80008000, 0x80007fff7fff8000);
+ __ Movi(v1.V2D(), 0x80007fff7fff8000, 0x7fff7fff80008000);
+ __ Movi(v2.V2D(), 0x800000007fffffff, 0x7fffffff80000000);
+ __ Movi(v3.V2D(), 0x8000000080000000, 0x8000000080000000);
__ Movi(v16.V2D(), 0xffffffff00000001, 0x7ffffffe80000001);
__ Movi(v17.V2D(), 0x00000001ffffffff, 0x7ffffffe00000001);
@@ -17081,9 +17731,9 @@
__ Movi(v20.V2D(), 0, 0x00000001);
__ Movi(v21.V2D(), 0, 0x00000001);
- __ Sqdmlsl(v16.V4S(), v0.V4H(), v1.V4H());
+ __ Sqdmlsl(v16.V4S(), v0.V4H(), v1.V4H());
__ Sqdmlsl2(v17.V4S(), v0.V8H(), v1.V8H());
- __ Sqdmlsl(v18.V2D(), v2.V2S(), v3.V2S());
+ __ Sqdmlsl(v18.V2D(), v2.V2S(), v3.V2S());
__ Sqdmlsl2(v19.V2D(), v2.V4S(), v3.V4S());
__ Sqdmlsl(s20, h0, h1);
__ Sqdmlsl(d21, s2, s3);
@@ -17107,8 +17757,8 @@
START();
- __ Movi(v0.V2D(), 0xff00aa5500ff55ab, 0xff00aa5500ff55aa);
- __ Movi(v1.V2D(), 0x000055aaff55ff00, 0xaa55ff55555500ff);
+ __ Movi(v0.V2D(), 0xff00aa5500ff55ab, 0xff00aa5500ff55aa);
+ __ Movi(v1.V2D(), 0x000055aaff55ff00, 0xaa55ff55555500ff);
__ Movi(v16.V2D(), 0x0102030405060708, 0x090a0b0c0d0e0f00);
__ Movi(v17.V2D(), 0x0102030405060708, 0x090a0b0c0d0e0f00);
__ Movi(v18.V2D(), 0x0102030405060708, 0x090a0b0c0d0e0f00);
@@ -17135,8 +17785,8 @@
START();
- __ Movi(v0.V2D(), 0xff00aa5500ff55ab, 0xff00aa5500ff55aa);
- __ Movi(v1.V2D(), 0x000055aaff55ff00, 0xaa55ff55555500ff);
+ __ Movi(v0.V2D(), 0xff00aa5500ff55ab, 0xff00aa5500ff55aa);
+ __ Movi(v1.V2D(), 0x000055aaff55ff00, 0xaa55ff55555500ff);
__ Movi(v16.V2D(), 0x0102030405060708, 0x090a0b0c0d0e0f00);
__ Movi(v17.V2D(), 0x0102030405060708, 0x090a0b0c0d0e0f00);
__ Movi(v18.V2D(), 0x0102030405060708, 0x090a0b0c0d0e0f00);
@@ -17432,15 +18082,15 @@
__ Movi(v0.V2D(), 0x0001000200030004, 0x0000000000000000);
__ Movi(v1.V2D(), 0x000055aaff55ff00, 0x0000ff55555500ff);
- __ Cmeq(v16.V8B(), v1.V8B(), 0);
+ __ Cmeq(v16.V8B(), v1.V8B(), 0);
__ Cmeq(v17.V16B(), v1.V16B(), 0);
- __ Cmeq(v18.V4H(), v1.V4H(), 0);
- __ Cmeq(v19.V8H(), v1.V8H(), 0);
- __ Cmeq(v20.V2S(), v0.V2S(), 0);
- __ Cmeq(v21.V4S(), v0.V4S(), 0);
- __ Cmeq(d22, d0, 0);
- __ Cmeq(d23, d1, 0);
- __ Cmeq(v24.V2D(), v0.V2D(), 0);
+ __ Cmeq(v18.V4H(), v1.V4H(), 0);
+ __ Cmeq(v19.V8H(), v1.V8H(), 0);
+ __ Cmeq(v20.V2S(), v0.V2S(), 0);
+ __ Cmeq(v21.V4S(), v0.V4S(), 0);
+ __ Cmeq(d22, d0, 0);
+ __ Cmeq(d23, d1, 0);
+ __ Cmeq(v24.V2D(), v0.V2D(), 0);
END();
@@ -17466,15 +18116,15 @@
__ Movi(v0.V2D(), 0xff01000200030004, 0x0000000000000000);
__ Movi(v1.V2D(), 0x000055aaff55ff00, 0x0000ff55555500ff);
- __ Cmge(v16.V8B(), v1.V8B(), 0);
+ __ Cmge(v16.V8B(), v1.V8B(), 0);
__ Cmge(v17.V16B(), v1.V16B(), 0);
- __ Cmge(v18.V4H(), v1.V4H(), 0);
- __ Cmge(v19.V8H(), v1.V8H(), 0);
- __ Cmge(v20.V2S(), v0.V2S(), 0);
- __ Cmge(v21.V4S(), v0.V4S(), 0);
- __ Cmge(d22, d0, 0);
- __ Cmge(d23, d1, 0);
- __ Cmge(v24.V2D(), v0.V2D(), 0);
+ __ Cmge(v18.V4H(), v1.V4H(), 0);
+ __ Cmge(v19.V8H(), v1.V8H(), 0);
+ __ Cmge(v20.V2S(), v0.V2S(), 0);
+ __ Cmge(v21.V4S(), v0.V4S(), 0);
+ __ Cmge(d22, d0, 0);
+ __ Cmge(d23, d1, 0);
+ __ Cmge(v24.V2D(), v0.V2D(), 0);
END();
@@ -17500,15 +18150,15 @@
__ Movi(v0.V2D(), 0x0001000200030004, 0xff00000000000000);
__ Movi(v1.V2D(), 0x000055aaff55ff00, 0x0000ff55555500ff);
- __ Cmlt(v16.V8B(), v1.V8B(), 0);
+ __ Cmlt(v16.V8B(), v1.V8B(), 0);
__ Cmlt(v17.V16B(), v1.V16B(), 0);
- __ Cmlt(v18.V4H(), v1.V4H(), 0);
- __ Cmlt(v19.V8H(), v1.V8H(), 0);
- __ Cmlt(v20.V2S(), v1.V2S(), 0);
- __ Cmlt(v21.V4S(), v1.V4S(), 0);
- __ Cmlt(d22, d0, 0);
- __ Cmlt(d23, d1, 0);
- __ Cmlt(v24.V2D(), v0.V2D(), 0);
+ __ Cmlt(v18.V4H(), v1.V4H(), 0);
+ __ Cmlt(v19.V8H(), v1.V8H(), 0);
+ __ Cmlt(v20.V2S(), v1.V2S(), 0);
+ __ Cmlt(v21.V4S(), v1.V4S(), 0);
+ __ Cmlt(d22, d0, 0);
+ __ Cmlt(d23, d1, 0);
+ __ Cmlt(v24.V2D(), v0.V2D(), 0);
END();
@@ -17534,15 +18184,15 @@
__ Movi(v0.V2D(), 0x0001000200030004, 0x0000000000000000);
__ Movi(v1.V2D(), 0x000055aaff55ff00, 0x0000ff55555500ff);
- __ Cmle(v16.V8B(), v1.V8B(), 0);
+ __ Cmle(v16.V8B(), v1.V8B(), 0);
__ Cmle(v17.V16B(), v1.V16B(), 0);
- __ Cmle(v18.V4H(), v1.V4H(), 0);
- __ Cmle(v19.V8H(), v1.V8H(), 0);
- __ Cmle(v20.V2S(), v1.V2S(), 0);
- __ Cmle(v21.V4S(), v1.V4S(), 0);
- __ Cmle(d22, d0, 0);
- __ Cmle(d23, d1, 0);
- __ Cmle(v24.V2D(), v0.V2D(), 0);
+ __ Cmle(v18.V4H(), v1.V4H(), 0);
+ __ Cmle(v19.V8H(), v1.V8H(), 0);
+ __ Cmle(v20.V2S(), v1.V2S(), 0);
+ __ Cmle(v21.V4S(), v1.V4S(), 0);
+ __ Cmle(d22, d0, 0);
+ __ Cmle(d23, d1, 0);
+ __ Cmle(v24.V2D(), v0.V2D(), 0);
END();
@@ -17568,15 +18218,15 @@
__ Movi(v0.V2D(), 0x0001000200030004, 0x0000000000000000);
__ Movi(v1.V2D(), 0x000055aaff55ff00, 0x0000ff55555500ff);
- __ Cmgt(v16.V8B(), v1.V8B(), 0);
+ __ Cmgt(v16.V8B(), v1.V8B(), 0);
__ Cmgt(v17.V16B(), v1.V16B(), 0);
- __ Cmgt(v18.V4H(), v1.V4H(), 0);
- __ Cmgt(v19.V8H(), v1.V8H(), 0);
- __ Cmgt(v20.V2S(), v0.V2S(), 0);
- __ Cmgt(v21.V4S(), v0.V4S(), 0);
- __ Cmgt(d22, d0, 0);
- __ Cmgt(d23, d1, 0);
- __ Cmgt(v24.V2D(), v0.V2D(), 0);
+ __ Cmgt(v18.V4H(), v1.V4H(), 0);
+ __ Cmgt(v19.V8H(), v1.V8H(), 0);
+ __ Cmgt(v20.V2S(), v0.V2S(), 0);
+ __ Cmgt(v21.V4S(), v0.V4S(), 0);
+ __ Cmgt(d22, d0, 0);
+ __ Cmgt(d23, d1, 0);
+ __ Cmgt(v24.V2D(), v0.V2D(), 0);
END();
@@ -17605,15 +18255,15 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Neg(v16.V8B(), v0.V8B());
+ __ Neg(v16.V8B(), v0.V8B());
__ Neg(v17.V16B(), v0.V16B());
- __ Neg(v18.V4H(), v1.V4H());
- __ Neg(v19.V8H(), v1.V8H());
- __ Neg(v20.V2S(), v2.V2S());
- __ Neg(v21.V4S(), v2.V4S());
+ __ Neg(v18.V4H(), v1.V4H());
+ __ Neg(v19.V8H(), v1.V8H());
+ __ Neg(v20.V2S(), v2.V2S());
+ __ Neg(v21.V4S(), v2.V4S());
__ Neg(d22, d3);
- __ Neg(v23.V2D(), v3.V2D());
- __ Neg(v24.V2D(), v4.V2D());
+ __ Neg(v23.V2D(), v3.V2D());
+ __ Neg(v24.V2D(), v4.V2D());
END();
@@ -17643,14 +18293,14 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Sqneg(v16.V8B(), v0.V8B());
+ __ Sqneg(v16.V8B(), v0.V8B());
__ Sqneg(v17.V16B(), v0.V16B());
- __ Sqneg(v18.V4H(), v1.V4H());
- __ Sqneg(v19.V8H(), v1.V8H());
- __ Sqneg(v20.V2S(), v2.V2S());
- __ Sqneg(v21.V4S(), v2.V4S());
- __ Sqneg(v22.V2D(), v3.V2D());
- __ Sqneg(v23.V2D(), v4.V2D());
+ __ Sqneg(v18.V4H(), v1.V4H());
+ __ Sqneg(v19.V8H(), v1.V8H());
+ __ Sqneg(v20.V2S(), v2.V2S());
+ __ Sqneg(v21.V4S(), v2.V4S());
+ __ Sqneg(v22.V2D(), v3.V2D());
+ __ Sqneg(v23.V2D(), v4.V2D());
__ Sqneg(b24, b0);
__ Sqneg(h25, h1);
@@ -17689,15 +18339,15 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Abs(v16.V8B(), v0.V8B());
+ __ Abs(v16.V8B(), v0.V8B());
__ Abs(v17.V16B(), v0.V16B());
- __ Abs(v18.V4H(), v1.V4H());
- __ Abs(v19.V8H(), v1.V8H());
- __ Abs(v20.V2S(), v2.V2S());
- __ Abs(v21.V4S(), v2.V4S());
+ __ Abs(v18.V4H(), v1.V4H());
+ __ Abs(v19.V8H(), v1.V8H());
+ __ Abs(v20.V2S(), v2.V2S());
+ __ Abs(v21.V4S(), v2.V4S());
__ Abs(d22, d3);
- __ Abs(v23.V2D(), v3.V2D());
- __ Abs(v24.V2D(), v4.V2D());
+ __ Abs(v23.V2D(), v3.V2D());
+ __ Abs(v24.V2D(), v4.V2D());
END();
@@ -17727,14 +18377,14 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Sqabs(v16.V8B(), v0.V8B());
+ __ Sqabs(v16.V8B(), v0.V8B());
__ Sqabs(v17.V16B(), v0.V16B());
- __ Sqabs(v18.V4H(), v1.V4H());
- __ Sqabs(v19.V8H(), v1.V8H());
- __ Sqabs(v20.V2S(), v2.V2S());
- __ Sqabs(v21.V4S(), v2.V4S());
- __ Sqabs(v22.V2D(), v3.V2D());
- __ Sqabs(v23.V2D(), v4.V2D());
+ __ Sqabs(v18.V4H(), v1.V4H());
+ __ Sqabs(v19.V8H(), v1.V8H());
+ __ Sqabs(v20.V2S(), v2.V2S());
+ __ Sqabs(v21.V4S(), v2.V4S());
+ __ Sqabs(v22.V2D(), v3.V2D());
+ __ Sqabs(v23.V2D(), v4.V2D());
__ Sqabs(b24, b0);
__ Sqabs(h25, h1);
@@ -17955,12 +18605,12 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Xtn(v16.V8B(), v0.V8H());
+ __ Xtn(v16.V8B(), v0.V8H());
__ Xtn2(v16.V16B(), v1.V8H());
- __ Xtn(v17.V4H(), v1.V4S());
- __ Xtn2(v17.V8H(), v2.V4S());
- __ Xtn(v18.V2S(), v3.V2D());
- __ Xtn2(v18.V4S(), v4.V2D());
+ __ Xtn(v17.V4H(), v1.V4S());
+ __ Xtn2(v17.V8H(), v2.V4S());
+ __ Xtn(v18.V2S(), v3.V2D());
+ __ Xtn2(v18.V4S(), v4.V2D());
END();
@@ -17983,15 +18633,15 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Sqxtn(v16.V8B(), v0.V8H());
+ __ Sqxtn(v16.V8B(), v0.V8H());
__ Sqxtn2(v16.V16B(), v1.V8H());
- __ Sqxtn(v17.V4H(), v1.V4S());
- __ Sqxtn2(v17.V8H(), v2.V4S());
- __ Sqxtn(v18.V2S(), v3.V2D());
- __ Sqxtn2(v18.V4S(), v4.V2D());
- __ Sqxtn(b19, h0);
- __ Sqxtn(h20, s0);
- __ Sqxtn(s21, d0);
+ __ Sqxtn(v17.V4H(), v1.V4S());
+ __ Sqxtn2(v17.V8H(), v2.V4S());
+ __ Sqxtn(v18.V2S(), v3.V2D());
+ __ Sqxtn2(v18.V4S(), v4.V2D());
+ __ Sqxtn(b19, h0);
+ __ Sqxtn(h20, s0);
+ __ Sqxtn(s21, d0);
END();
@@ -18017,15 +18667,15 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Uqxtn(v16.V8B(), v0.V8H());
+ __ Uqxtn(v16.V8B(), v0.V8H());
__ Uqxtn2(v16.V16B(), v1.V8H());
- __ Uqxtn(v17.V4H(), v1.V4S());
- __ Uqxtn2(v17.V8H(), v2.V4S());
- __ Uqxtn(v18.V2S(), v3.V2D());
- __ Uqxtn2(v18.V4S(), v4.V2D());
- __ Uqxtn(b19, h0);
- __ Uqxtn(h20, s0);
- __ Uqxtn(s21, d0);
+ __ Uqxtn(v17.V4H(), v1.V4S());
+ __ Uqxtn2(v17.V8H(), v2.V4S());
+ __ Uqxtn(v18.V2S(), v3.V2D());
+ __ Uqxtn2(v18.V4S(), v4.V2D());
+ __ Uqxtn(b19, h0);
+ __ Uqxtn(h20, s0);
+ __ Uqxtn(s21, d0);
END();
@@ -18051,15 +18701,15 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Sqxtun(v16.V8B(), v0.V8H());
+ __ Sqxtun(v16.V8B(), v0.V8H());
__ Sqxtun2(v16.V16B(), v1.V8H());
- __ Sqxtun(v17.V4H(), v1.V4S());
- __ Sqxtun2(v17.V8H(), v2.V4S());
- __ Sqxtun(v18.V2S(), v3.V2D());
- __ Sqxtun2(v18.V4S(), v4.V2D());
- __ Sqxtun(b19, h0);
- __ Sqxtun(h20, s0);
- __ Sqxtun(s21, d0);
+ __ Sqxtun(v17.V4H(), v1.V4S());
+ __ Sqxtun2(v17.V8H(), v2.V4S());
+ __ Sqxtun(v18.V2S(), v3.V2D());
+ __ Sqxtun2(v18.V4S(), v4.V2D());
+ __ Sqxtun(b19, h0);
+ __ Sqxtun(h20, s0);
+ __ Sqxtun(s21, d0);
END();
@@ -18083,8 +18733,8 @@
__ And(v16.V16B(), v0.V16B(), v0.V16B()); // self test
__ And(v17.V16B(), v0.V16B(), v1.V16B()); // all combinations
- __ And(v24.V8B(), v0.V8B(), v0.V8B()); // self test
- __ And(v25.V8B(), v0.V8B(), v1.V8B()); // all combinations
+ __ And(v24.V8B(), v0.V8B(), v0.V8B()); // self test
+ __ And(v25.V8B(), v0.V8B(), v1.V8B()); // all combinations
END();
RUN();
@@ -18105,8 +18755,8 @@
__ Bic(v16.V16B(), v0.V16B(), v0.V16B()); // self test
__ Bic(v17.V16B(), v0.V16B(), v1.V16B()); // all combinations
- __ Bic(v24.V8B(), v0.V8B(), v0.V8B()); // self test
- __ Bic(v25.V8B(), v0.V8B(), v1.V8B()); // all combinations
+ __ Bic(v24.V8B(), v0.V8B(), v0.V8B()); // self test
+ __ Bic(v25.V8B(), v0.V8B(), v1.V8B()); // all combinations
END();
RUN();
@@ -18127,8 +18777,8 @@
__ Orr(v16.V16B(), v0.V16B(), v0.V16B()); // self test
__ Orr(v17.V16B(), v0.V16B(), v1.V16B()); // all combinations
- __ Orr(v24.V8B(), v0.V8B(), v0.V8B()); // self test
- __ Orr(v25.V8B(), v0.V8B(), v1.V8B()); // all combinations
+ __ Orr(v24.V8B(), v0.V8B(), v0.V8B()); // self test
+ __ Orr(v25.V8B(), v0.V8B(), v1.V8B()); // all combinations
END();
RUN();
@@ -18180,8 +18830,8 @@
__ Orn(v16.V16B(), v0.V16B(), v0.V16B()); // self test
__ Orn(v17.V16B(), v0.V16B(), v1.V16B()); // all combinations
- __ Orn(v24.V8B(), v0.V8B(), v0.V8B()); // self test
- __ Orn(v25.V8B(), v0.V8B(), v1.V8B()); // all combinations
+ __ Orn(v24.V8B(), v0.V8B(), v0.V8B()); // self test
+ __ Orn(v25.V8B(), v0.V8B(), v1.V8B()); // all combinations
END();
RUN();
@@ -18202,8 +18852,8 @@
__ Eor(v16.V16B(), v0.V16B(), v0.V16B()); // self test
__ Eor(v17.V16B(), v0.V16B(), v1.V16B()); // all combinations
- __ Eor(v24.V8B(), v0.V8B(), v0.V8B()); // self test
- __ Eor(v25.V8B(), v0.V8B(), v1.V8B()); // all combinations
+ __ Eor(v24.V8B(), v0.V8B(), v0.V8B()); // self test
+ __ Eor(v25.V8B(), v0.V8B(), v1.V8B()); // all combinations
END();
RUN();
@@ -18727,21 +19377,21 @@
__ Movi(v0.V2D(), 0x0001020304050607, 0x08090a0b0c0d0e0f);
__ Movi(v1.V2D(), 0xfedcba9876543210, 0x0123456789abcdef);
- __ Cls(v16.V8B() , v1.V8B());
+ __ Cls(v16.V8B(), v1.V8B());
__ Cls(v17.V16B(), v1.V16B());
- __ Cls(v18.V4H() , v1.V4H());
- __ Cls(v19.V8H() , v1.V8H());
- __ Cls(v20.V2S() , v1.V2S());
- __ Cls(v21.V4S() , v1.V4S());
+ __ Cls(v18.V4H(), v1.V4H());
+ __ Cls(v19.V8H(), v1.V8H());
+ __ Cls(v20.V2S(), v1.V2S());
+ __ Cls(v21.V4S(), v1.V4S());
- __ Clz(v22.V8B() , v0.V8B());
+ __ Clz(v22.V8B(), v0.V8B());
__ Clz(v23.V16B(), v0.V16B());
- __ Clz(v24.V4H() , v0.V4H());
- __ Clz(v25.V8H() , v0.V8H());
- __ Clz(v26.V2S() , v0.V2S());
- __ Clz(v27.V4S() , v0.V4S());
+ __ Clz(v24.V4H(), v0.V4H());
+ __ Clz(v25.V8H(), v0.V8H());
+ __ Clz(v26.V2S(), v0.V2S());
+ __ Clz(v27.V4S(), v0.V4S());
- __ Cnt(v28.V8B() , v0.V8B());
+ __ Cnt(v28.V8B(), v0.V8B());
__ Cnt(v29.V16B(), v1.V16B());
END();
@@ -18776,22 +19426,22 @@
__ Movi(v0.V2D(), 0x0001020304050607, 0x08090a0b0c0d0e0f);
__ Movi(v1.V2D(), 0xfedcba9876543210, 0x0123456789abcdef);
- __ Rev16(v16.V8B() , v0.V8B());
+ __ Rev16(v16.V8B(), v0.V8B());
__ Rev16(v17.V16B(), v0.V16B());
- __ Rev32(v18.V8B() , v0.V8B());
+ __ Rev32(v18.V8B(), v0.V8B());
__ Rev32(v19.V16B(), v0.V16B());
- __ Rev32(v20.V4H() , v0.V4H());
- __ Rev32(v21.V8H() , v0.V8H());
+ __ Rev32(v20.V4H(), v0.V4H());
+ __ Rev32(v21.V8H(), v0.V8H());
- __ Rev64(v22.V8B() , v0.V8B());
+ __ Rev64(v22.V8B(), v0.V8B());
__ Rev64(v23.V16B(), v0.V16B());
- __ Rev64(v24.V4H() , v0.V4H());
- __ Rev64(v25.V8H() , v0.V8H());
- __ Rev64(v26.V2S() , v0.V2S());
- __ Rev64(v27.V4S() , v0.V4S());
+ __ Rev64(v24.V4H(), v0.V4H());
+ __ Rev64(v25.V8H(), v0.V8H());
+ __ Rev64(v26.V2S(), v0.V2S());
+ __ Rev64(v27.V4S(), v0.V4S());
- __ Rbit(v28.V8B() , v1.V8B());
+ __ Rbit(v28.V8B(), v1.V8B());
__ Rbit(v29.V16B(), v1.V16B());
END();
@@ -18828,24 +19478,24 @@
__ Movi(v0.V2D(), 0x0001020304050607, 0x08090a0b0c0d0e0f);
__ Movi(v1.V2D(), 0xfedcba9876543210, 0x0123456789abcdef);
- __ Mov(v16.V2D(), v0.V2D());
- __ Mov(v17.V2D(), v0.V2D());
- __ Mov(v18.V2D(), v0.V2D());
- __ Mov(v19.V2D(), v0.V2D());
- __ Mov(v20.V2D(), v0.V2D());
- __ Mov(v21.V2D(), v0.V2D());
- __ Mov(v22.V2D(), v0.V2D());
- __ Mov(v23.V2D(), v0.V2D());
+ __ Mov(v16.V2D(), v0.V2D());
+ __ Mov(v17.V2D(), v0.V2D());
+ __ Mov(v18.V2D(), v0.V2D());
+ __ Mov(v19.V2D(), v0.V2D());
+ __ Mov(v20.V2D(), v0.V2D());
+ __ Mov(v21.V2D(), v0.V2D());
+ __ Mov(v22.V2D(), v0.V2D());
+ __ Mov(v23.V2D(), v0.V2D());
- __ Sli(v16.V8B(), v1.V8B(), 4);
+ __ Sli(v16.V8B(), v1.V8B(), 4);
__ Sli(v17.V16B(), v1.V16B(), 7);
- __ Sli(v18.V4H(), v1.V4H(), 8);
- __ Sli(v19.V8H(), v1.V8H(), 15);
- __ Sli(v20.V2S(), v1.V2S(), 0);
- __ Sli(v21.V4S(), v1.V4S(), 31);
- __ Sli(v22.V2D(), v1.V2D(), 48);
+ __ Sli(v18.V4H(), v1.V4H(), 8);
+ __ Sli(v19.V8H(), v1.V8H(), 15);
+ __ Sli(v20.V2S(), v1.V2S(), 0);
+ __ Sli(v21.V4S(), v1.V4S(), 31);
+ __ Sli(v22.V2D(), v1.V2D(), 48);
- __ Sli(d23, d1, 48);
+ __ Sli(d23, d1, 48);
END();
@@ -18874,24 +19524,24 @@
__ Movi(v0.V2D(), 0x0001020304050607, 0x08090a0b0c0d0e0f);
__ Movi(v1.V2D(), 0xfedcba9876543210, 0x0123456789abcdef);
- __ Mov(v16.V2D(), v0.V2D());
- __ Mov(v17.V2D(), v0.V2D());
- __ Mov(v18.V2D(), v0.V2D());
- __ Mov(v19.V2D(), v0.V2D());
- __ Mov(v20.V2D(), v0.V2D());
- __ Mov(v21.V2D(), v0.V2D());
- __ Mov(v22.V2D(), v0.V2D());
- __ Mov(v23.V2D(), v0.V2D());
+ __ Mov(v16.V2D(), v0.V2D());
+ __ Mov(v17.V2D(), v0.V2D());
+ __ Mov(v18.V2D(), v0.V2D());
+ __ Mov(v19.V2D(), v0.V2D());
+ __ Mov(v20.V2D(), v0.V2D());
+ __ Mov(v21.V2D(), v0.V2D());
+ __ Mov(v22.V2D(), v0.V2D());
+ __ Mov(v23.V2D(), v0.V2D());
- __ Sri(v16.V8B(), v1.V8B(), 4);
+ __ Sri(v16.V8B(), v1.V8B(), 4);
__ Sri(v17.V16B(), v1.V16B(), 7);
- __ Sri(v18.V4H(), v1.V4H(), 8);
- __ Sri(v19.V8H(), v1.V8H(), 15);
- __ Sri(v20.V2S(), v1.V2S(), 1);
- __ Sri(v21.V4S(), v1.V4S(), 31);
- __ Sri(v22.V2D(), v1.V2D(), 48);
+ __ Sri(v18.V4H(), v1.V4H(), 8);
+ __ Sri(v19.V8H(), v1.V8H(), 15);
+ __ Sri(v20.V2S(), v1.V2S(), 1);
+ __ Sri(v21.V4S(), v1.V4S(), 31);
+ __ Sri(v22.V2D(), v1.V2D(), 48);
- __ Sri(d23, d1, 48);
+ __ Sri(d23, d1, 48);
END();
@@ -18923,12 +19573,12 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Shrn(v16.V8B(), v0.V8H(), 8);
+ __ Shrn(v16.V8B(), v0.V8H(), 8);
__ Shrn2(v16.V16B(), v1.V8H(), 1);
- __ Shrn(v17.V4H(), v1.V4S(), 16);
- __ Shrn2(v17.V8H(), v2.V4S(), 1);
- __ Shrn(v18.V2S(), v3.V2D(), 32);
- __ Shrn2(v18.V4S(), v3.V2D(), 1);
+ __ Shrn(v17.V4H(), v1.V4S(), 16);
+ __ Shrn2(v17.V8H(), v2.V4S(), 1);
+ __ Shrn(v18.V2S(), v3.V2D(), 32);
+ __ Shrn2(v18.V4S(), v3.V2D(), 1);
END();
@@ -18951,12 +19601,12 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Rshrn(v16.V8B(), v0.V8H(), 8);
+ __ Rshrn(v16.V8B(), v0.V8H(), 8);
__ Rshrn2(v16.V16B(), v1.V8H(), 1);
- __ Rshrn(v17.V4H(), v1.V4S(), 16);
- __ Rshrn2(v17.V8H(), v2.V4S(), 1);
- __ Rshrn(v18.V2S(), v3.V2D(), 32);
- __ Rshrn2(v18.V4S(), v3.V2D(), 1);
+ __ Rshrn(v17.V4H(), v1.V4S(), 16);
+ __ Rshrn2(v17.V8H(), v2.V4S(), 1);
+ __ Rshrn(v18.V2S(), v3.V2D(), 32);
+ __ Rshrn2(v18.V4S(), v3.V2D(), 1);
END();
@@ -18979,12 +19629,12 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Uqshrn(v16.V8B(), v0.V8H(), 8);
+ __ Uqshrn(v16.V8B(), v0.V8H(), 8);
__ Uqshrn2(v16.V16B(), v1.V8H(), 1);
- __ Uqshrn(v17.V4H(), v1.V4S(), 16);
- __ Uqshrn2(v17.V8H(), v2.V4S(), 1);
- __ Uqshrn(v18.V2S(), v3.V2D(), 32);
- __ Uqshrn2(v18.V4S(), v3.V2D(), 1);
+ __ Uqshrn(v17.V4H(), v1.V4S(), 16);
+ __ Uqshrn2(v17.V8H(), v2.V4S(), 1);
+ __ Uqshrn(v18.V2S(), v3.V2D(), 32);
+ __ Uqshrn2(v18.V4S(), v3.V2D(), 1);
__ Uqshrn(b19, h0, 8);
__ Uqshrn(h20, s1, 16);
@@ -19014,12 +19664,12 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Uqrshrn(v16.V8B(), v0.V8H(), 8);
+ __ Uqrshrn(v16.V8B(), v0.V8H(), 8);
__ Uqrshrn2(v16.V16B(), v1.V8H(), 1);
- __ Uqrshrn(v17.V4H(), v1.V4S(), 16);
- __ Uqrshrn2(v17.V8H(), v2.V4S(), 1);
- __ Uqrshrn(v18.V2S(), v3.V2D(), 32);
- __ Uqrshrn2(v18.V4S(), v3.V2D(), 1);
+ __ Uqrshrn(v17.V4H(), v1.V4S(), 16);
+ __ Uqrshrn2(v17.V8H(), v2.V4S(), 1);
+ __ Uqrshrn(v18.V2S(), v3.V2D(), 32);
+ __ Uqrshrn2(v18.V4S(), v3.V2D(), 1);
__ Uqrshrn(b19, h0, 8);
__ Uqrshrn(h20, s1, 16);
@@ -19049,12 +19699,12 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Sqshrn(v16.V8B(), v0.V8H(), 8);
+ __ Sqshrn(v16.V8B(), v0.V8H(), 8);
__ Sqshrn2(v16.V16B(), v1.V8H(), 1);
- __ Sqshrn(v17.V4H(), v1.V4S(), 16);
- __ Sqshrn2(v17.V8H(), v2.V4S(), 1);
- __ Sqshrn(v18.V2S(), v3.V2D(), 32);
- __ Sqshrn2(v18.V4S(), v3.V2D(), 1);
+ __ Sqshrn(v17.V4H(), v1.V4S(), 16);
+ __ Sqshrn2(v17.V8H(), v2.V4S(), 1);
+ __ Sqshrn(v18.V2S(), v3.V2D(), 32);
+ __ Sqshrn2(v18.V4S(), v3.V2D(), 1);
__ Sqshrn(b19, h0, 8);
__ Sqshrn(h20, s1, 16);
@@ -19084,12 +19734,12 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Sqrshrn(v16.V8B(), v0.V8H(), 8);
+ __ Sqrshrn(v16.V8B(), v0.V8H(), 8);
__ Sqrshrn2(v16.V16B(), v1.V8H(), 1);
- __ Sqrshrn(v17.V4H(), v1.V4S(), 16);
- __ Sqrshrn2(v17.V8H(), v2.V4S(), 1);
- __ Sqrshrn(v18.V2S(), v3.V2D(), 32);
- __ Sqrshrn2(v18.V4S(), v3.V2D(), 1);
+ __ Sqrshrn(v17.V4H(), v1.V4S(), 16);
+ __ Sqrshrn2(v17.V8H(), v2.V4S(), 1);
+ __ Sqrshrn(v18.V2S(), v3.V2D(), 32);
+ __ Sqrshrn2(v18.V4S(), v3.V2D(), 1);
__ Sqrshrn(b19, h0, 8);
__ Sqrshrn(h20, s1, 16);
@@ -19119,12 +19769,12 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Sqshrun(v16.V8B(), v0.V8H(), 8);
+ __ Sqshrun(v16.V8B(), v0.V8H(), 8);
__ Sqshrun2(v16.V16B(), v1.V8H(), 1);
- __ Sqshrun(v17.V4H(), v1.V4S(), 16);
- __ Sqshrun2(v17.V8H(), v2.V4S(), 1);
- __ Sqshrun(v18.V2S(), v3.V2D(), 32);
- __ Sqshrun2(v18.V4S(), v3.V2D(), 1);
+ __ Sqshrun(v17.V4H(), v1.V4S(), 16);
+ __ Sqshrun2(v17.V8H(), v2.V4S(), 1);
+ __ Sqshrun(v18.V2S(), v3.V2D(), 32);
+ __ Sqshrun2(v18.V4S(), v3.V2D(), 1);
__ Sqshrun(b19, h0, 8);
__ Sqshrun(h20, s1, 16);
@@ -19154,12 +19804,12 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Sqrshrun(v16.V8B(), v0.V8H(), 8);
+ __ Sqrshrun(v16.V8B(), v0.V8H(), 8);
__ Sqrshrun2(v16.V16B(), v1.V8H(), 1);
- __ Sqrshrun(v17.V4H(), v1.V4S(), 16);
- __ Sqrshrun2(v17.V8H(), v2.V4S(), 1);
- __ Sqrshrun(v18.V2S(), v3.V2D(), 32);
- __ Sqrshrun2(v18.V4S(), v3.V2D(), 1);
+ __ Sqrshrun(v17.V4H(), v1.V4S(), 16);
+ __ Sqrshrun2(v17.V8H(), v2.V4S(), 1);
+ __ Sqrshrun(v18.V2S(), v3.V2D(), 32);
+ __ Sqrshrun2(v18.V4S(), v3.V2D(), 1);
__ Sqrshrun(b19, h0, 8);
__ Sqrshrun(h20, s1, 16);
@@ -19344,10 +19994,10 @@
START();
- __ Movi(v0.V8B(), 0xaa);
+ __ Movi(v0.V8B(), 0xaa);
__ Movi(v1.V16B(), 0x55);
- __ Movi(d2, 0x00ffff0000ffffff);
+ __ Movi(d2, 0x00ffff0000ffffff);
__ Movi(v3.V2D(), 0x00ffff0000ffffff);
__ Movi(v16.V4H(), 0x00, LSL, 0);
@@ -19614,9 +20264,9 @@
__ Movi(v5.V2D(), 0x0011223344556677, 0x0123456789abcdef);
__ Dup(v16.V16B(), v0.B(), 0);
- __ Dup(v17.V8H(), v1.H(), 7);
- __ Dup(v18.V4S(), v1.S(), 3);
- __ Dup(v19.V2D(), v0.D(), 0);
+ __ Dup(v17.V8H(), v1.H(), 7);
+ __ Dup(v18.V4S(), v1.S(), 3);
+ __ Dup(v19.V2D(), v0.D(), 0);
__ Dup(v20.V8B(), v0.B(), 0);
__ Dup(v21.V4H(), v1.H(), 7);
@@ -19628,9 +20278,9 @@
__ Dup(v26.D(), v0.D(), 0);
__ Dup(v2.V16B(), v2.B(), 0);
- __ Dup(v3.V8H(), v3.H(), 7);
- __ Dup(v4.V4S(), v4.S(), 0);
- __ Dup(v5.V2D(), v5.D(), 1);
+ __ Dup(v3.V8H(), v3.H(), 7);
+ __ Dup(v4.V4S(), v4.S(), 0);
+ __ Dup(v5.V2D(), v5.D(), 1);
END();
@@ -19666,18 +20316,18 @@
__ Mov(x0, 0x0011223344556677);
__ Dup(v16.V16B(), w0);
- __ Dup(v17.V8H(), w0);
- __ Dup(v18.V4S(), w0);
- __ Dup(v19.V2D(), x0);
+ __ Dup(v17.V8H(), w0);
+ __ Dup(v18.V4S(), w0);
+ __ Dup(v19.V2D(), x0);
__ Dup(v20.V8B(), w0);
__ Dup(v21.V4H(), w0);
__ Dup(v22.V2S(), w0);
__ Dup(v2.V16B(), wzr);
- __ Dup(v3.V8H(), wzr);
- __ Dup(v4.V4S(), wzr);
- __ Dup(v5.V2D(), xzr);
+ __ Dup(v3.V8H(), wzr);
+ __ Dup(v4.V4S(), wzr);
+ __ Dup(v5.V2D(), xzr);
END();
@@ -19705,8 +20355,8 @@
START();
- __ Movi(v0.V2D(), 0x0011223344556677, 0x8899aabbccddeeff);
- __ Movi(v1.V2D(), 0xffeddccbbaae9988, 0x7766554433221100);
+ __ Movi(v0.V2D(), 0x0011223344556677, 0x8899aabbccddeeff);
+ __ Movi(v1.V2D(), 0xffeddccbbaae9988, 0x7766554433221100);
__ Movi(v16.V2D(), 0x0123456789abcdef, 0xfedcba9876543210);
__ Movi(v17.V2D(), 0xfedcba9876543210, 0x0123456789abcdef);
__ Movi(v18.V2D(), 0x0011223344556677, 0x8899aabbccddeeff);
@@ -19718,14 +20368,14 @@
__ Movi(v5.V2D(), 0, 0x0123456789abcdef);
__ Ins(v16.V16B(), 15, v0.V16B(), 0);
- __ Ins(v17.V8H(), 0, v1.V8H(), 7);
- __ Ins(v18.V4S(), 3, v1.V4S(), 0);
- __ Ins(v19.V2D(), 1, v0.V2D(), 0);
+ __ Ins(v17.V8H(), 0, v1.V8H(), 7);
+ __ Ins(v18.V4S(), 3, v1.V4S(), 0);
+ __ Ins(v19.V2D(), 1, v0.V2D(), 0);
__ Ins(v2.V16B(), 2, v2.V16B(), 0);
- __ Ins(v3.V8H(), 0, v3.V8H(), 7);
- __ Ins(v4.V4S(), 3, v4.V4S(), 0);
- __ Ins(v5.V2D(), 0, v5.V2D(), 1);
+ __ Ins(v3.V8H(), 0, v3.V8H(), 7);
+ __ Ins(v4.V4S(), 3, v4.V4S(), 0);
+ __ Ins(v5.V2D(), 0, v5.V2D(), 1);
END();
@@ -19749,8 +20399,8 @@
START();
- __ Movi(v0.V2D(), 0x0011223344556677, 0x8899aabbccddeeff);
- __ Movi(v1.V2D(), 0xffeddccbbaae9988, 0x7766554433221100);
+ __ Movi(v0.V2D(), 0x0011223344556677, 0x8899aabbccddeeff);
+ __ Movi(v1.V2D(), 0xffeddccbbaae9988, 0x7766554433221100);
__ Movi(v16.V2D(), 0x0123456789abcdef, 0xfedcba9876543210);
__ Movi(v17.V2D(), 0xfedcba9876543210, 0x0123456789abcdef);
__ Movi(v18.V2D(), 0x0011223344556677, 0x8899aabbccddeeff);
@@ -19762,14 +20412,14 @@
__ Movi(v5.V2D(), 0, 0x0123456789abcdef);
__ Mov(v16.V16B(), 15, v0.V16B(), 0);
- __ Mov(v17.V8H(), 0, v1.V8H(), 7);
- __ Mov(v18.V4S(), 3, v1.V4S(), 0);
- __ Mov(v19.V2D(), 1, v0.V2D(), 0);
+ __ Mov(v17.V8H(), 0, v1.V8H(), 7);
+ __ Mov(v18.V4S(), 3, v1.V4S(), 0);
+ __ Mov(v19.V2D(), 1, v0.V2D(), 0);
__ Mov(v2.V16B(), 2, v2.V16B(), 0);
- __ Mov(v3.V8H(), 0, v3.V8H(), 7);
- __ Mov(v4.V4S(), 3, v4.V4S(), 0);
- __ Mov(v5.V2D(), 0, v5.V2D(), 1);
+ __ Mov(v3.V8H(), 0, v3.V8H(), 7);
+ __ Mov(v4.V4S(), 3, v4.V4S(), 0);
+ __ Mov(v5.V2D(), 0, v5.V2D(), 1);
END();
@@ -19795,20 +20445,20 @@
__ Movi(v0.V2D(), 0x0123456789abcdef, 0xfedcba9876543210);
- __ Smov(w0, v0.B(), 7);
+ __ Smov(w0, v0.B(), 7);
__ Smov(w1, v0.B(), 15);
- __ Smov(w2, v0.H(), 0);
- __ Smov(w3, v0.H(), 3);
+ __ Smov(w2, v0.H(), 0);
+ __ Smov(w3, v0.H(), 3);
- __ Smov(x4, v0.B(), 7);
- __ Smov(x5, v0.B(), 15);
+ __ Smov(x4, v0.B(), 7);
+ __ Smov(x5, v0.B(), 15);
- __ Smov(x6, v0.H(), 0);
- __ Smov(x7, v0.H(), 3);
+ __ Smov(x6, v0.H(), 0);
+ __ Smov(x7, v0.H(), 3);
- __ Smov(x16, v0.S(), 0);
- __ Smov(x17, v0.S(), 1);
+ __ Smov(x16, v0.S(), 0);
+ __ Smov(x17, v0.S(), 1);
END();
@@ -19837,12 +20487,12 @@
__ Movi(v0.V2D(), 0x0123456789abcdef, 0xfedcba9876543210);
__ Umov(w0, v0.B(), 15);
- __ Umov(w1, v0.H(), 0);
- __ Umov(w2, v0.S(), 3);
- __ Umov(x3, v0.D(), 1);
+ __ Umov(w1, v0.H(), 0);
+ __ Umov(w2, v0.S(), 3);
+ __ Umov(x3, v0.D(), 1);
- __ Mov(w4, v0.S(), 3);
- __ Mov(x5, v0.D(), 1);
+ __ Mov(w4, v0.S(), 3);
+ __ Mov(x5, v0.D(), 1);
END();
@@ -19876,14 +20526,14 @@
__ Movi(v5.V2D(), 0, 0x0123456789abcdef);
__ Ins(v16.V16B(), 15, w0);
- __ Ins(v17.V8H(), 0, w0);
- __ Ins(v18.V4S(), 3, w0);
- __ Ins(v19.V2D(), 0, x0);
+ __ Ins(v17.V8H(), 0, w0);
+ __ Ins(v18.V4S(), 3, w0);
+ __ Ins(v19.V2D(), 0, x0);
__ Ins(v2.V16B(), 2, w0);
- __ Ins(v3.V8H(), 0, w0);
- __ Ins(v4.V4S(), 3, w0);
- __ Ins(v5.V2D(), 1, x0);
+ __ Ins(v3.V8H(), 0, w0);
+ __ Ins(v4.V4S(), 3, w0);
+ __ Ins(v5.V2D(), 1, x0);
END();
@@ -19920,8 +20570,8 @@
__ Ext(v18.V8B(), v2.V8B(), v3.V8B(), 0);
__ Ext(v19.V8B(), v2.V8B(), v3.V8B(), 7);
- __ Ext(v2.V8B(), v2.V8B(), v3.V8B(), 4); // Dest is same as one Src
- __ Ext(v3.V8B(), v3.V8B(), v3.V8B(), 4); // All reg are the same
+ __ Ext(v2.V8B(), v2.V8B(), v3.V8B(), 4); // Dest is same as one Src
+ __ Ext(v3.V8B(), v3.V8B(), v3.V8B(), 4); // All reg are the same
END();
@@ -19994,13 +20644,13 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Addhn(v16.V8B(), v0.V8H(), v1.V8H());
+ __ Addhn(v16.V8B(), v0.V8H(), v1.V8H());
__ Addhn2(v16.V16B(), v2.V8H(), v3.V8H());
- __ Raddhn(v17.V8B(), v0.V8H(), v1.V8H());
+ __ Raddhn(v17.V8B(), v0.V8H(), v1.V8H());
__ Raddhn2(v17.V16B(), v2.V8H(), v3.V8H());
- __ Subhn(v18.V8B(), v0.V8H(), v1.V8H());
+ __ Subhn(v18.V8B(), v0.V8H(), v1.V8H());
__ Subhn2(v18.V16B(), v2.V8H(), v3.V8H());
- __ Rsubhn(v19.V8B(), v0.V8H(), v1.V8H());
+ __ Rsubhn(v19.V8B(), v0.V8H(), v1.V8H());
__ Rsubhn2(v19.V16B(), v2.V8H(), v3.V8H());
END();
@@ -20243,14 +20893,14 @@
__ Movi(v1.V2D(), 0x80008001ffff0000, 0xffff000000017fff);
__ Movi(v2.V2D(), 0x80000000ffffffff, 0x000000007fffffff);
- __ Sshll(v16.V8H(), v0.V8B(), 4);
+ __ Sshll(v16.V8H(), v0.V8B(), 4);
__ Sshll2(v17.V8H(), v0.V16B(), 4);
- __ Sshll(v18.V4S(), v1.V4H(), 8);
- __ Sshll2(v19.V4S(), v1.V8H(), 8);
+ __ Sshll(v18.V4S(), v1.V4H(), 8);
+ __ Sshll2(v19.V4S(), v1.V8H(), 8);
- __ Sshll(v20.V2D(), v2.V2S(), 16);
- __ Sshll2(v21.V2D(), v2.V4S(), 16);
+ __ Sshll(v20.V2D(), v2.V2S(), 16);
+ __ Sshll2(v21.V2D(), v2.V4S(), 16);
END();
@@ -20274,14 +20924,14 @@
__ Movi(v1.V2D(), 0x80008001ffff0000, 0xffff000000017fff);
__ Movi(v2.V2D(), 0x80000000ffffffff, 0x000000007fffffff);
- __ Shll(v16.V8H(), v0.V8B(), 8);
+ __ Shll(v16.V8H(), v0.V8B(), 8);
__ Shll2(v17.V8H(), v0.V16B(), 8);
- __ Shll(v18.V4S(), v1.V4H(), 16);
- __ Shll2(v19.V4S(), v1.V8H(), 16);
+ __ Shll(v18.V4S(), v1.V4H(), 16);
+ __ Shll2(v19.V4S(), v1.V8H(), 16);
- __ Shll(v20.V2D(), v2.V2S(), 32);
- __ Shll2(v21.V2D(), v2.V4S(), 32);
+ __ Shll(v20.V2D(), v2.V2S(), 32);
+ __ Shll2(v21.V2D(), v2.V4S(), 32);
END();
@@ -20305,14 +20955,14 @@
__ Movi(v1.V2D(), 0x80008001ffff0000, 0xffff000000017fff);
__ Movi(v2.V2D(), 0x80000000ffffffff, 0x000000007fffffff);
- __ Ushll(v16.V8H(), v0.V8B(), 4);
+ __ Ushll(v16.V8H(), v0.V8B(), 4);
__ Ushll2(v17.V8H(), v0.V16B(), 4);
- __ Ushll(v18.V4S(), v1.V4H(), 8);
- __ Ushll2(v19.V4S(), v1.V8H(), 8);
+ __ Ushll(v18.V4S(), v1.V4H(), 8);
+ __ Ushll2(v19.V4S(), v1.V8H(), 8);
- __ Ushll(v20.V2D(), v2.V2S(), 16);
- __ Ushll2(v21.V2D(), v2.V4S(), 16);
+ __ Ushll(v20.V2D(), v2.V2S(), 16);
+ __ Ushll2(v21.V2D(), v2.V4S(), 16);
END();
@@ -20340,11 +20990,11 @@
__ Sxtl(v16.V8H(), v0.V8B());
__ Sxtl2(v17.V8H(), v0.V16B());
- __ Sxtl(v18.V4S(), v1.V4H());
- __ Sxtl2(v19.V4S(), v1.V8H());
+ __ Sxtl(v18.V4S(), v1.V4H());
+ __ Sxtl2(v19.V4S(), v1.V8H());
- __ Sxtl(v20.V2D(), v2.V2S());
- __ Sxtl2(v21.V2D(), v2.V4S());
+ __ Sxtl(v20.V2D(), v2.V2S());
+ __ Sxtl2(v21.V2D(), v2.V4S());
END();
@@ -20372,11 +21022,11 @@
__ Uxtl(v16.V8H(), v0.V8B());
__ Uxtl2(v17.V8H(), v0.V16B());
- __ Uxtl(v18.V4S(), v1.V4H());
- __ Uxtl2(v19.V4S(), v1.V8H());
+ __ Uxtl(v18.V4S(), v1.V4H());
+ __ Uxtl2(v19.V4S(), v1.V8H());
- __ Uxtl(v20.V2D(), v2.V2S());
- __ Uxtl2(v21.V2D(), v2.V4S());
+ __ Uxtl(v20.V2D(), v2.V2S());
+ __ Uxtl2(v21.V2D(), v2.V4S());
END();
@@ -20403,30 +21053,30 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Mov(v16.V2D(), v0.V2D());
- __ Mov(v17.V2D(), v0.V2D());
- __ Mov(v18.V2D(), v1.V2D());
- __ Mov(v19.V2D(), v1.V2D());
- __ Mov(v20.V2D(), v2.V2D());
- __ Mov(v21.V2D(), v2.V2D());
- __ Mov(v22.V2D(), v3.V2D());
- __ Mov(v23.V2D(), v4.V2D());
- __ Mov(v24.V2D(), v3.V2D());
- __ Mov(v25.V2D(), v4.V2D());
+ __ Mov(v16.V2D(), v0.V2D());
+ __ Mov(v17.V2D(), v0.V2D());
+ __ Mov(v18.V2D(), v1.V2D());
+ __ Mov(v19.V2D(), v1.V2D());
+ __ Mov(v20.V2D(), v2.V2D());
+ __ Mov(v21.V2D(), v2.V2D());
+ __ Mov(v22.V2D(), v3.V2D());
+ __ Mov(v23.V2D(), v4.V2D());
+ __ Mov(v24.V2D(), v3.V2D());
+ __ Mov(v25.V2D(), v4.V2D());
- __ Ssra(v16.V8B(), v0.V8B(), 4);
+ __ Ssra(v16.V8B(), v0.V8B(), 4);
__ Ssra(v17.V16B(), v0.V16B(), 4);
- __ Ssra(v18.V4H(), v1.V4H(), 8);
- __ Ssra(v19.V8H(), v1.V8H(), 8);
+ __ Ssra(v18.V4H(), v1.V4H(), 8);
+ __ Ssra(v19.V8H(), v1.V8H(), 8);
- __ Ssra(v20.V2S(), v2.V2S(), 16);
- __ Ssra(v21.V4S(), v2.V4S(), 16);
+ __ Ssra(v20.V2S(), v2.V2S(), 16);
+ __ Ssra(v21.V4S(), v2.V4S(), 16);
- __ Ssra(v22.V2D(), v3.V2D(), 32);
- __ Ssra(v23.V2D(), v4.V2D(), 32);
+ __ Ssra(v22.V2D(), v3.V2D(), 32);
+ __ Ssra(v23.V2D(), v4.V2D(), 32);
- __ Ssra(d24, d3, 48);
+ __ Ssra(d24, d3, 48);
END();
@@ -20455,30 +21105,30 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Mov(v16.V2D(), v0.V2D());
- __ Mov(v17.V2D(), v0.V2D());
- __ Mov(v18.V2D(), v1.V2D());
- __ Mov(v19.V2D(), v1.V2D());
- __ Mov(v20.V2D(), v2.V2D());
- __ Mov(v21.V2D(), v2.V2D());
- __ Mov(v22.V2D(), v3.V2D());
- __ Mov(v23.V2D(), v4.V2D());
- __ Mov(v24.V2D(), v3.V2D());
- __ Mov(v25.V2D(), v4.V2D());
+ __ Mov(v16.V2D(), v0.V2D());
+ __ Mov(v17.V2D(), v0.V2D());
+ __ Mov(v18.V2D(), v1.V2D());
+ __ Mov(v19.V2D(), v1.V2D());
+ __ Mov(v20.V2D(), v2.V2D());
+ __ Mov(v21.V2D(), v2.V2D());
+ __ Mov(v22.V2D(), v3.V2D());
+ __ Mov(v23.V2D(), v4.V2D());
+ __ Mov(v24.V2D(), v3.V2D());
+ __ Mov(v25.V2D(), v4.V2D());
- __ Srsra(v16.V8B(), v0.V8B(), 4);
+ __ Srsra(v16.V8B(), v0.V8B(), 4);
__ Srsra(v17.V16B(), v0.V16B(), 4);
- __ Srsra(v18.V4H(), v1.V4H(), 8);
- __ Srsra(v19.V8H(), v1.V8H(), 8);
+ __ Srsra(v18.V4H(), v1.V4H(), 8);
+ __ Srsra(v19.V8H(), v1.V8H(), 8);
- __ Srsra(v20.V2S(), v2.V2S(), 16);
- __ Srsra(v21.V4S(), v2.V4S(), 16);
+ __ Srsra(v20.V2S(), v2.V2S(), 16);
+ __ Srsra(v21.V4S(), v2.V4S(), 16);
- __ Srsra(v22.V2D(), v3.V2D(), 32);
- __ Srsra(v23.V2D(), v4.V2D(), 32);
+ __ Srsra(v22.V2D(), v3.V2D(), 32);
+ __ Srsra(v23.V2D(), v4.V2D(), 32);
- __ Srsra(d24, d3, 48);
+ __ Srsra(d24, d3, 48);
END();
@@ -20508,30 +21158,30 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Mov(v16.V2D(), v0.V2D());
- __ Mov(v17.V2D(), v0.V2D());
- __ Mov(v18.V2D(), v1.V2D());
- __ Mov(v19.V2D(), v1.V2D());
- __ Mov(v20.V2D(), v2.V2D());
- __ Mov(v21.V2D(), v2.V2D());
- __ Mov(v22.V2D(), v3.V2D());
- __ Mov(v23.V2D(), v4.V2D());
- __ Mov(v24.V2D(), v3.V2D());
- __ Mov(v25.V2D(), v4.V2D());
+ __ Mov(v16.V2D(), v0.V2D());
+ __ Mov(v17.V2D(), v0.V2D());
+ __ Mov(v18.V2D(), v1.V2D());
+ __ Mov(v19.V2D(), v1.V2D());
+ __ Mov(v20.V2D(), v2.V2D());
+ __ Mov(v21.V2D(), v2.V2D());
+ __ Mov(v22.V2D(), v3.V2D());
+ __ Mov(v23.V2D(), v4.V2D());
+ __ Mov(v24.V2D(), v3.V2D());
+ __ Mov(v25.V2D(), v4.V2D());
- __ Usra(v16.V8B(), v0.V8B(), 4);
+ __ Usra(v16.V8B(), v0.V8B(), 4);
__ Usra(v17.V16B(), v0.V16B(), 4);
- __ Usra(v18.V4H(), v1.V4H(), 8);
- __ Usra(v19.V8H(), v1.V8H(), 8);
+ __ Usra(v18.V4H(), v1.V4H(), 8);
+ __ Usra(v19.V8H(), v1.V8H(), 8);
- __ Usra(v20.V2S(), v2.V2S(), 16);
- __ Usra(v21.V4S(), v2.V4S(), 16);
+ __ Usra(v20.V2S(), v2.V2S(), 16);
+ __ Usra(v21.V4S(), v2.V4S(), 16);
- __ Usra(v22.V2D(), v3.V2D(), 32);
- __ Usra(v23.V2D(), v4.V2D(), 32);
+ __ Usra(v22.V2D(), v3.V2D(), 32);
+ __ Usra(v23.V2D(), v4.V2D(), 32);
- __ Usra(d24, d3, 48);
+ __ Usra(d24, d3, 48);
END();
@@ -20561,30 +21211,30 @@
__ Movi(v3.V2D(), 0x8000000000000001, 0x7fffffffffffffff);
__ Movi(v4.V2D(), 0x8000000000000000, 0x0000000000000000);
- __ Mov(v16.V2D(), v0.V2D());
- __ Mov(v17.V2D(), v0.V2D());
- __ Mov(v18.V2D(), v1.V2D());
- __ Mov(v19.V2D(), v1.V2D());
- __ Mov(v20.V2D(), v2.V2D());
- __ Mov(v21.V2D(), v2.V2D());
- __ Mov(v22.V2D(), v3.V2D());
- __ Mov(v23.V2D(), v4.V2D());
- __ Mov(v24.V2D(), v3.V2D());
- __ Mov(v25.V2D(), v4.V2D());
+ __ Mov(v16.V2D(), v0.V2D());
+ __ Mov(v17.V2D(), v0.V2D());
+ __ Mov(v18.V2D(), v1.V2D());
+ __ Mov(v19.V2D(), v1.V2D());
+ __ Mov(v20.V2D(), v2.V2D());
+ __ Mov(v21.V2D(), v2.V2D());
+ __ Mov(v22.V2D(), v3.V2D());
+ __ Mov(v23.V2D(), v4.V2D());
+ __ Mov(v24.V2D(), v3.V2D());
+ __ Mov(v25.V2D(), v4.V2D());
- __ Ursra(v16.V8B(), v0.V8B(), 4);
+ __ Ursra(v16.V8B(), v0.V8B(), 4);
__ Ursra(v17.V16B(), v0.V16B(), 4);
- __ Ursra(v18.V4H(), v1.V4H(), 8);
- __ Ursra(v19.V8H(), v1.V8H(), 8);
+ __ Ursra(v18.V4H(), v1.V4H(), 8);
+ __ Ursra(v19.V8H(), v1.V8H(), 8);
- __ Ursra(v20.V2S(), v2.V2S(), 16);
- __ Ursra(v21.V4S(), v2.V4S(), 16);
+ __ Ursra(v20.V2S(), v2.V2S(), 16);
+ __ Ursra(v21.V4S(), v2.V4S(), 16);
- __ Ursra(v22.V2D(), v3.V2D(), 32);
- __ Ursra(v23.V2D(), v4.V2D(), 32);
+ __ Ursra(v22.V2D(), v3.V2D(), 32);
+ __ Ursra(v23.V2D(), v4.V2D(), 32);
- __ Ursra(d24, d3, 48);
+ __ Ursra(d24, d3, 48);
END();
@@ -21161,7 +21811,7 @@
END();
RUN();
- ASSERT_EQUAL_64(0x0, x10);
+ ASSERT_EQUAL_64(0x0, x10);
ASSERT_EQUAL_64(0x5f058808, x11);
ASSERT_EQUAL_64(0x5f058808, x12);
ASSERT_EQUAL_64(0xedb88320, x13);
@@ -21203,7 +21853,7 @@
END();
RUN();
- ASSERT_EQUAL_64(0x0, x10);
+ ASSERT_EQUAL_64(0x0, x10);
ASSERT_EQUAL_64(0x0e848dba, x11);
ASSERT_EQUAL_64(0x0e848dba, x12);
ASSERT_EQUAL_64(0x3b83984b, x13);
@@ -21241,7 +21891,7 @@
END();
RUN();
- ASSERT_EQUAL_64(0x0, x10);
+ ASSERT_EQUAL_64(0x0, x10);
ASSERT_EQUAL_64(0x1d937b81, x11);
ASSERT_EQUAL_64(0xed59b63b, x13);
ASSERT_EQUAL_64(0x00be2612, x14);
@@ -21278,7 +21928,7 @@
END();
RUN();
- ASSERT_EQUAL_64(0x0, x10);
+ ASSERT_EQUAL_64(0x0, x10);
ASSERT_EQUAL_64(0x40797b92, x11);
ASSERT_EQUAL_64(0x533b85da, x13);
ASSERT_EQUAL_64(0xbc962670, x14);
@@ -21319,7 +21969,7 @@
END();
RUN();
- ASSERT_EQUAL_64(0x0, x10);
+ ASSERT_EQUAL_64(0x0, x10);
ASSERT_EQUAL_64(0x4851927d, x11);
ASSERT_EQUAL_64(0x4851927d, x12);
ASSERT_EQUAL_64(0x82f63b78, x13);
@@ -21361,7 +22011,7 @@
END();
RUN();
- ASSERT_EQUAL_64(0x0, x10);
+ ASSERT_EQUAL_64(0x0, x10);
ASSERT_EQUAL_64(0xcef8494c, x11);
ASSERT_EQUAL_64(0xcef8494c, x12);
ASSERT_EQUAL_64(0xfbc3faf9, x13);
@@ -21399,7 +22049,7 @@
END();
RUN();
- ASSERT_EQUAL_64(0x0, x10);
+ ASSERT_EQUAL_64(0x0, x10);
ASSERT_EQUAL_64(0xbcb79ece, x11);
ASSERT_EQUAL_64(0x52a0c93f, x13);
ASSERT_EQUAL_64(0x9f9b5c7a, x14);
@@ -21436,7 +22086,7 @@
END();
RUN();
- ASSERT_EQUAL_64(0x0, x10);
+ ASSERT_EQUAL_64(0x0, x10);
ASSERT_EQUAL_64(0x7f320fcb, x11);
ASSERT_EQUAL_64(0x34019664, x13);
ASSERT_EQUAL_64(0x6cc27dd0, x14);
@@ -21801,10 +22451,12 @@
// that are outside the immediate range of branch instructions.
// Take into account that backward branches can reach one instruction further
// than forward branches.
- const int overflow_size = kInstructionSize +
- std::max(Instruction::GetImmBranchForwardRange(TestBranchType),
- std::max(Instruction::GetImmBranchForwardRange(CompareBranchType),
- Instruction::GetImmBranchForwardRange(CondBranchType)));
+ const int overflow_size =
+ kInstructionSize +
+ std::max(Instruction::GetImmBranchForwardRange(TestBranchType),
+ std::max(Instruction::GetImmBranchForwardRange(
+ CompareBranchType),
+ Instruction::GetImmBranchForwardRange(CondBranchType)));
SETUP();
START();
@@ -21917,9 +22569,10 @@
// Test that the MacroAssembler correctly emits veneers for forward branches
// to labels that are outside the immediate range of branch instructions.
const int max_range =
- std::max(Instruction::GetImmBranchForwardRange(TestBranchType),
- std::max(Instruction::GetImmBranchForwardRange(CompareBranchType),
- Instruction::GetImmBranchForwardRange(CondBranchType)));
+ std::max(Instruction::GetImmBranchForwardRange(TestBranchType),
+ std::max(Instruction::GetImmBranchForwardRange(
+ CompareBranchType),
+ Instruction::GetImmBranchForwardRange(CondBranchType)));
SETUP();
START();
@@ -22084,7 +22737,7 @@
// We use different labels to prevent the MacroAssembler from sharing veneers.
Label labels[kNTotalBranches];
for (int i = 0; i < kNTotalBranches; i++) {
- new(&labels[i]) Label();
+ new (&labels[i]) Label();
}
for (int i = 0; i < n_bcond; i++) {
@@ -22202,10 +22855,7 @@
Literal<int64_t> automatically_placed_literal(1, masm.GetLiteralPool());
Literal<int64_t> manually_placed_literal(2);
{
- CodeBufferCheckScope scope(&masm,
- kInstructionSize + sizeof(int64_t),
- CodeBufferCheckScope::kReserveBufferSpace,
- CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope scope(&masm, kInstructionSize + sizeof(int64_t));
Label over_literal;
__ b(&over_literal);
__ place(&manually_placed_literal);
@@ -22401,11 +23051,16 @@
TEST(generic_operand) {
SETUP();
- int32_t data_32_array[5] = {
- 0xbadbeef, 0x11111111, 0xbadbeef, 0x33333333, 0xbadbeef };
- int64_t data_64_array[5] = {
- INT64_C(0xbadbadbadbeef), INT64_C(0x1111111111111111),
- INT64_C(0xbadbadbadbeef), INT64_C(0x3333333333333333), INT64_C(0xbadbadbadbeef) };
+ int32_t data_32_array[5] = {0xbadbeef,
+ 0x11111111,
+ 0xbadbeef,
+ 0x33333333,
+ 0xbadbeef};
+ int64_t data_64_array[5] = {INT64_C(0xbadbadbadbeef),
+ INT64_C(0x1111111111111111),
+ INT64_C(0xbadbadbadbeef),
+ INT64_C(0x3333333333333333),
+ INT64_C(0xbadbadbadbeef)};
size_t size_32 = sizeof(data_32_array[0]);
size_t size_64 = sizeof(data_64_array[0]);
@@ -22478,9 +23133,7 @@
}
-int32_t runtime_call_add_one(int32_t a) {
- return a + 1;
-}
+int32_t runtime_call_add_one(int32_t a) { return a + 1; }
double runtime_call_add_doubles(double a, double b, double c) {
return a + b + c;
@@ -22511,9 +23164,7 @@
return arg9 - arg10;
}
-void runtime_call_store_at_address(int64_t* address) {
- *address = 0xf00d;
-}
+void runtime_call_store_at_address(int64_t* address) { *address = 0xf00d; }
// Test feature detection of calls to runtime functions.
@@ -22522,12 +23173,14 @@
#if defined(VIXL_INCLUDE_SIMULATOR_AARCH64) && (__cplusplus >= 201103L) && \
(defined(__clang__) || GCC_VERSION_OR_NEWER(4, 9, 1)) && \
!defined(VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT)
-#error "C++11 should be sufficient to provide support for simulated runtime calls."
+#error \
+ "C++11 should be sufficient to provide support for simulated runtime calls."
#endif // #if defined(VIXL_INCLUDE_SIMULATOR_AARCH64) && ...
#if (__cplusplus >= 201103L) && \
!defined(VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT)
-#error "C++11 should be sufficient to provide support for `MacroAssembler::CallRuntime()`."
+#error \
+ "C++11 should be sufficient to provide support for `MacroAssembler::CallRuntime()`."
#endif // #if (__cplusplus >= 201103L) && ...
#ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
@@ -22624,5 +23277,44 @@
}
+TEST(static_register_types) {
+ SETUP();
+ START();
+
+ // [WX]Register implicitly casts to Register.
+ XRegister x_x0(0);
+ WRegister w_w0(0);
+ Register r_x0 = x_x0;
+ Register r_w0 = w_w0;
+ VIXL_CHECK(r_x0.Is(x_x0));
+ VIXL_CHECK(x_x0.Is(r_x0));
+ VIXL_CHECK(r_w0.Is(w_w0));
+ VIXL_CHECK(w_w0.Is(r_w0));
+
+ // Register explicitly casts to [WX]Register.
+ Register r_x1(1, kXRegSize);
+ Register r_w1(1, kWRegSize);
+ XRegister x_x1(r_x1);
+ WRegister w_w1(r_w1);
+ VIXL_CHECK(r_x1.Is(x_x1));
+ VIXL_CHECK(x_x1.Is(r_x1));
+ VIXL_CHECK(r_w1.Is(w_w1));
+ VIXL_CHECK(w_w1.Is(r_w1));
+
+ // [WX]Register implicitly casts to CPURegister.
+ XRegister x_x2(2);
+ WRegister w_w2(2);
+ CPURegister cpu_x2 = x_x2;
+ CPURegister cpu_w2 = w_w2;
+ VIXL_CHECK(cpu_x2.Is(x_x2));
+ VIXL_CHECK(x_x2.Is(cpu_x2));
+ VIXL_CHECK(cpu_w2.Is(w_w2));
+ VIXL_CHECK(w_w2.Is(cpu_w2));
+
+ END();
+ TEARDOWN();
+}
+
+
} // namespace aarch64
} // namespace vixl
diff --git a/test/aarch64/test-disasm-aarch64.cc b/test/aarch64/test-disasm-aarch64.cc
index a5bfeb2..824eb1e 100644
--- a/test/aarch64/test-disasm-aarch64.cc
+++ b/test/aarch64/test-disasm-aarch64.cc
@@ -34,26 +34,24 @@
#include "aarch64/disasm-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
-#define TEST(name) TEST_(AARCH64_DISASM_##name)
+#define TEST(name) TEST_(AARCH64_DISASM_##name)
-#define SETUP_CLASS(ASMCLASS) \
- uint32_t encoding = 0; \
- ASMCLASS masm; \
- Decoder decoder; \
- Disassembler disasm; \
+#define SETUP_COMMON() \
+ uint32_t encoding = 0; \
+ MacroAssembler masm; \
+ Decoder decoder; \
+ Disassembler disasm; \
decoder.AppendVisitor(&disasm)
-#define SETUP() SETUP_CLASS(Assembler)
-
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
// Run tests with the simulator.
-#define SETUP_MACRO() \
- SETUP_CLASS(MacroAssembler); \
+#define SETUP() \
+ SETUP_COMMON(); \
masm.SetGenerateSimulatorCode(true)
#else // ifdef VIXL_INCLUDE_SIMULATOR_AARCH64.
-#define SETUP_MACRO() \
- SETUP_CLASS(MacroAssembler); \
+#define SETUP() \
+ SETUP_COMMON(); \
masm.SetGenerateSimulatorCode(false)
#endif // ifdef VIXL_INCLUDE_SIMULATOR_AARCH64.
@@ -62,80 +60,88 @@
// tests.
#define MAX_SIZE_GENERATED 1024
-#define COMPARE(ASM, EXP) \
- masm.Reset(); \
- { \
- CodeBufferCheckScope guard(&masm, MAX_SIZE_GENERATED); \
- masm.ASM; \
- } \
- masm.FinalizeCode(); \
- decoder.Decode(masm.GetBuffer()->GetStartAddress<Instruction*>()); \
- encoding = *masm.GetBuffer()->GetStartAddress<uint32_t*>(); \
- if (strcmp(disasm.GetOutput(), EXP) != 0) { \
- printf("\nEncoding: %08" PRIx32 "\nExpected: %s\nFound: %s\n", \
- encoding, EXP, disasm.GetOutput()); \
- abort(); \
- } \
- if (Test::disassemble()) { \
- printf("%08" PRIx32 "\t%s\n", encoding, disasm.GetOutput()); \
+#define COMPARE(ASM, EXP) \
+ masm.Reset(); \
+ { \
+ ExactAssemblyScope guard(&masm, \
+ MAX_SIZE_GENERATED, \
+ ExactAssemblyScope::kMaximumSize); \
+ masm.ASM; \
+ } \
+ masm.FinalizeCode(); \
+ decoder.Decode(masm.GetBuffer()->GetStartAddress<Instruction*>()); \
+ encoding = *masm.GetBuffer()->GetStartAddress<uint32_t*>(); \
+ if (strcmp(disasm.GetOutput(), EXP) != 0) { \
+ printf("\nEncoding: %08" PRIx32 "\nExpected: %s\nFound: %s\n", \
+ encoding, \
+ EXP, \
+ disasm.GetOutput()); \
+ abort(); \
+ } \
+ if (Test::disassemble()) { \
+ printf("%08" PRIx32 "\t%s\n", encoding, disasm.GetOutput()); \
}
-#define COMPARE_PREFIX(ASM, EXP) \
- masm.Reset(); \
- { \
- CodeBufferCheckScope guard(&masm, MAX_SIZE_GENERATED); \
- masm.ASM; \
- } \
- masm.FinalizeCode(); \
- decoder.Decode(masm.GetBuffer()->GetStartAddress<Instruction*>()); \
- encoding = *masm.GetBuffer()->GetStartAddress<uint32_t*>(); \
- if (strncmp(disasm.GetOutput(), EXP, strlen(EXP)) != 0) { \
- printf("\nEncoding: %08" PRIx32 "\nExpected: %s\nFound: %s\n", \
- encoding, EXP, disasm.GetOutput()); \
- abort(); \
- } \
- if (Test::disassemble()) { \
- printf("%08" PRIx32 "\t%s\n", encoding, disasm.GetOutput()); \
+#define COMPARE_PREFIX(ASM, EXP) \
+ masm.Reset(); \
+ { \
+ ExactAssemblyScope guard(&masm, \
+ MAX_SIZE_GENERATED, \
+ ExactAssemblyScope::kMaximumSize); \
+ masm.ASM; \
+ } \
+ masm.FinalizeCode(); \
+ decoder.Decode(masm.GetBuffer()->GetStartAddress<Instruction*>()); \
+ encoding = *masm.GetBuffer()->GetStartAddress<uint32_t*>(); \
+ if (strncmp(disasm.GetOutput(), EXP, strlen(EXP)) != 0) { \
+ printf("\nEncoding: %08" PRIx32 "\nExpected: %s\nFound: %s\n", \
+ encoding, \
+ EXP, \
+ disasm.GetOutput()); \
+ abort(); \
+ } \
+ if (Test::disassemble()) { \
+ printf("%08" PRIx32 "\t%s\n", encoding, disasm.GetOutput()); \
}
-#define COMPARE_MACRO_BASE(ASM, EXP) \
- masm.Reset(); \
- masm.ASM; \
- masm.FinalizeCode(); \
- std::string res; \
- \
- Instruction* instruction = \
- masm.GetBuffer()->GetStartAddress<Instruction*>(); \
- Instruction* end = masm.GetCursorAddress<Instruction*>(); \
- while (instruction != end) { \
- decoder.Decode(instruction); \
- res.append(disasm.GetOutput()); \
- if (Test::disassemble()) { \
- encoding = *reinterpret_cast<uint32_t*>(instruction); \
- printf("%08" PRIx32 "\t%s\n", encoding, disasm.GetOutput()); \
- } \
- instruction += kInstructionSize; \
- if (instruction != end) { \
- res.append("\n"); \
- } \
+#define COMPARE_MACRO_BASE(ASM, EXP) \
+ masm.Reset(); \
+ masm.ASM; \
+ masm.FinalizeCode(); \
+ std::string res; \
+ \
+ Instruction* instruction = \
+ masm.GetBuffer()->GetStartAddress<Instruction*>(); \
+ Instruction* end = masm.GetCursorAddress<Instruction*>(); \
+ while (instruction != end) { \
+ decoder.Decode(instruction); \
+ res.append(disasm.GetOutput()); \
+ if (Test::disassemble()) { \
+ encoding = *reinterpret_cast<uint32_t*>(instruction); \
+ printf("%08" PRIx32 "\t%s\n", encoding, disasm.GetOutput()); \
+ } \
+ instruction += kInstructionSize; \
+ if (instruction != end) { \
+ res.append("\n"); \
+ } \
}
-#define COMPARE_MACRO(ASM, EXP) \
- { \
- COMPARE_MACRO_BASE(ASM, EXP) \
- if (strcmp(res.c_str(), EXP) != 0) { \
- printf("Expected: %s\nFound: %s\n", EXP, res.c_str()); \
- abort(); \
- } \
+#define COMPARE_MACRO(ASM, EXP) \
+ { \
+ COMPARE_MACRO_BASE(ASM, EXP) \
+ if (strcmp(res.c_str(), EXP) != 0) { \
+ printf("Expected: %s\nFound: %s\n", EXP, res.c_str()); \
+ abort(); \
+ } \
}
-#define COMPARE_MACRO_PREFIX(ASM, EXP) \
- { \
- COMPARE_MACRO_BASE(ASM, EXP) \
- if (strncmp(res.c_str(), EXP, strlen(EXP)) != 0) { \
- printf("Expected (prefix): %s\nFound: %s\n", EXP, res.c_str()); \
- abort(); \
- } \
+#define COMPARE_MACRO_PREFIX(ASM, EXP) \
+ { \
+ COMPARE_MACRO_BASE(ASM, EXP) \
+ if (strncmp(res.c_str(), EXP, strlen(EXP)) != 0) { \
+ printf("Expected (prefix): %s\nFound: %s\n", EXP, res.c_str()); \
+ abort(); \
+ } \
}
#define CLEANUP()
@@ -174,25 +180,25 @@
TEST(mov_mvn) {
- SETUP_MACRO();
+ SETUP();
- COMPARE(Mov(w0, Operand(0x1234)), "mov w0, #0x1234");
- COMPARE(Mov(x1, Operand(0x1234)), "mov x1, #0x1234");
- COMPARE(Mov(w2, Operand(w3)), "mov w2, w3");
- COMPARE(Mov(x4, Operand(x5)), "mov x4, x5");
- COMPARE(Mov(w6, Operand(w7, LSL, 5)), "lsl w6, w7, #5");
- COMPARE(Mov(x8, Operand(x9, ASR, 42)), "asr x8, x9, #42");
- COMPARE(Mov(w10, Operand(w11, UXTB)), "uxtb w10, w11");
- COMPARE(Mov(x12, Operand(x13, UXTB, 1)), "ubfiz x12, x13, #1, #8");
- COMPARE(Mov(w14, Operand(w15, SXTH, 2)), "sbfiz w14, w15, #2, #16");
- COMPARE(Mov(x16, Operand(x17, SXTW, 3)), "sbfiz x16, x17, #3, #32");
+ COMPARE_MACRO(Mov(w0, Operand(0x1234)), "mov w0, #0x1234");
+ COMPARE_MACRO(Mov(x1, Operand(0x1234)), "mov x1, #0x1234");
+ COMPARE_MACRO(Mov(w2, Operand(w3)), "mov w2, w3");
+ COMPARE_MACRO(Mov(x4, Operand(x5)), "mov x4, x5");
+ COMPARE_MACRO(Mov(w6, Operand(w7, LSL, 5)), "lsl w6, w7, #5");
+ COMPARE_MACRO(Mov(x8, Operand(x9, ASR, 42)), "asr x8, x9, #42");
+ COMPARE_MACRO(Mov(w10, Operand(w11, UXTB)), "uxtb w10, w11");
+ COMPARE_MACRO(Mov(x12, Operand(x13, UXTB, 1)), "ubfiz x12, x13, #1, #8");
+ COMPARE_MACRO(Mov(w14, Operand(w15, SXTH, 2)), "sbfiz w14, w15, #2, #16");
+ COMPARE_MACRO(Mov(x16, Operand(x17, SXTW, 3)), "sbfiz x16, x17, #3, #32");
- COMPARE(Mvn(w0, Operand(0x101)), "mov w0, #0xfffffefe");
- COMPARE(Mvn(x1, Operand(0xfff1)), "mov x1, #0xffffffffffff000e");
- COMPARE(Mvn(w2, Operand(w3)), "mvn w2, w3");
- COMPARE(Mvn(x4, Operand(x5)), "mvn x4, x5");
- COMPARE(Mvn(w6, Operand(w7, LSL, 12)), "mvn w6, w7, lsl #12");
- COMPARE(Mvn(x8, Operand(x9, ASR, 63)), "mvn x8, x9, asr #63");
+ COMPARE_MACRO(Mvn(w0, Operand(0x101)), "mov w0, #0xfffffefe");
+ COMPARE_MACRO(Mvn(x1, Operand(0xfff1)), "mov x1, #0xffffffffffff000e");
+ COMPARE_MACRO(Mvn(w2, Operand(w3)), "mvn w2, w3");
+ COMPARE_MACRO(Mvn(x4, Operand(x5)), "mvn x4, x5");
+ COMPARE_MACRO(Mvn(w6, Operand(w7, LSL, 12)), "mvn w6, w7, lsl #12");
+ COMPARE_MACRO(Mvn(x8, Operand(x9, ASR, 63)), "mvn x8, x9, asr #63");
CLEANUP();
}
@@ -240,56 +246,56 @@
TEST(move_immediate_2) {
- SETUP_MACRO();
+ SETUP();
// Move instructions expected for certain immediates. This is really a macro
// assembler test, to ensure it generates immediates efficiently.
- COMPARE(Mov(w0, 0), "mov w0, #0x0");
- COMPARE(Mov(w0, 0x0000ffff), "mov w0, #0xffff");
- COMPARE(Mov(w0, 0x00010000), "mov w0, #0x10000");
- COMPARE(Mov(w0, 0xffff0000), "mov w0, #0xffff0000");
- COMPARE(Mov(w0, 0x0001ffff), "mov w0, #0x1ffff");
- COMPARE(Mov(w0, 0xffff8000), "mov w0, #0xffff8000");
- COMPARE(Mov(w0, 0xfffffffe), "mov w0, #0xfffffffe");
- COMPARE(Mov(w0, 0xffffffff), "mov w0, #0xffffffff");
- COMPARE(Mov(w0, 0x00ffff00), "mov w0, #0xffff00");
- COMPARE(Mov(w0, 0xfffe7fff), "mov w0, #0xfffe7fff");
- COMPARE(Mov(w0, 0xfffeffff), "mov w0, #0xfffeffff");
- COMPARE(Mov(w0, 0xffff7fff), "mov w0, #0xffff7fff");
+ COMPARE_MACRO(Mov(w0, 0), "mov w0, #0x0");
+ COMPARE_MACRO(Mov(w0, 0x0000ffff), "mov w0, #0xffff");
+ COMPARE_MACRO(Mov(w0, 0x00010000), "mov w0, #0x10000");
+ COMPARE_MACRO(Mov(w0, 0xffff0000), "mov w0, #0xffff0000");
+ COMPARE_MACRO(Mov(w0, 0x0001ffff), "mov w0, #0x1ffff");
+ COMPARE_MACRO(Mov(w0, 0xffff8000), "mov w0, #0xffff8000");
+ COMPARE_MACRO(Mov(w0, 0xfffffffe), "mov w0, #0xfffffffe");
+ COMPARE_MACRO(Mov(w0, 0xffffffff), "mov w0, #0xffffffff");
+ COMPARE_MACRO(Mov(w0, 0x00ffff00), "mov w0, #0xffff00");
+ COMPARE_MACRO(Mov(w0, 0xfffe7fff), "mov w0, #0xfffe7fff");
+ COMPARE_MACRO(Mov(w0, 0xfffeffff), "mov w0, #0xfffeffff");
+ COMPARE_MACRO(Mov(w0, 0xffff7fff), "mov w0, #0xffff7fff");
- COMPARE(Mov(x0, 0), "mov x0, #0x0");
- COMPARE(Mov(x0, 0x0000ffff), "mov x0, #0xffff");
- COMPARE(Mov(x0, 0x00010000), "mov x0, #0x10000");
- COMPARE(Mov(x0, 0xffff0000), "mov x0, #0xffff0000");
- COMPARE(Mov(x0, 0x0001ffff), "mov x0, #0x1ffff");
- COMPARE(Mov(x0, 0xffff8000), "mov x0, #0xffff8000");
- COMPARE(Mov(x0, 0xfffffffe), "mov x0, #0xfffffffe");
- COMPARE(Mov(x0, 0xffffffff), "mov x0, #0xffffffff");
- COMPARE(Mov(x0, 0x00ffff00), "mov x0, #0xffff00");
- COMPARE(Mov(x0, 0xffff000000000000), "mov x0, #0xffff000000000000");
- COMPARE(Mov(x0, 0x0000ffff00000000), "mov x0, #0xffff00000000");
- COMPARE(Mov(x0, 0x00000000ffff0000), "mov x0, #0xffff0000");
- COMPARE(Mov(x0, 0xffffffffffff0000), "mov x0, #0xffffffffffff0000");
- COMPARE(Mov(x0, 0xffffffff0000ffff), "mov x0, #0xffffffff0000ffff");
- COMPARE(Mov(x0, 0xffff0000ffffffff), "mov x0, #0xffff0000ffffffff");
- COMPARE(Mov(x0, 0x0000ffffffffffff), "mov x0, #0xffffffffffff");
- COMPARE(Mov(x0, 0xfffe7fffffffffff), "mov x0, #0xfffe7fffffffffff");
- COMPARE(Mov(x0, 0xfffeffffffffffff), "mov x0, #0xfffeffffffffffff");
- COMPARE(Mov(x0, 0xffff7fffffffffff), "mov x0, #0xffff7fffffffffff");
- COMPARE(Mov(x0, 0xfffffffe7fffffff), "mov x0, #0xfffffffe7fffffff");
- COMPARE(Mov(x0, 0xfffffffeffffffff), "mov x0, #0xfffffffeffffffff");
- COMPARE(Mov(x0, 0xffffffff7fffffff), "mov x0, #0xffffffff7fffffff");
- COMPARE(Mov(x0, 0xfffffffffffe7fff), "mov x0, #0xfffffffffffe7fff");
- COMPARE(Mov(x0, 0xfffffffffffeffff), "mov x0, #0xfffffffffffeffff");
- COMPARE(Mov(x0, 0xffffffffffff7fff), "mov x0, #0xffffffffffff7fff");
- COMPARE(Mov(x0, 0xffffffffffffffff), "mov x0, #0xffffffffffffffff");
+ COMPARE_MACRO(Mov(x0, 0), "mov x0, #0x0");
+ COMPARE_MACRO(Mov(x0, 0x0000ffff), "mov x0, #0xffff");
+ COMPARE_MACRO(Mov(x0, 0x00010000), "mov x0, #0x10000");
+ COMPARE_MACRO(Mov(x0, 0xffff0000), "mov x0, #0xffff0000");
+ COMPARE_MACRO(Mov(x0, 0x0001ffff), "mov x0, #0x1ffff");
+ COMPARE_MACRO(Mov(x0, 0xffff8000), "mov x0, #0xffff8000");
+ COMPARE_MACRO(Mov(x0, 0xfffffffe), "mov x0, #0xfffffffe");
+ COMPARE_MACRO(Mov(x0, 0xffffffff), "mov x0, #0xffffffff");
+ COMPARE_MACRO(Mov(x0, 0x00ffff00), "mov x0, #0xffff00");
+ COMPARE_MACRO(Mov(x0, 0xffff000000000000), "mov x0, #0xffff000000000000");
+ COMPARE_MACRO(Mov(x0, 0x0000ffff00000000), "mov x0, #0xffff00000000");
+ COMPARE_MACRO(Mov(x0, 0x00000000ffff0000), "mov x0, #0xffff0000");
+ COMPARE_MACRO(Mov(x0, 0xffffffffffff0000), "mov x0, #0xffffffffffff0000");
+ COMPARE_MACRO(Mov(x0, 0xffffffff0000ffff), "mov x0, #0xffffffff0000ffff");
+ COMPARE_MACRO(Mov(x0, 0xffff0000ffffffff), "mov x0, #0xffff0000ffffffff");
+ COMPARE_MACRO(Mov(x0, 0x0000ffffffffffff), "mov x0, #0xffffffffffff");
+ COMPARE_MACRO(Mov(x0, 0xfffe7fffffffffff), "mov x0, #0xfffe7fffffffffff");
+ COMPARE_MACRO(Mov(x0, 0xfffeffffffffffff), "mov x0, #0xfffeffffffffffff");
+ COMPARE_MACRO(Mov(x0, 0xffff7fffffffffff), "mov x0, #0xffff7fffffffffff");
+ COMPARE_MACRO(Mov(x0, 0xfffffffe7fffffff), "mov x0, #0xfffffffe7fffffff");
+ COMPARE_MACRO(Mov(x0, 0xfffffffeffffffff), "mov x0, #0xfffffffeffffffff");
+ COMPARE_MACRO(Mov(x0, 0xffffffff7fffffff), "mov x0, #0xffffffff7fffffff");
+ COMPARE_MACRO(Mov(x0, 0xfffffffffffe7fff), "mov x0, #0xfffffffffffe7fff");
+ COMPARE_MACRO(Mov(x0, 0xfffffffffffeffff), "mov x0, #0xfffffffffffeffff");
+ COMPARE_MACRO(Mov(x0, 0xffffffffffff7fff), "mov x0, #0xffffffffffff7fff");
+ COMPARE_MACRO(Mov(x0, 0xffffffffffffffff), "mov x0, #0xffffffffffffffff");
- COMPARE(Movk(w0, 0x1234, 0), "movk w0, #0x1234");
- COMPARE(Movk(x1, 0x2345, 0), "movk x1, #0x2345");
- COMPARE(Movk(w2, 0x3456, 16), "movk w2, #0x3456, lsl #16");
- COMPARE(Movk(x3, 0x4567, 16), "movk x3, #0x4567, lsl #16");
- COMPARE(Movk(x4, 0x5678, 32), "movk x4, #0x5678, lsl #32");
- COMPARE(Movk(x5, 0x6789, 48), "movk x5, #0x6789, lsl #48");
+ COMPARE_MACRO(Movk(w0, 0x1234, 0), "movk w0, #0x1234");
+ COMPARE_MACRO(Movk(x1, 0x2345, 0), "movk x1, #0x2345");
+ COMPARE_MACRO(Movk(w2, 0x3456, 16), "movk w2, #0x3456, lsl #16");
+ COMPARE_MACRO(Movk(x3, 0x4567, 16), "movk x3, #0x4567, lsl #16");
+ COMPARE_MACRO(Movk(x4, 0x5678, 32), "movk x4, #0x5678, lsl #32");
+ COMPARE_MACRO(Movk(x5, 0x6789, 48), "movk x5, #0x6789, lsl #48");
CLEANUP();
}
@@ -321,6 +327,11 @@
COMPARE(cmn(sp, Operand(24)), "cmn sp, #0x18 (24)");
COMPARE(adds(wzr, wsp, Operand(9)), "cmn wsp, #0x9 (9)");
+ // Instructions in the add/sub immediate space, but unallocated due to shift
+ // value out of range.
+ COMPARE(dci(0x11800400), "unallocated (Unallocated)");
+ COMPARE(dci(0x11c00400), "unallocated (Unallocated)");
+
CLEANUP();
}
@@ -706,7 +717,7 @@
TEST(logical_immediate) {
SETUP();
- #define RESULT_SIZE (256)
+#define RESULT_SIZE (256)
char result[RESULT_SIZE];
@@ -770,33 +781,25 @@
"and w0, w0, #0x55555555"); // 2-bit pattern.
// Test other instructions.
- COMPARE(tst(w1, Operand(0x11111111)),
- "tst w1, #0x11111111");
- COMPARE(tst(x2, Operand(0x8888888888888888)),
- "tst x2, #0x8888888888888888");
- COMPARE(orr(w7, w8, Operand(0xaaaaaaaa)),
- "orr w7, w8, #0xaaaaaaaa");
+ COMPARE(tst(w1, Operand(0x11111111)), "tst w1, #0x11111111");
+ COMPARE(tst(x2, Operand(0x8888888888888888)), "tst x2, #0x8888888888888888");
+ COMPARE(orr(w7, w8, Operand(0xaaaaaaaa)), "orr w7, w8, #0xaaaaaaaa");
COMPARE(orr(x9, x10, Operand(0x5555555555555555)),
"orr x9, x10, #0x5555555555555555");
- COMPARE(eor(w15, w16, Operand(0x00000001)),
- "eor w15, w16, #0x1");
- COMPARE(eor(x17, x18, Operand(0x0000000000000003)),
- "eor x17, x18, #0x3");
+ COMPARE(eor(w15, w16, Operand(0x00000001)), "eor w15, w16, #0x1");
+ COMPARE(eor(x17, x18, Operand(0x0000000000000003)), "eor x17, x18, #0x3");
COMPARE(ands(w23, w24, Operand(0x0000000f)), "ands w23, w24, #0xf");
COMPARE(ands(x25, x26, Operand(0x800000000000000f)),
"ands x25, x26, #0x800000000000000f");
// Test inverse.
- COMPARE(bic(w3, w4, Operand(0x20202020)),
- "and w3, w4, #0xdfdfdfdf");
+ COMPARE(bic(w3, w4, Operand(0x20202020)), "and w3, w4, #0xdfdfdfdf");
COMPARE(bic(x5, x6, Operand(0x4040404040404040)),
"and x5, x6, #0xbfbfbfbfbfbfbfbf");
- COMPARE(orn(w11, w12, Operand(0x40004000)),
- "orr w11, w12, #0xbfffbfff");
+ COMPARE(orn(w11, w12, Operand(0x40004000)), "orr w11, w12, #0xbfffbfff");
COMPARE(orn(x13, x14, Operand(0x8181818181818181)),
"orr x13, x14, #0x7e7e7e7e7e7e7e7e");
- COMPARE(eon(w19, w20, Operand(0x80000001)),
- "eor w19, w20, #0x7ffffffe");
+ COMPARE(eon(w19, w20, Operand(0x80000001)), "eor w19, w20, #0x7ffffffe");
COMPARE(eon(x21, x22, Operand(0xc000000000000003)),
"eor x21, x22, #0x3ffffffffffffffc");
COMPARE(bics(w27, w28, Operand(0xfffffff7)), "ands w27, w28, #0x8");
@@ -951,7 +954,7 @@
TEST(branch) {
SETUP();
- #define INST_OFF(x) (INT64_C(x) >> kInstructionSizeLog2)
+#define INST_OFF(x) (INT64_C(x) >> kInstructionSizeLog2)
COMPARE_PREFIX(b(INST_OFF(0x4)), "b #+0x4");
COMPARE_PREFIX(b(INST_OFF(-0x4)), "b #-0x4");
COMPARE_PREFIX(b(INST_OFF(0x7fffffc)), "b #+0x7fffffc");
@@ -1516,12 +1519,10 @@
SETUP();
// Just like load_store_unscaled, but specify the scaling option explicitly.
- LoadStoreScalingOption options[] = {
- PreferUnscaledOffset,
- RequireUnscaledOffset
- };
+ LoadStoreScalingOption options[] = {PreferUnscaledOffset,
+ RequireUnscaledOffset};
- for (size_t i = 0; i < sizeof(options)/sizeof(options[0]); i++) {
+ for (size_t i = 0; i < sizeof(options) / sizeof(options[0]); i++) {
LoadStoreScalingOption option = options[i];
// If an unscaled-offset instruction is requested, it is used, even if the
@@ -1748,8 +1749,7 @@
"stp x18, x19, [sp, #-8]!");
COMPARE(ldp(s30, s31, MemOperand(sp, 12, PostIndex)),
"ldp s30, s31, [sp], #12");
- COMPARE(stp(d30, d31, MemOperand(sp, -16)),
- "stp d30, d31, [sp, #-16]");
+ COMPARE(stp(d30, d31, MemOperand(sp, -16)), "stp d30, d31, [sp, #-16]");
COMPARE(ldp(q30, q31, MemOperand(sp, 32, PostIndex)),
"ldp q30, q31, [sp], #32");
@@ -1885,18 +1885,18 @@
TEST(load_literal_macro) {
- SETUP_MACRO();
+ SETUP();
// In each case, the literal will be placed at PC+8:
// ldr x10, pc+8 // Test instruction.
// ldr xzr, pc+12 // Pool marker.
// .word64 #0x1234567890abcdef // Test literal.
- COMPARE_PREFIX(Ldr(x10, 0x1234567890abcdef), "ldr x10, pc+8");
- COMPARE_PREFIX(Ldr(w20, 0xfedcba09), "ldr w20, pc+8");
- COMPARE_PREFIX(Ldr(d11, 1.234), "ldr d11, pc+8");
- COMPARE_PREFIX(Ldr(s22, 2.5f), "ldr s22, pc+8");
- COMPARE_PREFIX(Ldrsw(x21, 0x80000000), "ldrsw x21, pc+8");
+ COMPARE_MACRO_PREFIX(Ldr(x10, 0x1234567890abcdef), "ldr x10, pc+8");
+ COMPARE_MACRO_PREFIX(Ldr(w20, 0xfedcba09), "ldr w20, pc+8");
+ COMPARE_MACRO_PREFIX(Ldr(d11, 1.234), "ldr d11, pc+8");
+ COMPARE_MACRO_PREFIX(Ldr(s22, 2.5f), "ldr s22, pc+8");
+ COMPARE_MACRO_PREFIX(Ldrsw(x21, 0x80000000), "ldrsw x21, pc+8");
CLEANUP();
}
@@ -1940,38 +1940,17 @@
// Test every encodable prefetch operation.
const char* expected[] = {
- "prfm pldl1keep, ",
- "prfm pldl1strm, ",
- "prfm pldl2keep, ",
- "prfm pldl2strm, ",
- "prfm pldl3keep, ",
- "prfm pldl3strm, ",
- "prfm #0b00110, ",
- "prfm #0b00111, ",
- "prfm plil1keep, ",
- "prfm plil1strm, ",
- "prfm plil2keep, ",
- "prfm plil2strm, ",
- "prfm plil3keep, ",
- "prfm plil3strm, ",
- "prfm #0b01110, ",
- "prfm #0b01111, ",
- "prfm pstl1keep, ",
- "prfm pstl1strm, ",
- "prfm pstl2keep, ",
- "prfm pstl2strm, ",
- "prfm pstl3keep, ",
- "prfm pstl3strm, ",
- "prfm #0b10110, ",
- "prfm #0b10111, ",
- "prfm #0b11000, ",
- "prfm #0b11001, ",
- "prfm #0b11010, ",
- "prfm #0b11011, ",
- "prfm #0b11100, ",
- "prfm #0b11101, ",
- "prfm #0b11110, ",
- "prfm #0b11111, ",
+ "prfm pldl1keep, ", "prfm pldl1strm, ", "prfm pldl2keep, ",
+ "prfm pldl2strm, ", "prfm pldl3keep, ", "prfm pldl3strm, ",
+ "prfm #0b00110, ", "prfm #0b00111, ", "prfm plil1keep, ",
+ "prfm plil1strm, ", "prfm plil2keep, ", "prfm plil2strm, ",
+ "prfm plil3keep, ", "prfm plil3strm, ", "prfm #0b01110, ",
+ "prfm #0b01111, ", "prfm pstl1keep, ", "prfm pstl1strm, ",
+ "prfm pstl2keep, ", "prfm pstl2strm, ", "prfm pstl3keep, ",
+ "prfm pstl3strm, ", "prfm #0b10110, ", "prfm #0b10111, ",
+ "prfm #0b11000, ", "prfm #0b11001, ", "prfm #0b11010, ",
+ "prfm #0b11011, ", "prfm #0b11100, ", "prfm #0b11101, ",
+ "prfm #0b11110, ", "prfm #0b11111, ",
};
const int expected_count = sizeof(expected) / sizeof(expected[0]);
VIXL_STATIC_ASSERT((1 << ImmPrefetchOperation_width) == expected_count);
@@ -1992,38 +1971,17 @@
// Test every encodable prefetch operation.
const char* expected[] = {
- "prfum pldl1keep, ",
- "prfum pldl1strm, ",
- "prfum pldl2keep, ",
- "prfum pldl2strm, ",
- "prfum pldl3keep, ",
- "prfum pldl3strm, ",
- "prfum #0b00110, ",
- "prfum #0b00111, ",
- "prfum plil1keep, ",
- "prfum plil1strm, ",
- "prfum plil2keep, ",
- "prfum plil2strm, ",
- "prfum plil3keep, ",
- "prfum plil3strm, ",
- "prfum #0b01110, ",
- "prfum #0b01111, ",
- "prfum pstl1keep, ",
- "prfum pstl1strm, ",
- "prfum pstl2keep, ",
- "prfum pstl2strm, ",
- "prfum pstl3keep, ",
- "prfum pstl3strm, ",
- "prfum #0b10110, ",
- "prfum #0b10111, ",
- "prfum #0b11000, ",
- "prfum #0b11001, ",
- "prfum #0b11010, ",
- "prfum #0b11011, ",
- "prfum #0b11100, ",
- "prfum #0b11101, ",
- "prfum #0b11110, ",
- "prfum #0b11111, ",
+ "prfum pldl1keep, ", "prfum pldl1strm, ", "prfum pldl2keep, ",
+ "prfum pldl2strm, ", "prfum pldl3keep, ", "prfum pldl3strm, ",
+ "prfum #0b00110, ", "prfum #0b00111, ", "prfum plil1keep, ",
+ "prfum plil1strm, ", "prfum plil2keep, ", "prfum plil2strm, ",
+ "prfum plil3keep, ", "prfum plil3strm, ", "prfum #0b01110, ",
+ "prfum #0b01111, ", "prfum pstl1keep, ", "prfum pstl1strm, ",
+ "prfum pstl2keep, ", "prfum pstl2strm, ", "prfum pstl3keep, ",
+ "prfum pstl3strm, ", "prfum #0b10110, ", "prfum #0b10111, ",
+ "prfum #0b11000, ", "prfum #0b11001, ", "prfum #0b11010, ",
+ "prfum #0b11011, ", "prfum #0b11100, ", "prfum #0b11101, ",
+ "prfum #0b11110, ", "prfum #0b11111, ",
};
const int expected_count = sizeof(expected) / sizeof(expected[0]);
VIXL_STATIC_ASSERT((1 << ImmPrefetchOperation_width) == expected_count);
@@ -2224,7 +2182,7 @@
TEST(cond_select_macro) {
- SETUP_MACRO();
+ SETUP();
// In the tests below we also test the `GetCselSynthesisInformation()` helper.
// These tests are here (rather than in test-assembler-aarch64.cc) because the
@@ -2232,155 +2190,213 @@
bool synthesises_left = false;
bool synthesises_right = false;
- COMPARE(Csel(w0, w1, -1, eq), "csinv w0, w1, wzr, eq");
- MacroAssembler::GetCselSynthesisInformation(w0, w1, -1,
+ COMPARE_MACRO(Csel(w0, w1, -1, eq), "csinv w0, w1, wzr, eq");
+ MacroAssembler::GetCselSynthesisInformation(w0,
+ w1,
+ -1,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && !synthesises_right);
- COMPARE(Csel(w2, w3, 0, ne), "csel w2, w3, wzr, ne");
- MacroAssembler::GetCselSynthesisInformation(w2, w3, wzr,
+ COMPARE_MACRO(Csel(w2, w3, 0, ne), "csel w2, w3, wzr, ne");
+ MacroAssembler::GetCselSynthesisInformation(w2,
+ w3,
+ wzr,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && !synthesises_right);
- COMPARE(Csel(w4, w5, 1, hs), "csinc w4, w5, wzr, hs");
- MacroAssembler::GetCselSynthesisInformation(w4, w5, 1,
+ COMPARE_MACRO(Csel(w4, w5, 1, hs), "csinc w4, w5, wzr, hs");
+ MacroAssembler::GetCselSynthesisInformation(w4,
+ w5,
+ 1,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && !synthesises_right);
- COMPARE(Csel(x6, x7, -1, lo), "csinv x6, x7, xzr, lo");
- MacroAssembler::GetCselSynthesisInformation(x6, x7, xzr,
+ COMPARE_MACRO(Csel(x6, x7, -1, lo), "csinv x6, x7, xzr, lo");
+ MacroAssembler::GetCselSynthesisInformation(x6,
+ x7,
+ xzr,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && !synthesises_right);
- COMPARE(Csel(x8, x9, 0, mi), "csel x8, x9, xzr, mi");
- MacroAssembler::GetCselSynthesisInformation(x8, x9, xzr,
+ COMPARE_MACRO(Csel(x8, x9, 0, mi), "csel x8, x9, xzr, mi");
+ MacroAssembler::GetCselSynthesisInformation(x8,
+ x9,
+ xzr,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && !synthesises_right);
- COMPARE(Csel(x10, x11, 1, pl), "csinc x10, x11, xzr, pl");
- MacroAssembler::GetCselSynthesisInformation(x10, x11, xzr,
+ COMPARE_MACRO(Csel(x10, x11, 1, pl), "csinc x10, x11, xzr, pl");
+ MacroAssembler::GetCselSynthesisInformation(x10,
+ x11,
+ xzr,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && !synthesises_right);
- COMPARE_MACRO(Csel(x12, 0, 0, eq), "mov x12, #0x0");
- MacroAssembler::GetCselSynthesisInformation(x12, 0, 0,
+ COMPARE_MACRO(Csel(x12, 0, 0, eq), "mov x12, #0x0");
+ MacroAssembler::GetCselSynthesisInformation(x12,
+ 0,
+ 0,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && !synthesises_right);
- COMPARE_MACRO(Csel(w13, 0, 1, eq), "cset w13, ne");
- MacroAssembler::GetCselSynthesisInformation(w13, 0, 1,
+ COMPARE_MACRO(Csel(w13, 0, 1, eq), "cset w13, ne");
+ MacroAssembler::GetCselSynthesisInformation(w13,
+ 0,
+ 1,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && !synthesises_right);
- COMPARE_MACRO(Csel(x14, 1, 0, eq), "cset x14, eq");
- MacroAssembler::GetCselSynthesisInformation(x14, 1, 0,
+ COMPARE_MACRO(Csel(x14, 1, 0, eq), "cset x14, eq");
+ MacroAssembler::GetCselSynthesisInformation(x14,
+ 1,
+ 0,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && !synthesises_right);
- COMPARE_MACRO(Csel(w15, 0, -1, eq), "csetm w15, ne");
- MacroAssembler::GetCselSynthesisInformation(w15, 0, -1,
+ COMPARE_MACRO(Csel(w15, 0, -1, eq), "csetm w15, ne");
+ MacroAssembler::GetCselSynthesisInformation(w15,
+ 0,
+ -1,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && !synthesises_right);
- COMPARE_MACRO(Csel(x18, -1, 0, eq), "csetm x18, eq");
- MacroAssembler::GetCselSynthesisInformation(x18, -1, 0,
+ COMPARE_MACRO(Csel(x18, -1, 0, eq), "csetm x18, eq");
+ MacroAssembler::GetCselSynthesisInformation(x18,
+ -1,
+ 0,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && !synthesises_right);
- COMPARE_MACRO(Csel(w19, -1, 1, eq), "mov w19, #0x1\n"
- "cneg w19, w19, eq");
- MacroAssembler::GetCselSynthesisInformation(w19, -1, 1,
+ COMPARE_MACRO(Csel(w19, -1, 1, eq),
+ "mov w19, #0x1\n"
+ "cneg w19, w19, eq");
+ MacroAssembler::GetCselSynthesisInformation(w19,
+ -1,
+ 1,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && synthesises_right);
- COMPARE_MACRO(Csel(x20, 1, -1, eq), "mov x20, #0xffffffffffffffff\n"
- "cneg x20, x20, eq");
- MacroAssembler::GetCselSynthesisInformation(x20, 1, -1,
+ COMPARE_MACRO(Csel(x20, 1, -1, eq),
+ "mov x20, #0xffffffffffffffff\n"
+ "cneg x20, x20, eq");
+ MacroAssembler::GetCselSynthesisInformation(x20,
+ 1,
+ -1,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && synthesises_right);
- COMPARE_MACRO(Csel(w21, 0xaa, 0xbb, eq), "mov w16, #0xaa\n"
- "mov w17, #0xbb\n"
- "csel w21, w16, w17, eq");
- MacroAssembler::GetCselSynthesisInformation(w21, 0xaa, 0xbb,
+ COMPARE_MACRO(Csel(w21, 0xaa, 0xbb, eq),
+ "mov w16, #0xaa\n"
+ "mov w17, #0xbb\n"
+ "csel w21, w16, w17, eq");
+ MacroAssembler::GetCselSynthesisInformation(w21,
+ 0xaa,
+ 0xbb,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(synthesises_left && synthesises_right);
- COMPARE_MACRO(Csel(x22, 0xaa, -0xbb, eq), "mov x16, #0xaa\n"
- "mov x17, #0xffffffffffffff45\n"
- "csel x22, x16, x17, eq");
- MacroAssembler::GetCselSynthesisInformation(x22, 0xaa, -0xbb,
+ COMPARE_MACRO(Csel(x22, 0xaa, -0xbb, eq),
+ "mov x16, #0xaa\n"
+ "mov x17, #0xffffffffffffff45\n"
+ "csel x22, x16, x17, eq");
+ MacroAssembler::GetCselSynthesisInformation(x22,
+ 0xaa,
+ -0xbb,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(synthesises_left && synthesises_right);
- COMPARE_MACRO(Csel(w23, 0, 0xaa, eq), "mov w16, #0xaa\n"
- "csel w23, w16, wzr, ne");
- MacroAssembler::GetCselSynthesisInformation(w23, 0, 0xaa,
+ COMPARE_MACRO(Csel(w23, 0, 0xaa, eq),
+ "mov w16, #0xaa\n"
+ "csel w23, w16, wzr, ne");
+ MacroAssembler::GetCselSynthesisInformation(w23,
+ 0,
+ 0xaa,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && synthesises_right);
- COMPARE_MACRO(Csel(x24, -0xaa, 0, eq), "mov x16, #0xffffffffffffff56\n"
- "csel x24, x16, xzr, eq");
- MacroAssembler::GetCselSynthesisInformation(x24, -0xaa, 0,
+ COMPARE_MACRO(Csel(x24, -0xaa, 0, eq),
+ "mov x16, #0xffffffffffffff56\n"
+ "csel x24, x16, xzr, eq");
+ MacroAssembler::GetCselSynthesisInformation(x24,
+ -0xaa,
+ 0,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(synthesises_left && !synthesises_right);
- COMPARE_MACRO(Csel(w25, 0xcc, -0xcc, eq), "mov w25, #0xffffff34\n"
- "cneg w25, w25, eq");
- MacroAssembler::GetCselSynthesisInformation(w25, 0xcc, -0xcc,
+ COMPARE_MACRO(Csel(w25, 0xcc, -0xcc, eq),
+ "mov w25, #0xffffff34\n"
+ "cneg w25, w25, eq");
+ MacroAssembler::GetCselSynthesisInformation(w25,
+ 0xcc,
+ -0xcc,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && synthesises_right);
- COMPARE_MACRO(Csel(x26, -0xcc, 0xcc, eq), "mov x26, #0xcc\n"
- "cneg x26, x26, eq");
- MacroAssembler::GetCselSynthesisInformation(w25, -0xcc, 0xcc,
+ COMPARE_MACRO(Csel(x26, -0xcc, 0xcc, eq),
+ "mov x26, #0xcc\n"
+ "cneg x26, x26, eq");
+ MacroAssembler::GetCselSynthesisInformation(w25,
+ -0xcc,
+ 0xcc,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && synthesises_right);
// Test with `Operand` inputs.
- COMPARE_MACRO(Csel(x0, x1, Operand(x2, LSL, 3), eq), "lsl x16, x2, #3\n"
- "csel x0, x1, x16, eq");
- MacroAssembler::GetCselSynthesisInformation(x0, x1, Operand(x2, LSL, 3),
+ COMPARE_MACRO(Csel(x0, x1, Operand(x2, LSL, 3), eq),
+ "lsl x16, x2, #3\n"
+ "csel x0, x1, x16, eq");
+ MacroAssembler::GetCselSynthesisInformation(x0,
+ x1,
+ Operand(x2, LSL, 3),
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && synthesises_right);
- COMPARE_MACRO(Csel(x3, x4, Operand(x5, SXTH), eq), "sxth x16, w5\n"
- "csel x3, x4, x16, eq");
- MacroAssembler::GetCselSynthesisInformation(x3, x4, Operand(x5, SXTH),
+ COMPARE_MACRO(Csel(x3, x4, Operand(x5, SXTH), eq),
+ "sxth x16, w5\n"
+ "csel x3, x4, x16, eq");
+ MacroAssembler::GetCselSynthesisInformation(x3,
+ x4,
+ Operand(x5, SXTH),
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && synthesises_right);
- COMPARE_MACRO(Csel(x6, Operand(x7, LSL, 7), x8, eq), "lsl x16, x7, #7\n"
- "csel x6, x16, x8, eq");
- MacroAssembler::GetCselSynthesisInformation(x6, Operand(x7, LSL, 7), x8,
+ COMPARE_MACRO(Csel(x6, Operand(x7, LSL, 7), x8, eq),
+ "lsl x16, x7, #7\n"
+ "csel x6, x16, x8, eq");
+ MacroAssembler::GetCselSynthesisInformation(x6,
+ Operand(x7, LSL, 7),
+ x8,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(synthesises_left && !synthesises_right);
- COMPARE_MACRO(Csel(x9, Operand(x10, SXTH), x11, eq), "sxth x16, w10\n"
- "csel x9, x16, x11, eq");
- MacroAssembler::GetCselSynthesisInformation(x9, Operand(x10, SXTH), x11,
+ COMPARE_MACRO(Csel(x9, Operand(x10, SXTH), x11, eq),
+ "sxth x16, w10\n"
+ "csel x9, x16, x11, eq");
+ MacroAssembler::GetCselSynthesisInformation(x9,
+ Operand(x10, SXTH),
+ x11,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(synthesises_left && !synthesises_right);
@@ -2399,38 +2415,50 @@
COMPARE_MACRO(Csel(x15, 0, Operand(x18, LSR, 18), eq),
"lsr x16, x18, #18\n"
"csel x15, x16, xzr, ne");
- MacroAssembler::GetCselSynthesisInformation(x15, 0, Operand(x18, LSR, 18),
+ MacroAssembler::GetCselSynthesisInformation(x15,
+ 0,
+ Operand(x18, LSR, 18),
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && synthesises_right);
// Test with the zero register.
COMPARE_MACRO(Csel(w19, wzr, wzr, eq), "mov w19, #0x0");
- MacroAssembler::GetCselSynthesisInformation(w19, wzr, wzr,
+ MacroAssembler::GetCselSynthesisInformation(w19,
+ wzr,
+ wzr,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && !synthesises_right);
COMPARE_MACRO(Csel(x20, x21, xzr, eq), "csel x20, x21, xzr, eq");
- MacroAssembler::GetCselSynthesisInformation(x20, x21, xzr,
+ MacroAssembler::GetCselSynthesisInformation(x20,
+ x21,
+ xzr,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && !synthesises_right);
COMPARE_MACRO(Csel(w22, wzr, w23, eq), "csel w22, w23, wzr, ne");
- MacroAssembler::GetCselSynthesisInformation(w22, wzr, w23,
+ MacroAssembler::GetCselSynthesisInformation(w22,
+ wzr,
+ w23,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && !synthesises_right);
COMPARE_MACRO(Csel(x24, xzr, 0, eq), "mov x24, #0x0");
- MacroAssembler::GetCselSynthesisInformation(x24, xzr, 0,
+ MacroAssembler::GetCselSynthesisInformation(x24,
+ xzr,
+ 0,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && !synthesises_right);
COMPARE_MACRO(Csel(w25, wzr, 1, eq), "cset w25, ne");
- MacroAssembler::GetCselSynthesisInformation(w25, wzr, 1,
+ MacroAssembler::GetCselSynthesisInformation(w25,
+ wzr,
+ 1,
&synthesises_left,
&synthesises_right);
VIXL_CHECK(!synthesises_left && !synthesises_right);
@@ -2459,12 +2487,12 @@
TEST(cond_cmp_macro) {
- SETUP_MACRO();
+ SETUP();
- COMPARE(Ccmp(w0, -1, VFlag, hi), "ccmn w0, #1, #nzcV, hi");
- COMPARE(Ccmp(x1, -31, CFlag, ge), "ccmn x1, #31, #nzCv, ge");
- COMPARE(Ccmn(w2, -1, CVFlag, gt), "ccmp w2, #1, #nzCV, gt");
- COMPARE(Ccmn(x3, -31, ZCVFlag, ls), "ccmp x3, #31, #nZCV, ls");
+ COMPARE_MACRO(Ccmp(w0, -1, VFlag, hi), "ccmn w0, #1, #nzcV, hi");
+ COMPARE_MACRO(Ccmp(x1, -31, CFlag, ge), "ccmn x1, #31, #nzCv, ge");
+ COMPARE_MACRO(Ccmn(w2, -1, CVFlag, gt), "ccmp w2, #1, #nzCV, gt");
+ COMPARE_MACRO(Ccmn(x3, -31, ZCVFlag, ls), "ccmp x3, #31, #nZCV, ls");
CLEANUP();
}
@@ -2683,14 +2711,14 @@
COMPARE(fcvtzu(w18, s19), "fcvtzu w18, s19");
COMPARE(fcvtzs(x20, s21), "fcvtzs x20, s21");
COMPARE(fcvtzs(w22, s23), "fcvtzs w22, s23");
- COMPARE(fcvtzs(w2, d1, 1), "fcvtzs w2, d1, #1");
- COMPARE(fcvtzs(w2, s1, 1), "fcvtzs w2, s1, #1");
+ COMPARE(fcvtzs(w2, d1, 1), "fcvtzs w2, d1, #1");
+ COMPARE(fcvtzs(w2, s1, 1), "fcvtzs w2, s1, #1");
COMPARE(fcvtzs(x4, d3, 15), "fcvtzs x4, d3, #15");
COMPARE(fcvtzs(x4, s3, 15), "fcvtzs x4, s3, #15");
COMPARE(fcvtzs(w6, d5, 32), "fcvtzs w6, d5, #32");
COMPARE(fcvtzs(w6, s5, 32), "fcvtzs w6, s5, #32");
- COMPARE(fcvtzu(w2, d1, 1), "fcvtzu w2, d1, #1");
- COMPARE(fcvtzu(w2, s1, 1), "fcvtzu w2, s1, #1");
+ COMPARE(fcvtzu(w2, d1, 1), "fcvtzu w2, d1, #1");
+ COMPARE(fcvtzu(w2, s1, 1), "fcvtzu w2, s1, #1");
COMPARE(fcvtzu(x4, d3, 15), "fcvtzu x4, d3, #15");
COMPARE(fcvtzu(x4, s3, 15), "fcvtzu x4, s3, #15");
COMPARE(fcvtzu(w6, d5, 32), "fcvtzu w6, d5, #32");
@@ -2824,13 +2852,13 @@
TEST(unreachable) {
- SETUP_MACRO();
+ SETUP();
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
VIXL_ASSERT(kUnreachableOpcode == 0xdeb0);
- COMPARE(Unreachable(), "hlt #0xdeb0");
+ COMPARE_MACRO(Unreachable(), "hlt #0xdeb0");
#else
- COMPARE(Unreachable(), "blr xzr");
+ COMPARE_MACRO(Unreachable(), "blr xzr");
#endif
CLEANUP();
@@ -2839,7 +2867,7 @@
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
TEST(trace) {
- SETUP_MACRO();
+ SETUP();
VIXL_ASSERT(kTraceOpcode == 0xdeb2);
@@ -2854,7 +2882,7 @@
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
TEST(log) {
- SETUP_MACRO();
+ SETUP();
VIXL_ASSERT(kLogOpcode == 0xdeb3);
@@ -2901,365 +2929,422 @@
TEST(add_sub_negative) {
- SETUP_MACRO();
+ SETUP();
- COMPARE(Add(x10, x0, -42), "sub x10, x0, #0x2a (42)");
- COMPARE(Add(x11, x1, -687), "sub x11, x1, #0x2af (687)");
- COMPARE(Add(x12, x2, -0x88), "sub x12, x2, #0x88 (136)");
+ COMPARE_MACRO(Add(x10, x0, -42), "sub x10, x0, #0x2a (42)");
+ COMPARE_MACRO(Add(x11, x1, -687), "sub x11, x1, #0x2af (687)");
+ COMPARE_MACRO(Add(x12, x2, -0x88), "sub x12, x2, #0x88 (136)");
- COMPARE(Sub(x13, x0, -600), "add x13, x0, #0x258 (600)");
- COMPARE(Sub(x14, x1, -313), "add x14, x1, #0x139 (313)");
- COMPARE(Sub(x15, x2, -0x555), "add x15, x2, #0x555 (1365)");
+ COMPARE_MACRO(Sub(x13, x0, -600), "add x13, x0, #0x258 (600)");
+ COMPARE_MACRO(Sub(x14, x1, -313), "add x14, x1, #0x139 (313)");
+ COMPARE_MACRO(Sub(x15, x2, -0x555), "add x15, x2, #0x555 (1365)");
- COMPARE(Add(w19, w3, -0x344), "sub w19, w3, #0x344 (836)");
- COMPARE(Add(w20, w4, -2000), "sub w20, w4, #0x7d0 (2000)");
+ COMPARE_MACRO(Add(w19, w3, -0x344), "sub w19, w3, #0x344 (836)");
+ COMPARE_MACRO(Add(w20, w4, -2000), "sub w20, w4, #0x7d0 (2000)");
- COMPARE(Add(w0, w1, 5, LeaveFlags), "add w0, w1, #0x5 (5)");
- COMPARE(Add(w1, w2, 15, SetFlags), "adds w1, w2, #0xf (15)");
+ COMPARE_MACRO(Add(w0, w1, 5, LeaveFlags), "add w0, w1, #0x5 (5)");
+ COMPARE_MACRO(Add(w1, w2, 15, SetFlags), "adds w1, w2, #0xf (15)");
- COMPARE(Sub(w0, w1, 5, LeaveFlags), "sub w0, w1, #0x5 (5)");
- COMPARE(Sub(w1, w2, 15, SetFlags), "subs w1, w2, #0xf (15)");
+ COMPARE_MACRO(Sub(w0, w1, 5, LeaveFlags), "sub w0, w1, #0x5 (5)");
+ COMPARE_MACRO(Sub(w1, w2, 15, SetFlags), "subs w1, w2, #0xf (15)");
- COMPARE(Sub(w21, w3, -0xbc), "add w21, w3, #0xbc (188)");
- COMPARE(Sub(w22, w4, -2000), "add w22, w4, #0x7d0 (2000)");
+ COMPARE_MACRO(Sub(w21, w3, -0xbc), "add w21, w3, #0xbc (188)");
+ COMPARE_MACRO(Sub(w22, w4, -2000), "add w22, w4, #0x7d0 (2000)");
- COMPARE(Cmp(w0, -1), "cmn w0, #0x1 (1)");
- COMPARE(Cmp(x1, -1), "cmn x1, #0x1 (1)");
- COMPARE(Cmp(w2, -4095), "cmn w2, #0xfff (4095)");
- COMPARE(Cmp(x3, -4095), "cmn x3, #0xfff (4095)");
+ COMPARE_MACRO(Cmp(w0, -1), "cmn w0, #0x1 (1)");
+ COMPARE_MACRO(Cmp(x1, -1), "cmn x1, #0x1 (1)");
+ COMPARE_MACRO(Cmp(w2, -4095), "cmn w2, #0xfff (4095)");
+ COMPARE_MACRO(Cmp(x3, -4095), "cmn x3, #0xfff (4095)");
- COMPARE(Cmn(w0, -1), "cmp w0, #0x1 (1)");
- COMPARE(Cmn(x1, -1), "cmp x1, #0x1 (1)");
- COMPARE(Cmn(w2, -4095), "cmp w2, #0xfff (4095)");
- COMPARE(Cmn(x3, -4095), "cmp x3, #0xfff (4095)");
+ COMPARE_MACRO(Cmn(w0, -1), "cmp w0, #0x1 (1)");
+ COMPARE_MACRO(Cmn(x1, -1), "cmp x1, #0x1 (1)");
+ COMPARE_MACRO(Cmn(w2, -4095), "cmp w2, #0xfff (4095)");
+ COMPARE_MACRO(Cmn(x3, -4095), "cmp x3, #0xfff (4095)");
CLEANUP();
}
TEST(logical_immediate_move) {
- SETUP_MACRO();
+ SETUP();
- COMPARE(And(w0, w1, 0), "mov w0, #0x0");
- COMPARE(And(x0, x1, 0), "mov x0, #0x0");
- COMPARE(Orr(w2, w3, 0), "mov w2, w3");
- COMPARE(Orr(x2, x3, 0), "mov x2, x3");
- COMPARE(Eor(w4, w5, 0), "mov w4, w5");
- COMPARE(Eor(x4, x5, 0), "mov x4, x5");
- COMPARE(Bic(w6, w7, 0), "mov w6, w7");
- COMPARE(Bic(x6, x7, 0), "mov x6, x7");
- COMPARE(Orn(w8, w9, 0), "mov w8, #0xffffffff");
- COMPARE(Orn(x8, x9, 0), "mov x8, #0xffffffffffffffff");
- COMPARE(Eon(w10, w11, 0), "mvn w10, w11");
- COMPARE(Eon(x10, x11, 0), "mvn x10, x11");
+ COMPARE_MACRO(And(w0, w1, 0), "mov w0, #0x0");
+ COMPARE_MACRO(And(x0, x1, 0), "mov x0, #0x0");
+ COMPARE_MACRO(Orr(w2, w3, 0), "mov w2, w3");
+ COMPARE_MACRO(Orr(x2, x3, 0), "mov x2, x3");
+ COMPARE_MACRO(Eor(w4, w5, 0), "mov w4, w5");
+ COMPARE_MACRO(Eor(x4, x5, 0), "mov x4, x5");
+ COMPARE_MACRO(Bic(w6, w7, 0), "mov w6, w7");
+ COMPARE_MACRO(Bic(x6, x7, 0), "mov x6, x7");
+ COMPARE_MACRO(Orn(w8, w9, 0), "mov w8, #0xffffffff");
+ COMPARE_MACRO(Orn(x8, x9, 0), "mov x8, #0xffffffffffffffff");
+ COMPARE_MACRO(Eon(w10, w11, 0), "mvn w10, w11");
+ COMPARE_MACRO(Eon(x10, x11, 0), "mvn x10, x11");
- COMPARE(And(w12, w13, 0xffffffff), "mov w12, w13");
- COMPARE(And(x12, x13, 0xffffffff), "and x12, x13, #0xffffffff");
- COMPARE(And(x12, x13, 0xffffffffffffffff), "mov x12, x13");
- COMPARE(Orr(w14, w15, 0xffffffff), "mov w14, #0xffffffff");
- COMPARE(Orr(x14, x15, 0xffffffff), "orr x14, x15, #0xffffffff");
- COMPARE(Orr(x14, x15, 0xffffffffffffffff), "mov x14, #0xffffffffffffffff");
- COMPARE(Eor(w16, w17, 0xffffffff), "mvn w16, w17");
- COMPARE(Eor(x16, x17, 0xffffffff), "eor x16, x17, #0xffffffff");
- COMPARE(Eor(x16, x17, 0xffffffffffffffff), "mvn x16, x17");
- COMPARE(Bic(w18, w19, 0xffffffff), "mov w18, #0x0");
- COMPARE(Bic(x18, x19, 0xffffffff), "and x18, x19, #0xffffffff00000000");
- COMPARE(Bic(x18, x19, 0xffffffffffffffff), "mov x18, #0x0");
- COMPARE(Orn(w20, w21, 0xffffffff), "mov w20, w21");
- COMPARE(Orn(x20, x21, 0xffffffff), "orr x20, x21, #0xffffffff00000000");
- COMPARE(Orn(x20, x21, 0xffffffffffffffff), "mov x20, x21");
- COMPARE(Eon(w22, w23, 0xffffffff), "mov w22, w23");
- COMPARE(Eon(x22, x23, 0xffffffff), "eor x22, x23, #0xffffffff00000000");
- COMPARE(Eon(x22, x23, 0xffffffffffffffff), "mov x22, x23");
+ COMPARE_MACRO(And(w12, w13, 0xffffffff), "mov w12, w13");
+ COMPARE_MACRO(And(x12, x13, 0xffffffff), "and x12, x13, #0xffffffff");
+ COMPARE_MACRO(And(x12, x13, 0xffffffffffffffff), "mov x12, x13");
+ COMPARE_MACRO(Orr(w14, w15, 0xffffffff), "mov w14, #0xffffffff");
+ COMPARE_MACRO(Orr(x14, x15, 0xffffffff), "orr x14, x15, #0xffffffff");
+ COMPARE_MACRO(Orr(x14, x15, 0xffffffffffffffff),
+ "mov x14, #0xffffffffffffffff");
+ COMPARE_MACRO(Eor(w16, w17, 0xffffffff), "mvn w16, w17");
+ COMPARE_MACRO(Eor(x16, x17, 0xffffffff), "eor x16, x17, #0xffffffff");
+ COMPARE_MACRO(Eor(x16, x17, 0xffffffffffffffff), "mvn x16, x17");
+ COMPARE_MACRO(Bic(w18, w19, 0xffffffff), "mov w18, #0x0");
+ COMPARE_MACRO(Bic(x18, x19, 0xffffffff), "and x18, x19, #0xffffffff00000000");
+ COMPARE_MACRO(Bic(x18, x19, 0xffffffffffffffff), "mov x18, #0x0");
+ COMPARE_MACRO(Orn(w20, w21, 0xffffffff), "mov w20, w21");
+ COMPARE_MACRO(Orn(x20, x21, 0xffffffff), "orr x20, x21, #0xffffffff00000000");
+ COMPARE_MACRO(Orn(x20, x21, 0xffffffffffffffff), "mov x20, x21");
+ COMPARE_MACRO(Eon(w22, w23, 0xffffffff), "mov w22, w23");
+ COMPARE_MACRO(Eon(x22, x23, 0xffffffff), "eor x22, x23, #0xffffffff00000000");
+ COMPARE_MACRO(Eon(x22, x23, 0xffffffffffffffff), "mov x22, x23");
CLEANUP();
}
TEST(barriers) {
- SETUP_MACRO();
+ SETUP();
// DMB
- COMPARE(Dmb(FullSystem, BarrierAll), "dmb sy");
- COMPARE(Dmb(FullSystem, BarrierReads), "dmb ld");
- COMPARE(Dmb(FullSystem, BarrierWrites), "dmb st");
+ COMPARE_MACRO(Dmb(FullSystem, BarrierAll), "dmb sy");
+ COMPARE_MACRO(Dmb(FullSystem, BarrierReads), "dmb ld");
+ COMPARE_MACRO(Dmb(FullSystem, BarrierWrites), "dmb st");
- COMPARE(Dmb(InnerShareable, BarrierAll), "dmb ish");
- COMPARE(Dmb(InnerShareable, BarrierReads), "dmb ishld");
- COMPARE(Dmb(InnerShareable, BarrierWrites), "dmb ishst");
+ COMPARE_MACRO(Dmb(InnerShareable, BarrierAll), "dmb ish");
+ COMPARE_MACRO(Dmb(InnerShareable, BarrierReads), "dmb ishld");
+ COMPARE_MACRO(Dmb(InnerShareable, BarrierWrites), "dmb ishst");
- COMPARE(Dmb(NonShareable, BarrierAll), "dmb nsh");
- COMPARE(Dmb(NonShareable, BarrierReads), "dmb nshld");
- COMPARE(Dmb(NonShareable, BarrierWrites), "dmb nshst");
+ COMPARE_MACRO(Dmb(NonShareable, BarrierAll), "dmb nsh");
+ COMPARE_MACRO(Dmb(NonShareable, BarrierReads), "dmb nshld");
+ COMPARE_MACRO(Dmb(NonShareable, BarrierWrites), "dmb nshst");
- COMPARE(Dmb(OuterShareable, BarrierAll), "dmb osh");
- COMPARE(Dmb(OuterShareable, BarrierReads), "dmb oshld");
- COMPARE(Dmb(OuterShareable, BarrierWrites), "dmb oshst");
+ COMPARE_MACRO(Dmb(OuterShareable, BarrierAll), "dmb osh");
+ COMPARE_MACRO(Dmb(OuterShareable, BarrierReads), "dmb oshld");
+ COMPARE_MACRO(Dmb(OuterShareable, BarrierWrites), "dmb oshst");
- COMPARE(Dmb(FullSystem, BarrierOther), "dmb sy (0b1100)");
- COMPARE(Dmb(InnerShareable, BarrierOther), "dmb sy (0b1000)");
- COMPARE(Dmb(NonShareable, BarrierOther), "dmb sy (0b0100)");
- COMPARE(Dmb(OuterShareable, BarrierOther), "dmb sy (0b0000)");
+ COMPARE_MACRO(Dmb(FullSystem, BarrierOther), "dmb sy (0b1100)");
+ COMPARE_MACRO(Dmb(InnerShareable, BarrierOther), "dmb sy (0b1000)");
+ COMPARE_MACRO(Dmb(NonShareable, BarrierOther), "dmb sy (0b0100)");
+ COMPARE_MACRO(Dmb(OuterShareable, BarrierOther), "dmb sy (0b0000)");
// DSB
- COMPARE(Dsb(FullSystem, BarrierAll), "dsb sy");
- COMPARE(Dsb(FullSystem, BarrierReads), "dsb ld");
- COMPARE(Dsb(FullSystem, BarrierWrites), "dsb st");
+ COMPARE_MACRO(Dsb(FullSystem, BarrierAll), "dsb sy");
+ COMPARE_MACRO(Dsb(FullSystem, BarrierReads), "dsb ld");
+ COMPARE_MACRO(Dsb(FullSystem, BarrierWrites), "dsb st");
- COMPARE(Dsb(InnerShareable, BarrierAll), "dsb ish");
- COMPARE(Dsb(InnerShareable, BarrierReads), "dsb ishld");
- COMPARE(Dsb(InnerShareable, BarrierWrites), "dsb ishst");
+ COMPARE_MACRO(Dsb(InnerShareable, BarrierAll), "dsb ish");
+ COMPARE_MACRO(Dsb(InnerShareable, BarrierReads), "dsb ishld");
+ COMPARE_MACRO(Dsb(InnerShareable, BarrierWrites), "dsb ishst");
- COMPARE(Dsb(NonShareable, BarrierAll), "dsb nsh");
- COMPARE(Dsb(NonShareable, BarrierReads), "dsb nshld");
- COMPARE(Dsb(NonShareable, BarrierWrites), "dsb nshst");
+ COMPARE_MACRO(Dsb(NonShareable, BarrierAll), "dsb nsh");
+ COMPARE_MACRO(Dsb(NonShareable, BarrierReads), "dsb nshld");
+ COMPARE_MACRO(Dsb(NonShareable, BarrierWrites), "dsb nshst");
- COMPARE(Dsb(OuterShareable, BarrierAll), "dsb osh");
- COMPARE(Dsb(OuterShareable, BarrierReads), "dsb oshld");
- COMPARE(Dsb(OuterShareable, BarrierWrites), "dsb oshst");
+ COMPARE_MACRO(Dsb(OuterShareable, BarrierAll), "dsb osh");
+ COMPARE_MACRO(Dsb(OuterShareable, BarrierReads), "dsb oshld");
+ COMPARE_MACRO(Dsb(OuterShareable, BarrierWrites), "dsb oshst");
- COMPARE(Dsb(FullSystem, BarrierOther), "dsb sy (0b1100)");
- COMPARE(Dsb(InnerShareable, BarrierOther), "dsb sy (0b1000)");
- COMPARE(Dsb(NonShareable, BarrierOther), "dsb sy (0b0100)");
- COMPARE(Dsb(OuterShareable, BarrierOther), "dsb sy (0b0000)");
+ COMPARE_MACRO(Dsb(FullSystem, BarrierOther), "dsb sy (0b1100)");
+ COMPARE_MACRO(Dsb(InnerShareable, BarrierOther), "dsb sy (0b1000)");
+ COMPARE_MACRO(Dsb(NonShareable, BarrierOther), "dsb sy (0b0100)");
+ COMPARE_MACRO(Dsb(OuterShareable, BarrierOther), "dsb sy (0b0000)");
// ISB
- COMPARE(Isb(), "isb");
+ COMPARE_MACRO(Isb(), "isb");
CLEANUP();
}
#define VLIST2(v) \
- v, VRegister((v.GetCode()+1)%32, v.GetSizeInBits(), v.GetLanes())
-#define VLIST3(v) VLIST2(v), \
- VRegister((v.GetCode()+2)%32, v.GetSizeInBits(), v.GetLanes())
-#define VLIST4(v) VLIST3(v), \
- VRegister((v.GetCode()+3)%32, v.GetSizeInBits(), v.GetLanes())
+ v, VRegister((v.GetCode() + 1) % 32, v.GetSizeInBits(), v.GetLanes())
+#define VLIST3(v) \
+ VLIST2(v), VRegister((v.GetCode() + 2) % 32, v.GetSizeInBits(), v.GetLanes())
+#define VLIST4(v) \
+ VLIST3(v), VRegister((v.GetCode() + 3) % 32, v.GetSizeInBits(), v.GetLanes())
-#define NEON_FORMAT_LIST(V) \
- V(V8B(), "8b") \
- V(V16B(), "16b") \
- V(V4H(), "4h") \
- V(V8H(), "8h") \
- V(V2S(), "2s") \
- V(V4S(), "4s") \
+#define NEON_FORMAT_LIST(V) \
+ V(V8B(), "8b") \
+ V(V16B(), "16b") \
+ V(V4H(), "4h") \
+ V(V8H(), "8h") \
+ V(V2S(), "2s") \
+ V(V4S(), "4s") \
V(V2D(), "2d")
-#define NEON_FORMAT_LIST_LP(V) \
- V(V4H(), "4h", V8B(), "8b") \
- V(V2S(), "2s", V4H(), "4h") \
- V(V1D(), "1d", V2S(), "2s") \
- V(V8H(), "8h", V16B(), "16b") \
- V(V4S(), "4s", V8H(), "8h") \
+#define NEON_FORMAT_LIST_LP(V) \
+ V(V4H(), "4h", V8B(), "8b") \
+ V(V2S(), "2s", V4H(), "4h") \
+ V(V1D(), "1d", V2S(), "2s") \
+ V(V8H(), "8h", V16B(), "16b") \
+ V(V4S(), "4s", V8H(), "8h") \
V(V2D(), "2d", V4S(), "4s")
-#define NEON_FORMAT_LIST_LW(V) \
- V(V8H(), "8h", V8B(), "8b") \
- V(V4S(), "4s", V4H(), "4h") \
+#define NEON_FORMAT_LIST_LW(V) \
+ V(V8H(), "8h", V8B(), "8b") \
+ V(V4S(), "4s", V4H(), "4h") \
V(V2D(), "2d", V2S(), "2s")
-#define NEON_FORMAT_LIST_LW2(V) \
- V(V8H(), "8h", V16B(), "16b") \
- V(V4S(), "4s", V8H(), "8h") \
+#define NEON_FORMAT_LIST_LW2(V) \
+ V(V8H(), "8h", V16B(), "16b") \
+ V(V4S(), "4s", V8H(), "8h") \
V(V2D(), "2d", V4S(), "4s")
-#define NEON_FORMAT_LIST_BHS(V) \
- V(V8B(), "8b") \
- V(V16B(), "16b") \
- V(V4H(), "4h") \
- V(V8H(), "8h") \
- V(V2S(), "2s") \
+#define NEON_FORMAT_LIST_BHS(V) \
+ V(V8B(), "8b") \
+ V(V16B(), "16b") \
+ V(V4H(), "4h") \
+ V(V8H(), "8h") \
+ V(V2S(), "2s") \
V(V4S(), "4s")
-#define NEON_FORMAT_LIST_HS(V) \
- V(V4H(), "4h") \
- V(V8H(), "8h") \
- V(V2S(), "2s") \
+#define NEON_FORMAT_LIST_HS(V) \
+ V(V4H(), "4h") \
+ V(V8H(), "8h") \
+ V(V2S(), "2s") \
V(V4S(), "4s")
TEST(neon_load_store_vector) {
- SETUP_MACRO();
+ SETUP();
- #define DISASM_INST(M, S) \
- COMPARE(Ld1(v0.M, MemOperand(x15)), \
- "ld1 {v0." S "}, [x15]"); \
- COMPARE(Ld1(v1.M, v2.M, MemOperand(x16)), \
- "ld1 {v1." S ", v2." S "}, [x16]"); \
- COMPARE(Ld1(v3.M, v4.M, v5.M, MemOperand(x17)), \
- "ld1 {v3." S ", v4." S ", v5." S "}, [x17]"); \
- COMPARE(Ld1(v6.M, v7.M, v8.M, v9.M, MemOperand(x18)), \
- "ld1 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18]") \
- COMPARE(Ld1(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
- "ld1 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp]") \
- COMPARE(Ld2(v1.M, v2.M, MemOperand(x16)), \
- "ld2 {v1." S ", v2." S "}, [x16]"); \
- COMPARE(Ld3(v3.M, v4.M, v5.M, MemOperand(x17)), \
- "ld3 {v3." S ", v4." S ", v5." S "}, [x17]"); \
- COMPARE(Ld4(v6.M, v7.M, v8.M, v9.M, MemOperand(x18)), \
- "ld4 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18]") \
- COMPARE(Ld4(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
- "ld4 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp]") \
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Ld1(v0.M, MemOperand(x15)), "ld1 {v0." S "}, [x15]"); \
+ COMPARE_MACRO(Ld1(v1.M, v2.M, MemOperand(x16)), \
+ "ld1 {v1." S ", v2." S "}, [x16]"); \
+ COMPARE_MACRO(Ld1(v3.M, v4.M, v5.M, MemOperand(x17)), \
+ "ld1 {v3." S ", v4." S ", v5." S "}, [x17]"); \
+ COMPARE_MACRO(Ld1(v6.M, v7.M, v8.M, v9.M, MemOperand(x18)), \
+ "ld1 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18]") \
+ COMPARE_MACRO(Ld1(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
+ "ld1 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp]") \
+ COMPARE_MACRO(Ld2(v1.M, v2.M, MemOperand(x16)), \
+ "ld2 {v1." S ", v2." S "}, [x16]"); \
+ COMPARE_MACRO(Ld3(v3.M, v4.M, v5.M, MemOperand(x17)), \
+ "ld3 {v3." S ", v4." S ", v5." S "}, [x17]"); \
+ COMPARE_MACRO(Ld4(v6.M, v7.M, v8.M, v9.M, MemOperand(x18)), \
+ "ld4 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18]") \
+ COMPARE_MACRO(Ld4(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
+ "ld4 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp]") \
NEON_FORMAT_LIST(DISASM_INST);
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Ld1(v0.M, MemOperand(x15, x20, PostIndex)), \
- "ld1 {v0." S "}, [x15], x20"); \
- COMPARE(Ld1(v1.M, v2.M, MemOperand(x16, x21, PostIndex)), \
- "ld1 {v1." S ", v2." S "}, [x16], x21"); \
- COMPARE(Ld1(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \
- "ld1 {v3." S ", v4." S ", v5." S "}, [x17], x22"); \
- COMPARE(Ld1(v6.M, v7.M, v8.M, v9.M, MemOperand(x18, x23, PostIndex)), \
- "ld1 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18], x23") \
- COMPARE(Ld1(v30.M, v31.M, v0.M, v1.M, MemOperand(sp, x24, PostIndex)), \
- "ld1 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp], x24") \
- COMPARE(Ld2(v1.M, v2.M, MemOperand(x16, x21, PostIndex)), \
- "ld2 {v1." S ", v2." S "}, [x16], x21"); \
- COMPARE(Ld3(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \
- "ld3 {v3." S ", v4." S ", v5." S "}, [x17], x22"); \
- COMPARE(Ld4(v6.M, v7.M, v8.M, v9.M, MemOperand(x18, x23, PostIndex)), \
- "ld4 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18], x23") \
- COMPARE(Ld4(v30.M, v31.M, v0.M, v1.M, MemOperand(sp, x24, PostIndex)), \
- "ld4 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp], x24") \
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Ld1(v0.M, MemOperand(x15, x20, PostIndex)), \
+ "ld1 {v0." S "}, [x15], x20"); \
+ COMPARE_MACRO(Ld1(v1.M, v2.M, MemOperand(x16, x21, PostIndex)), \
+ "ld1 {v1." S ", v2." S "}, [x16], x21"); \
+ COMPARE_MACRO(Ld1(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \
+ "ld1 {v3." S ", v4." S ", v5." S "}, [x17], x22"); \
+ COMPARE_MACRO(Ld1(v6.M, v7.M, v8.M, v9.M, MemOperand(x18, x23, PostIndex)), \
+ "ld1 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18], x23") \
+ COMPARE_MACRO(Ld1(v30.M, v31.M, v0.M, v1.M, MemOperand(sp, x24, PostIndex)), \
+ "ld1 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp], x24") \
+ COMPARE_MACRO(Ld2(v1.M, v2.M, MemOperand(x16, x21, PostIndex)), \
+ "ld2 {v1." S ", v2." S "}, [x16], x21"); \
+ COMPARE_MACRO(Ld3(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \
+ "ld3 {v3." S ", v4." S ", v5." S "}, [x17], x22"); \
+ COMPARE_MACRO(Ld4(v6.M, v7.M, v8.M, v9.M, MemOperand(x18, x23, PostIndex)), \
+ "ld4 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18], x23") \
+ COMPARE_MACRO(Ld4(v30.M, v31.M, v0.M, v1.M, MemOperand(sp, x24, PostIndex)), \
+ "ld4 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp], x24") \
NEON_FORMAT_LIST(DISASM_INST);
- #undef DISASM_INST
+#undef DISASM_INST
- COMPARE(Ld1(v0.V8B(), MemOperand(x15, 8, PostIndex)),
- "ld1 {v0.8b}, [x15], #8");
- COMPARE(Ld1(v1.V16B(), MemOperand(x16, 16, PostIndex)),
- "ld1 {v1.16b}, [x16], #16");
- COMPARE(Ld1(v2.V4H(), v3.V4H(), MemOperand(x17, 16, PostIndex)),
- "ld1 {v2.4h, v3.4h}, [x17], #16");
- COMPARE(Ld1(v4.V8H(), v5.V8H(), MemOperand(x18, 32, PostIndex)),
- "ld1 {v4.8h, v5.8h}, [x18], #32");
- COMPARE(Ld1(v16.V2S(), v17.V2S(), v18.V2S(), MemOperand(x19, 24, PostIndex)),
- "ld1 {v16.2s, v17.2s, v18.2s}, [x19], #24");
- COMPARE(Ld1(v16.V4S(), v17.V4S(), v18.V4S(), MemOperand(x19, 48, PostIndex)),
- "ld1 {v16.4s, v17.4s, v18.4s}, [x19], #48");
- COMPARE(Ld1(v19.V2S(), v20.V2S(), v21.V2S(), v22.V2S(),
- MemOperand(x20, 32, PostIndex)),
- "ld1 {v19.2s, v20.2s, v21.2s, v22.2s}, [x20], #32");
- COMPARE(Ld1(v23.V2D(), v24.V2D(), v25.V2D(), v26.V2D(),
- MemOperand(x21, 64, PostIndex)),
- "ld1 {v23.2d, v24.2d, v25.2d, v26.2d}, [x21], #64");
+ COMPARE_MACRO(Ld1(v0.V8B(), MemOperand(x15, 8, PostIndex)),
+ "ld1 {v0.8b}, [x15], #8");
+ COMPARE_MACRO(Ld1(v1.V16B(), MemOperand(x16, 16, PostIndex)),
+ "ld1 {v1.16b}, [x16], #16");
+ COMPARE_MACRO(Ld1(v2.V4H(), v3.V4H(), MemOperand(x17, 16, PostIndex)),
+ "ld1 {v2.4h, v3.4h}, [x17], #16");
+ COMPARE_MACRO(Ld1(v4.V8H(), v5.V8H(), MemOperand(x18, 32, PostIndex)),
+ "ld1 {v4.8h, v5.8h}, [x18], #32");
+ COMPARE_MACRO(Ld1(v16.V2S(),
+ v17.V2S(),
+ v18.V2S(),
+ MemOperand(x19, 24, PostIndex)),
+ "ld1 {v16.2s, v17.2s, v18.2s}, [x19], #24");
+ COMPARE_MACRO(Ld1(v16.V4S(),
+ v17.V4S(),
+ v18.V4S(),
+ MemOperand(x19, 48, PostIndex)),
+ "ld1 {v16.4s, v17.4s, v18.4s}, [x19], #48");
+ COMPARE_MACRO(Ld1(v19.V2S(),
+ v20.V2S(),
+ v21.V2S(),
+ v22.V2S(),
+ MemOperand(x20, 32, PostIndex)),
+ "ld1 {v19.2s, v20.2s, v21.2s, v22.2s}, [x20], #32");
+ COMPARE_MACRO(Ld1(v23.V2D(),
+ v24.V2D(),
+ v25.V2D(),
+ v26.V2D(),
+ MemOperand(x21, 64, PostIndex)),
+ "ld1 {v23.2d, v24.2d, v25.2d, v26.2d}, [x21], #64");
- COMPARE(Ld2(v2.V4H(), v3.V4H(), MemOperand(x17, 16, PostIndex)),
- "ld2 {v2.4h, v3.4h}, [x17], #16");
- COMPARE(Ld2(v4.V8H(), v5.V8H(), MemOperand(x18, 32, PostIndex)),
- "ld2 {v4.8h, v5.8h}, [x18], #32");
- COMPARE(Ld3(v16.V2S(), v17.V2S(), v18.V2S(), MemOperand(x19, 24, PostIndex)),
- "ld3 {v16.2s, v17.2s, v18.2s}, [x19], #24");
- COMPARE(Ld3(v16.V4S(), v17.V4S(), v18.V4S(), MemOperand(x19, 48, PostIndex)),
- "ld3 {v16.4s, v17.4s, v18.4s}, [x19], #48");
- COMPARE(Ld4(v19.V2S(), v20.V2S(), v21.V2S(), v22.V2S(),
- MemOperand(x20, 32, PostIndex)),
- "ld4 {v19.2s, v20.2s, v21.2s, v22.2s}, [x20], #32");
- COMPARE(Ld4(v23.V2D(), v24.V2D(), v25.V2D(), v26.V2D(),
- MemOperand(x21, 64, PostIndex)),
- "ld4 {v23.2d, v24.2d, v25.2d, v26.2d}, [x21], #64");
+ COMPARE_MACRO(Ld2(v2.V4H(), v3.V4H(), MemOperand(x17, 16, PostIndex)),
+ "ld2 {v2.4h, v3.4h}, [x17], #16");
+ COMPARE_MACRO(Ld2(v4.V8H(), v5.V8H(), MemOperand(x18, 32, PostIndex)),
+ "ld2 {v4.8h, v5.8h}, [x18], #32");
+ COMPARE_MACRO(Ld3(v16.V2S(),
+ v17.V2S(),
+ v18.V2S(),
+ MemOperand(x19, 24, PostIndex)),
+ "ld3 {v16.2s, v17.2s, v18.2s}, [x19], #24");
+ COMPARE_MACRO(Ld3(v16.V4S(),
+ v17.V4S(),
+ v18.V4S(),
+ MemOperand(x19, 48, PostIndex)),
+ "ld3 {v16.4s, v17.4s, v18.4s}, [x19], #48");
+ COMPARE_MACRO(Ld4(v19.V2S(),
+ v20.V2S(),
+ v21.V2S(),
+ v22.V2S(),
+ MemOperand(x20, 32, PostIndex)),
+ "ld4 {v19.2s, v20.2s, v21.2s, v22.2s}, [x20], #32");
+ COMPARE_MACRO(Ld4(v23.V2D(),
+ v24.V2D(),
+ v25.V2D(),
+ v26.V2D(),
+ MemOperand(x21, 64, PostIndex)),
+ "ld4 {v23.2d, v24.2d, v25.2d, v26.2d}, [x21], #64");
- COMPARE(Ld1(v0.V1D(), MemOperand(x16)), "ld1 {v0.1d}, [x16]");
- COMPARE(Ld1(v1.V1D(), v2.V1D(), MemOperand(x17, 16, PostIndex)),
- "ld1 {v1.1d, v2.1d}, [x17], #16");
- COMPARE(Ld1(v3.V1D(), v4.V1D(), v5.V1D(), MemOperand(x18, x19, PostIndex)),
- "ld1 {v3.1d, v4.1d, v5.1d}, [x18], x19");
- COMPARE(Ld1(v30.V1D(), v31.V1D(), v0.V1D(), v1.V1D(),
- MemOperand(x20, 32, PostIndex)),
- "ld1 {v30.1d, v31.1d, v0.1d, v1.1d}, [x20], #32");
- COMPARE(Ld1(d30, d31, d0, d1, MemOperand(x21, x22, PostIndex)),
- "ld1 {v30.1d, v31.1d, v0.1d, v1.1d}, [x21], x22");
+ COMPARE_MACRO(Ld1(v0.V1D(), MemOperand(x16)), "ld1 {v0.1d}, [x16]");
+ COMPARE_MACRO(Ld1(v1.V1D(), v2.V1D(), MemOperand(x17, 16, PostIndex)),
+ "ld1 {v1.1d, v2.1d}, [x17], #16");
+ COMPARE_MACRO(Ld1(v3.V1D(),
+ v4.V1D(),
+ v5.V1D(),
+ MemOperand(x18, x19, PostIndex)),
+ "ld1 {v3.1d, v4.1d, v5.1d}, [x18], x19");
+ COMPARE_MACRO(Ld1(v30.V1D(),
+ v31.V1D(),
+ v0.V1D(),
+ v1.V1D(),
+ MemOperand(x20, 32, PostIndex)),
+ "ld1 {v30.1d, v31.1d, v0.1d, v1.1d}, [x20], #32");
+ COMPARE_MACRO(Ld1(d30, d31, d0, d1, MemOperand(x21, x22, PostIndex)),
+ "ld1 {v30.1d, v31.1d, v0.1d, v1.1d}, [x21], x22");
- #define DISASM_INST(M, S) \
- COMPARE(St1(v20.M, MemOperand(x15)), \
- "st1 {v20." S "}, [x15]"); \
- COMPARE(St1(v21.M, v22.M, MemOperand(x16)), \
- "st1 {v21." S ", v22." S "}, [x16]"); \
- COMPARE(St1(v23.M, v24.M, v25.M, MemOperand(x17)), \
- "st1 {v23." S ", v24." S ", v25." S "}, [x17]"); \
- COMPARE(St1(v26.M, v27.M, v28.M, v29.M, MemOperand(x18)), \
- "st1 {v26." S ", v27." S ", v28." S ", v29." S "}, [x18]") \
- COMPARE(St1(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
- "st1 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp]") \
- COMPARE(St2(VLIST2(v21.M), MemOperand(x16)), \
- "st2 {v21." S ", v22." S "}, [x16]"); \
- COMPARE(St3(v23.M, v24.M, v25.M, MemOperand(x17)), \
- "st3 {v23." S ", v24." S ", v25." S "}, [x17]"); \
- COMPARE(St4(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
- "st4 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp]")
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(St1(v20.M, MemOperand(x15)), "st1 {v20." S "}, [x15]"); \
+ COMPARE_MACRO(St1(v21.M, v22.M, MemOperand(x16)), \
+ "st1 {v21." S ", v22." S "}, [x16]"); \
+ COMPARE_MACRO(St1(v23.M, v24.M, v25.M, MemOperand(x17)), \
+ "st1 {v23." S ", v24." S ", v25." S "}, [x17]"); \
+ COMPARE_MACRO(St1(v26.M, v27.M, v28.M, v29.M, MemOperand(x18)), \
+ "st1 {v26." S ", v27." S ", v28." S ", v29." S "}, [x18]") \
+ COMPARE_MACRO(St1(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
+ "st1 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp]") \
+ COMPARE_MACRO(St2(VLIST2(v21.M), MemOperand(x16)), \
+ "st2 {v21." S ", v22." S "}, [x16]"); \
+ COMPARE_MACRO(St3(v23.M, v24.M, v25.M, MemOperand(x17)), \
+ "st3 {v23." S ", v24." S ", v25." S "}, [x17]"); \
+ COMPARE_MACRO(St4(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
+ "st4 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp]")
NEON_FORMAT_LIST(DISASM_INST);
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(St1(v0.M, MemOperand(x15, x20, PostIndex)), \
- "st1 {v0." S "}, [x15], x20"); \
- COMPARE(St1(v1.M, v2.M, MemOperand(x16, x21, PostIndex)), \
- "st1 {v1." S ", v2." S "}, [x16], x21"); \
- COMPARE(St1(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \
- "st1 {v3." S ", v4." S ", v5." S "}, [x17], x22"); \
- COMPARE(St1(v6.M, v7.M, v8.M, v9.M, MemOperand(x18, x23, PostIndex)), \
- "st1 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18], x23") \
- COMPARE(St1(v30.M, v31.M, v0.M, v1.M, MemOperand(sp, x24, PostIndex)), \
- "st1 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp], x24") \
- COMPARE(St2(v1.M, v2.M, MemOperand(x16, x21, PostIndex)), \
- "st2 {v1." S ", v2." S "}, [x16], x21"); \
- COMPARE(St3(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \
- "st3 {v3." S ", v4." S ", v5." S "}, [x17], x22"); \
- COMPARE(St4(v6.M, v7.M, v8.M, v9.M, MemOperand(x18, x23, PostIndex)), \
- "st4 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18], x23") \
- COMPARE(St4(v30.M, v31.M, v0.M, v1.M, MemOperand(sp, x24, PostIndex)), \
- "st4 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp], x24")
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(St1(v0.M, MemOperand(x15, x20, PostIndex)), \
+ "st1 {v0." S "}, [x15], x20"); \
+ COMPARE_MACRO(St1(v1.M, v2.M, MemOperand(x16, x21, PostIndex)), \
+ "st1 {v1." S ", v2." S "}, [x16], x21"); \
+ COMPARE_MACRO(St1(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \
+ "st1 {v3." S ", v4." S ", v5." S "}, [x17], x22"); \
+ COMPARE_MACRO(St1(v6.M, v7.M, v8.M, v9.M, MemOperand(x18, x23, PostIndex)), \
+ "st1 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18], x23") \
+ COMPARE_MACRO(St1(v30.M, v31.M, v0.M, v1.M, MemOperand(sp, x24, PostIndex)), \
+ "st1 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp], x24") \
+ COMPARE_MACRO(St2(v1.M, v2.M, MemOperand(x16, x21, PostIndex)), \
+ "st2 {v1." S ", v2." S "}, [x16], x21"); \
+ COMPARE_MACRO(St3(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \
+ "st3 {v3." S ", v4." S ", v5." S "}, [x17], x22"); \
+ COMPARE_MACRO(St4(v6.M, v7.M, v8.M, v9.M, MemOperand(x18, x23, PostIndex)), \
+ "st4 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18], x23") \
+ COMPARE_MACRO(St4(v30.M, v31.M, v0.M, v1.M, MemOperand(sp, x24, PostIndex)), \
+ "st4 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp], x24")
NEON_FORMAT_LIST(DISASM_INST);
- #undef DISASM_INST
+#undef DISASM_INST
- COMPARE(St1(v0.V8B(), MemOperand(x15, 8, PostIndex)),
- "st1 {v0.8b}, [x15], #8");
- COMPARE(St1(v1.V16B(), MemOperand(x16, 16, PostIndex)),
- "st1 {v1.16b}, [x16], #16");
- COMPARE(St1(v2.V4H(), v3.V4H(), MemOperand(x17, 16, PostIndex)),
- "st1 {v2.4h, v3.4h}, [x17], #16");
- COMPARE(St1(v4.V8H(), v5.V8H(), MemOperand(x18, 32, PostIndex)),
- "st1 {v4.8h, v5.8h}, [x18], #32");
- COMPARE(St1(v16.V2S(), v17.V2S(), v18.V2S(), MemOperand(x19, 24, PostIndex)),
- "st1 {v16.2s, v17.2s, v18.2s}, [x19], #24");
- COMPARE(St1(v16.V4S(), v17.V4S(), v18.V4S(), MemOperand(x19, 48, PostIndex)),
- "st1 {v16.4s, v17.4s, v18.4s}, [x19], #48");
- COMPARE(St1(v19.V2S(), v20.V2S(), v21.V2S(), v22.V2S(),
- MemOperand(x20, 32, PostIndex)),
- "st1 {v19.2s, v20.2s, v21.2s, v22.2s}, [x20], #32");
- COMPARE(St1(v23.V2D(), v24.V2D(), v25.V2D(), v26.V2D(),
- MemOperand(x21, 64, PostIndex)),
- "st1 {v23.2d, v24.2d, v25.2d, v26.2d}, [x21], #64");
- COMPARE(St2(v1.V16B(), v2.V16B(), MemOperand(x16, 32, PostIndex)),
- "st2 {v1.16b, v2.16b}, [x16], #32");
- COMPARE(St2(v2.V4H(), v3.V4H(), MemOperand(x17, 16, PostIndex)),
- "st2 {v2.4h, v3.4h}, [x17], #16");
- COMPARE(St2(v4.V8H(), v5.V8H(), MemOperand(x18, 32, PostIndex)),
- "st2 {v4.8h, v5.8h}, [x18], #32");
- COMPARE(St3(v16.V2S(), v17.V2S(), v18.V2S(),
- MemOperand(x19, 24, PostIndex)),
- "st3 {v16.2s, v17.2s, v18.2s}, [x19], #24");
- COMPARE(St3(v16.V4S(), v17.V4S(), v18.V4S(),
- MemOperand(x19, 48, PostIndex)),
- "st3 {v16.4s, v17.4s, v18.4s}, [x19], #48");
- COMPARE(St4(v19.V2S(), v20.V2S(), v21.V2S(), v22.V2S(),
- MemOperand(x20, 32, PostIndex)),
- "st4 {v19.2s, v20.2s, v21.2s, v22.2s}, [x20], #32");
- COMPARE(St4(v23.V2D(), v24.V2D(), v25.V2D(), v26.V2D(),
- MemOperand(x21, 64, PostIndex)),
- "st4 {v23.2d, v24.2d, v25.2d, v26.2d}, [x21], #64");
+ COMPARE_MACRO(St1(v0.V8B(), MemOperand(x15, 8, PostIndex)),
+ "st1 {v0.8b}, [x15], #8");
+ COMPARE_MACRO(St1(v1.V16B(), MemOperand(x16, 16, PostIndex)),
+ "st1 {v1.16b}, [x16], #16");
+ COMPARE_MACRO(St1(v2.V4H(), v3.V4H(), MemOperand(x17, 16, PostIndex)),
+ "st1 {v2.4h, v3.4h}, [x17], #16");
+ COMPARE_MACRO(St1(v4.V8H(), v5.V8H(), MemOperand(x18, 32, PostIndex)),
+ "st1 {v4.8h, v5.8h}, [x18], #32");
+ COMPARE_MACRO(St1(v16.V2S(),
+ v17.V2S(),
+ v18.V2S(),
+ MemOperand(x19, 24, PostIndex)),
+ "st1 {v16.2s, v17.2s, v18.2s}, [x19], #24");
+ COMPARE_MACRO(St1(v16.V4S(),
+ v17.V4S(),
+ v18.V4S(),
+ MemOperand(x19, 48, PostIndex)),
+ "st1 {v16.4s, v17.4s, v18.4s}, [x19], #48");
+ COMPARE_MACRO(St1(v19.V2S(),
+ v20.V2S(),
+ v21.V2S(),
+ v22.V2S(),
+ MemOperand(x20, 32, PostIndex)),
+ "st1 {v19.2s, v20.2s, v21.2s, v22.2s}, [x20], #32");
+ COMPARE_MACRO(St1(v23.V2D(),
+ v24.V2D(),
+ v25.V2D(),
+ v26.V2D(),
+ MemOperand(x21, 64, PostIndex)),
+ "st1 {v23.2d, v24.2d, v25.2d, v26.2d}, [x21], #64");
+ COMPARE_MACRO(St2(v1.V16B(), v2.V16B(), MemOperand(x16, 32, PostIndex)),
+ "st2 {v1.16b, v2.16b}, [x16], #32");
+ COMPARE_MACRO(St2(v2.V4H(), v3.V4H(), MemOperand(x17, 16, PostIndex)),
+ "st2 {v2.4h, v3.4h}, [x17], #16");
+ COMPARE_MACRO(St2(v4.V8H(), v5.V8H(), MemOperand(x18, 32, PostIndex)),
+ "st2 {v4.8h, v5.8h}, [x18], #32");
+ COMPARE_MACRO(St3(v16.V2S(),
+ v17.V2S(),
+ v18.V2S(),
+ MemOperand(x19, 24, PostIndex)),
+ "st3 {v16.2s, v17.2s, v18.2s}, [x19], #24");
+ COMPARE_MACRO(St3(v16.V4S(),
+ v17.V4S(),
+ v18.V4S(),
+ MemOperand(x19, 48, PostIndex)),
+ "st3 {v16.4s, v17.4s, v18.4s}, [x19], #48");
+ COMPARE_MACRO(St4(v19.V2S(),
+ v20.V2S(),
+ v21.V2S(),
+ v22.V2S(),
+ MemOperand(x20, 32, PostIndex)),
+ "st4 {v19.2s, v20.2s, v21.2s, v22.2s}, [x20], #32");
+ COMPARE_MACRO(St4(v23.V2D(),
+ v24.V2D(),
+ v25.V2D(),
+ v26.V2D(),
+ MemOperand(x21, 64, PostIndex)),
+ "st4 {v23.2d, v24.2d, v25.2d, v26.2d}, [x21], #64");
- COMPARE(St1(v0.V1D(), MemOperand(x16)), "st1 {v0.1d}, [x16]");
- COMPARE(St1(v1.V1D(), v2.V1D(), MemOperand(x17, 16, PostIndex)),
- "st1 {v1.1d, v2.1d}, [x17], #16");
- COMPARE(St1(v3.V1D(), v4.V1D(), v5.V1D(), MemOperand(x18, x19, PostIndex)),
- "st1 {v3.1d, v4.1d, v5.1d}, [x18], x19");
- COMPARE(St1(v30.V1D(), v31.V1D(), v0.V1D(), v1.V1D(),
- MemOperand(x20, 32, PostIndex)),
- "st1 {v30.1d, v31.1d, v0.1d, v1.1d}, [x20], #32");
- COMPARE(St1(d30, d31, d0, d1, MemOperand(x21, x22, PostIndex)),
- "st1 {v30.1d, v31.1d, v0.1d, v1.1d}, [x21], x22");
+ COMPARE_MACRO(St1(v0.V1D(), MemOperand(x16)), "st1 {v0.1d}, [x16]");
+ COMPARE_MACRO(St1(v1.V1D(), v2.V1D(), MemOperand(x17, 16, PostIndex)),
+ "st1 {v1.1d, v2.1d}, [x17], #16");
+ COMPARE_MACRO(St1(v3.V1D(),
+ v4.V1D(),
+ v5.V1D(),
+ MemOperand(x18, x19, PostIndex)),
+ "st1 {v3.1d, v4.1d, v5.1d}, [x18], x19");
+ COMPARE_MACRO(St1(v30.V1D(),
+ v31.V1D(),
+ v0.V1D(),
+ v1.V1D(),
+ MemOperand(x20, 32, PostIndex)),
+ "st1 {v30.1d, v31.1d, v0.1d, v1.1d}, [x20], #32");
+ COMPARE_MACRO(St1(d30, d31, d0, d1, MemOperand(x21, x22, PostIndex)),
+ "st1 {v30.1d, v31.1d, v0.1d, v1.1d}, [x21], x22");
CLEANUP();
}
@@ -3331,416 +3416,515 @@
TEST(neon_load_store_lane) {
- SETUP_MACRO();
+ SETUP();
- COMPARE(Ld1(v0.V8B(), 0, MemOperand(x15)), "ld1 {v0.b}[0], [x15]");
- COMPARE(Ld1(v1.V16B(), 1, MemOperand(x16)), "ld1 {v1.b}[1], [x16]");
- COMPARE(Ld1(v2.V4H(), 2, MemOperand(x17)), "ld1 {v2.h}[2], [x17]");
- COMPARE(Ld1(v3.V8H(), 3, MemOperand(x18)), "ld1 {v3.h}[3], [x18]");
- COMPARE(Ld1(v4.V2S(), 0, MemOperand(x19)), "ld1 {v4.s}[0], [x19]");
- COMPARE(Ld1(v5.V4S(), 1, MemOperand(x20)), "ld1 {v5.s}[1], [x20]");
- COMPARE(Ld1(v6.V2D(), 0, MemOperand(x21)), "ld1 {v6.d}[0], [x21]");
- COMPARE(Ld1(v7.B(), 7, MemOperand(x22)), "ld1 {v7.b}[7], [x22]");
- COMPARE(Ld1(v8.B(), 15, MemOperand(x23)), "ld1 {v8.b}[15], [x23]");
- COMPARE(Ld1(v9.H(), 3, MemOperand(x24)), "ld1 {v9.h}[3], [x24]");
- COMPARE(Ld1(v10.H(), 7, MemOperand(x25)), "ld1 {v10.h}[7], [x25]");
- COMPARE(Ld1(v11.S(), 1, MemOperand(x26)), "ld1 {v11.s}[1], [x26]");
- COMPARE(Ld1(v12.S(), 3, MemOperand(x27)), "ld1 {v12.s}[3], [x27]");
- COMPARE(Ld1(v13.D(), 1, MemOperand(sp)), "ld1 {v13.d}[1], [sp]");
+ COMPARE_MACRO(Ld1(v0.V8B(), 0, MemOperand(x15)), "ld1 {v0.b}[0], [x15]");
+ COMPARE_MACRO(Ld1(v1.V16B(), 1, MemOperand(x16)), "ld1 {v1.b}[1], [x16]");
+ COMPARE_MACRO(Ld1(v2.V4H(), 2, MemOperand(x17)), "ld1 {v2.h}[2], [x17]");
+ COMPARE_MACRO(Ld1(v3.V8H(), 3, MemOperand(x18)), "ld1 {v3.h}[3], [x18]");
+ COMPARE_MACRO(Ld1(v4.V2S(), 0, MemOperand(x19)), "ld1 {v4.s}[0], [x19]");
+ COMPARE_MACRO(Ld1(v5.V4S(), 1, MemOperand(x20)), "ld1 {v5.s}[1], [x20]");
+ COMPARE_MACRO(Ld1(v6.V2D(), 0, MemOperand(x21)), "ld1 {v6.d}[0], [x21]");
+ COMPARE_MACRO(Ld1(v7.B(), 7, MemOperand(x22)), "ld1 {v7.b}[7], [x22]");
+ COMPARE_MACRO(Ld1(v8.B(), 15, MemOperand(x23)), "ld1 {v8.b}[15], [x23]");
+ COMPARE_MACRO(Ld1(v9.H(), 3, MemOperand(x24)), "ld1 {v9.h}[3], [x24]");
+ COMPARE_MACRO(Ld1(v10.H(), 7, MemOperand(x25)), "ld1 {v10.h}[7], [x25]");
+ COMPARE_MACRO(Ld1(v11.S(), 1, MemOperand(x26)), "ld1 {v11.s}[1], [x26]");
+ COMPARE_MACRO(Ld1(v12.S(), 3, MemOperand(x27)), "ld1 {v12.s}[3], [x27]");
+ COMPARE_MACRO(Ld1(v13.D(), 1, MemOperand(sp)), "ld1 {v13.d}[1], [sp]");
- COMPARE(Ld1(v0.V8B(), 0, MemOperand(x15, x0, PostIndex)),
- "ld1 {v0.b}[0], [x15], x0");
- COMPARE(Ld1(v1.V16B(), 1, MemOperand(x16, 1, PostIndex)),
- "ld1 {v1.b}[1], [x16], #1");
- COMPARE(Ld1(v2.V4H(), 2, MemOperand(x17, 2, PostIndex)),
- "ld1 {v2.h}[2], [x17], #2");
- COMPARE(Ld1(v3.V8H(), 3, MemOperand(x18, x1, PostIndex)),
- "ld1 {v3.h}[3], [x18], x1");
- COMPARE(Ld1(v4.V2S(), 0, MemOperand(x19, x2, PostIndex)),
- "ld1 {v4.s}[0], [x19], x2");
- COMPARE(Ld1(v5.V4S(), 1, MemOperand(x20, 4, PostIndex)),
- "ld1 {v5.s}[1], [x20], #4");
- COMPARE(Ld1(v6.V2D(), 0, MemOperand(x21, 8, PostIndex)),
- "ld1 {v6.d}[0], [x21], #8");
- COMPARE(Ld1(v7.B(), 7, MemOperand(x22, 1, PostIndex)),
- "ld1 {v7.b}[7], [x22], #1");
- COMPARE(Ld1(v8.B(), 15, MemOperand(x23, x3, PostIndex)),
- "ld1 {v8.b}[15], [x23], x3");
- COMPARE(Ld1(v9.H(), 3, MemOperand(x24, x4, PostIndex)),
- "ld1 {v9.h}[3], [x24], x4");
- COMPARE(Ld1(v10.H(), 7, MemOperand(x25, 2, PostIndex)),
- "ld1 {v10.h}[7], [x25], #2");
- COMPARE(Ld1(v11.S(), 1, MemOperand(x26, 4, PostIndex)),
- "ld1 {v11.s}[1], [x26], #4");
- COMPARE(Ld1(v12.S(), 3, MemOperand(x27, x5, PostIndex)),
- "ld1 {v12.s}[3], [x27], x5");
- COMPARE(Ld1(v12.S(), 3, MemOperand(x27, 4, PostIndex)),
- "ld1 {v12.s}[3], [x27], #4");
- COMPARE(Ld1(v13.D(), 1, MemOperand(sp, x6, PostIndex)),
- "ld1 {v13.d}[1], [sp], x6");
- COMPARE(Ld1(v13.D(), 1, MemOperand(sp, 8, PostIndex)),
- "ld1 {v13.d}[1], [sp], #8");
+ COMPARE_MACRO(Ld1(v0.V8B(), 0, MemOperand(x15, x0, PostIndex)),
+ "ld1 {v0.b}[0], [x15], x0");
+ COMPARE_MACRO(Ld1(v1.V16B(), 1, MemOperand(x16, 1, PostIndex)),
+ "ld1 {v1.b}[1], [x16], #1");
+ COMPARE_MACRO(Ld1(v2.V4H(), 2, MemOperand(x17, 2, PostIndex)),
+ "ld1 {v2.h}[2], [x17], #2");
+ COMPARE_MACRO(Ld1(v3.V8H(), 3, MemOperand(x18, x1, PostIndex)),
+ "ld1 {v3.h}[3], [x18], x1");
+ COMPARE_MACRO(Ld1(v4.V2S(), 0, MemOperand(x19, x2, PostIndex)),
+ "ld1 {v4.s}[0], [x19], x2");
+ COMPARE_MACRO(Ld1(v5.V4S(), 1, MemOperand(x20, 4, PostIndex)),
+ "ld1 {v5.s}[1], [x20], #4");
+ COMPARE_MACRO(Ld1(v6.V2D(), 0, MemOperand(x21, 8, PostIndex)),
+ "ld1 {v6.d}[0], [x21], #8");
+ COMPARE_MACRO(Ld1(v7.B(), 7, MemOperand(x22, 1, PostIndex)),
+ "ld1 {v7.b}[7], [x22], #1");
+ COMPARE_MACRO(Ld1(v8.B(), 15, MemOperand(x23, x3, PostIndex)),
+ "ld1 {v8.b}[15], [x23], x3");
+ COMPARE_MACRO(Ld1(v9.H(), 3, MemOperand(x24, x4, PostIndex)),
+ "ld1 {v9.h}[3], [x24], x4");
+ COMPARE_MACRO(Ld1(v10.H(), 7, MemOperand(x25, 2, PostIndex)),
+ "ld1 {v10.h}[7], [x25], #2");
+ COMPARE_MACRO(Ld1(v11.S(), 1, MemOperand(x26, 4, PostIndex)),
+ "ld1 {v11.s}[1], [x26], #4");
+ COMPARE_MACRO(Ld1(v12.S(), 3, MemOperand(x27, x5, PostIndex)),
+ "ld1 {v12.s}[3], [x27], x5");
+ COMPARE_MACRO(Ld1(v12.S(), 3, MemOperand(x27, 4, PostIndex)),
+ "ld1 {v12.s}[3], [x27], #4");
+ COMPARE_MACRO(Ld1(v13.D(), 1, MemOperand(sp, x6, PostIndex)),
+ "ld1 {v13.d}[1], [sp], x6");
+ COMPARE_MACRO(Ld1(v13.D(), 1, MemOperand(sp, 8, PostIndex)),
+ "ld1 {v13.d}[1], [sp], #8");
- COMPARE(Ld2(v0.V8B(), v1.V8B(), 0, MemOperand(x15)),
- "ld2 {v0.b, v1.b}[0], [x15]");
- COMPARE(Ld2(v1.V16B(), v2.V16B(), 1, MemOperand(x16)),
- "ld2 {v1.b, v2.b}[1], [x16]");
- COMPARE(Ld2(v2.V4H(), v3.V4H(), 2, MemOperand(x17)),
- "ld2 {v2.h, v3.h}[2], [x17]");
- COMPARE(Ld2(v3.V8H(), v4.V8H(), 3, MemOperand(x18)),
- "ld2 {v3.h, v4.h}[3], [x18]");
- COMPARE(Ld2(v4.V2S(), v5.V2S(), 0, MemOperand(x19)),
- "ld2 {v4.s, v5.s}[0], [x19]");
- COMPARE(Ld2(v5.V4S(), v6.V4S(), 1, MemOperand(x20)),
- "ld2 {v5.s, v6.s}[1], [x20]");
- COMPARE(Ld2(v6.V2D(), v7.V2D(), 0, MemOperand(x21)),
- "ld2 {v6.d, v7.d}[0], [x21]");
- COMPARE(Ld2(v7.B(), v8.B(), 7, MemOperand(x22)),
- "ld2 {v7.b, v8.b}[7], [x22]");
- COMPARE(Ld2(v8.B(), v9.B(), 15, MemOperand(x23)),
- "ld2 {v8.b, v9.b}[15], [x23]");
- COMPARE(Ld2(v9.H(), v10.H(), 3, MemOperand(x24)),
- "ld2 {v9.h, v10.h}[3], [x24]");
- COMPARE(Ld2(v10.H(), v11.H(), 7, MemOperand(x25)),
- "ld2 {v10.h, v11.h}[7], [x25]");
- COMPARE(Ld2(v11.S(), v12.S(), 1, MemOperand(x26)),
- "ld2 {v11.s, v12.s}[1], [x26]");
- COMPARE(Ld2(v12.S(), v13.S(), 3, MemOperand(x27)),
- "ld2 {v12.s, v13.s}[3], [x27]");
- COMPARE(Ld2(v13.D(), v14.D(), 1, MemOperand(sp)),
- "ld2 {v13.d, v14.d}[1], [sp]");
+ COMPARE_MACRO(Ld2(v0.V8B(), v1.V8B(), 0, MemOperand(x15)),
+ "ld2 {v0.b, v1.b}[0], [x15]");
+ COMPARE_MACRO(Ld2(v1.V16B(), v2.V16B(), 1, MemOperand(x16)),
+ "ld2 {v1.b, v2.b}[1], [x16]");
+ COMPARE_MACRO(Ld2(v2.V4H(), v3.V4H(), 2, MemOperand(x17)),
+ "ld2 {v2.h, v3.h}[2], [x17]");
+ COMPARE_MACRO(Ld2(v3.V8H(), v4.V8H(), 3, MemOperand(x18)),
+ "ld2 {v3.h, v4.h}[3], [x18]");
+ COMPARE_MACRO(Ld2(v4.V2S(), v5.V2S(), 0, MemOperand(x19)),
+ "ld2 {v4.s, v5.s}[0], [x19]");
+ COMPARE_MACRO(Ld2(v5.V4S(), v6.V4S(), 1, MemOperand(x20)),
+ "ld2 {v5.s, v6.s}[1], [x20]");
+ COMPARE_MACRO(Ld2(v6.V2D(), v7.V2D(), 0, MemOperand(x21)),
+ "ld2 {v6.d, v7.d}[0], [x21]");
+ COMPARE_MACRO(Ld2(v7.B(), v8.B(), 7, MemOperand(x22)),
+ "ld2 {v7.b, v8.b}[7], [x22]");
+ COMPARE_MACRO(Ld2(v8.B(), v9.B(), 15, MemOperand(x23)),
+ "ld2 {v8.b, v9.b}[15], [x23]");
+ COMPARE_MACRO(Ld2(v9.H(), v10.H(), 3, MemOperand(x24)),
+ "ld2 {v9.h, v10.h}[3], [x24]");
+ COMPARE_MACRO(Ld2(v10.H(), v11.H(), 7, MemOperand(x25)),
+ "ld2 {v10.h, v11.h}[7], [x25]");
+ COMPARE_MACRO(Ld2(v11.S(), v12.S(), 1, MemOperand(x26)),
+ "ld2 {v11.s, v12.s}[1], [x26]");
+ COMPARE_MACRO(Ld2(v12.S(), v13.S(), 3, MemOperand(x27)),
+ "ld2 {v12.s, v13.s}[3], [x27]");
+ COMPARE_MACRO(Ld2(v13.D(), v14.D(), 1, MemOperand(sp)),
+ "ld2 {v13.d, v14.d}[1], [sp]");
- COMPARE(Ld2(v0.V8B(), v1.V8B(), 0, MemOperand(x15, x0, PostIndex)),
- "ld2 {v0.b, v1.b}[0], [x15], x0");
- COMPARE(Ld2(v1.V16B(), v2.V16B(), 1, MemOperand(x16, 2, PostIndex)),
- "ld2 {v1.b, v2.b}[1], [x16], #2");
- COMPARE(Ld2(v2.V4H(), v3.V4H(), 2, MemOperand(x17, 4, PostIndex)),
- "ld2 {v2.h, v3.h}[2], [x17], #4");
- COMPARE(Ld2(v3.V8H(), v4.V8H(), 3, MemOperand(x18, x1, PostIndex)),
- "ld2 {v3.h, v4.h}[3], [x18], x1");
- COMPARE(Ld2(v4.V2S(), v5.V2S(), 0, MemOperand(x19, x2, PostIndex)),
- "ld2 {v4.s, v5.s}[0], [x19], x2");
- COMPARE(Ld2(v5.V4S(), v6.V4S(), 1, MemOperand(x20, 8, PostIndex)),
- "ld2 {v5.s, v6.s}[1], [x20], #8");
- COMPARE(Ld2(v6.V2D(), v7.V2D(), 0, MemOperand(x21, 16, PostIndex)),
- "ld2 {v6.d, v7.d}[0], [x21], #16");
- COMPARE(Ld2(v7.B(), v8.B(), 7, MemOperand(x22, 2, PostIndex)),
- "ld2 {v7.b, v8.b}[7], [x22], #2");
- COMPARE(Ld2(v8.B(), v9.B(), 15, MemOperand(x23, x3, PostIndex)),
- "ld2 {v8.b, v9.b}[15], [x23], x3");
- COMPARE(Ld2(v9.H(), v10.H(), 3, MemOperand(x24, x4, PostIndex)),
- "ld2 {v9.h, v10.h}[3], [x24], x4");
- COMPARE(Ld2(v10.H(), v11.H(), 7, MemOperand(x25, 4, PostIndex)),
- "ld2 {v10.h, v11.h}[7], [x25], #4");
- COMPARE(Ld2(v11.S(), v12.S(), 1, MemOperand(x26, 8, PostIndex)),
- "ld2 {v11.s, v12.s}[1], [x26], #8");
- COMPARE(Ld2(v12.S(), v13.S(), 3, MemOperand(x27, x5, PostIndex)),
- "ld2 {v12.s, v13.s}[3], [x27], x5");
- COMPARE(Ld2(v11.S(), v12.S(), 3, MemOperand(x26, 8, PostIndex)),
- "ld2 {v11.s, v12.s}[3], [x26], #8");
- COMPARE(Ld2(v13.D(), v14.D(), 1, MemOperand(sp, x6, PostIndex)),
- "ld2 {v13.d, v14.d}[1], [sp], x6");
- COMPARE(Ld2(v13.D(), v14.D(), 1, MemOperand(sp, 16, PostIndex)),
- "ld2 {v13.d, v14.d}[1], [sp], #16");
+ COMPARE_MACRO(Ld2(v0.V8B(), v1.V8B(), 0, MemOperand(x15, x0, PostIndex)),
+ "ld2 {v0.b, v1.b}[0], [x15], x0");
+ COMPARE_MACRO(Ld2(v1.V16B(), v2.V16B(), 1, MemOperand(x16, 2, PostIndex)),
+ "ld2 {v1.b, v2.b}[1], [x16], #2");
+ COMPARE_MACRO(Ld2(v2.V4H(), v3.V4H(), 2, MemOperand(x17, 4, PostIndex)),
+ "ld2 {v2.h, v3.h}[2], [x17], #4");
+ COMPARE_MACRO(Ld2(v3.V8H(), v4.V8H(), 3, MemOperand(x18, x1, PostIndex)),
+ "ld2 {v3.h, v4.h}[3], [x18], x1");
+ COMPARE_MACRO(Ld2(v4.V2S(), v5.V2S(), 0, MemOperand(x19, x2, PostIndex)),
+ "ld2 {v4.s, v5.s}[0], [x19], x2");
+ COMPARE_MACRO(Ld2(v5.V4S(), v6.V4S(), 1, MemOperand(x20, 8, PostIndex)),
+ "ld2 {v5.s, v6.s}[1], [x20], #8");
+ COMPARE_MACRO(Ld2(v6.V2D(), v7.V2D(), 0, MemOperand(x21, 16, PostIndex)),
+ "ld2 {v6.d, v7.d}[0], [x21], #16");
+ COMPARE_MACRO(Ld2(v7.B(), v8.B(), 7, MemOperand(x22, 2, PostIndex)),
+ "ld2 {v7.b, v8.b}[7], [x22], #2");
+ COMPARE_MACRO(Ld2(v8.B(), v9.B(), 15, MemOperand(x23, x3, PostIndex)),
+ "ld2 {v8.b, v9.b}[15], [x23], x3");
+ COMPARE_MACRO(Ld2(v9.H(), v10.H(), 3, MemOperand(x24, x4, PostIndex)),
+ "ld2 {v9.h, v10.h}[3], [x24], x4");
+ COMPARE_MACRO(Ld2(v10.H(), v11.H(), 7, MemOperand(x25, 4, PostIndex)),
+ "ld2 {v10.h, v11.h}[7], [x25], #4");
+ COMPARE_MACRO(Ld2(v11.S(), v12.S(), 1, MemOperand(x26, 8, PostIndex)),
+ "ld2 {v11.s, v12.s}[1], [x26], #8");
+ COMPARE_MACRO(Ld2(v12.S(), v13.S(), 3, MemOperand(x27, x5, PostIndex)),
+ "ld2 {v12.s, v13.s}[3], [x27], x5");
+ COMPARE_MACRO(Ld2(v11.S(), v12.S(), 3, MemOperand(x26, 8, PostIndex)),
+ "ld2 {v11.s, v12.s}[3], [x26], #8");
+ COMPARE_MACRO(Ld2(v13.D(), v14.D(), 1, MemOperand(sp, x6, PostIndex)),
+ "ld2 {v13.d, v14.d}[1], [sp], x6");
+ COMPARE_MACRO(Ld2(v13.D(), v14.D(), 1, MemOperand(sp, 16, PostIndex)),
+ "ld2 {v13.d, v14.d}[1], [sp], #16");
- COMPARE(Ld3(v0.V8B(), v1.V8B(), v2.V8B(), 0, MemOperand(x15)),
- "ld3 {v0.b, v1.b, v2.b}[0], [x15]");
- COMPARE(Ld3(v1.V16B(), v2.V16B(), v3.V16B(), 1, MemOperand(x16)),
- "ld3 {v1.b, v2.b, v3.b}[1], [x16]");
- COMPARE(Ld3(v2.V4H(), v3.V4H(), v4.V4H(), 2, MemOperand(x17)),
- "ld3 {v2.h, v3.h, v4.h}[2], [x17]");
- COMPARE(Ld3(v3.V8H(), v4.V8H(), v5.V8H(), 3, MemOperand(x18)),
- "ld3 {v3.h, v4.h, v5.h}[3], [x18]");
- COMPARE(Ld3(v4.V2S(), v5.V2S(), v6.V2S(), 0, MemOperand(x19)),
- "ld3 {v4.s, v5.s, v6.s}[0], [x19]");
- COMPARE(Ld3(v5.V4S(), v6.V4S(), v7.V4S(), 1, MemOperand(x20)),
- "ld3 {v5.s, v6.s, v7.s}[1], [x20]");
- COMPARE(Ld3(v6.V2D(), v7.V2D(), v8.V2D(), 0, MemOperand(x21)),
- "ld3 {v6.d, v7.d, v8.d}[0], [x21]");
- COMPARE(Ld3(v7.B(), v8.B(), v9.B(), 7, MemOperand(x22)),
- "ld3 {v7.b, v8.b, v9.b}[7], [x22]");
- COMPARE(Ld3(v8.B(), v9.B(), v10.B(), 15, MemOperand(x23)),
- "ld3 {v8.b, v9.b, v10.b}[15], [x23]");
- COMPARE(Ld3(v9.H(), v10.H(), v11.H(), 3, MemOperand(x24)),
- "ld3 {v9.h, v10.h, v11.h}[3], [x24]");
- COMPARE(Ld3(v10.H(), v11.H(), v12.H(), 7, MemOperand(x25)),
- "ld3 {v10.h, v11.h, v12.h}[7], [x25]");
- COMPARE(Ld3(v11.S(), v12.S(), v13.S(), 1, MemOperand(x26)),
- "ld3 {v11.s, v12.s, v13.s}[1], [x26]");
- COMPARE(Ld3(v12.S(), v13.S(), v14.S(), 3, MemOperand(x27)),
- "ld3 {v12.s, v13.s, v14.s}[3], [x27]");
- COMPARE(Ld3(v13.D(), v14.D(), v15.D(), 1, MemOperand(sp)),
- "ld3 {v13.d, v14.d, v15.d}[1], [sp]");
+ COMPARE_MACRO(Ld3(v0.V8B(), v1.V8B(), v2.V8B(), 0, MemOperand(x15)),
+ "ld3 {v0.b, v1.b, v2.b}[0], [x15]");
+ COMPARE_MACRO(Ld3(v1.V16B(), v2.V16B(), v3.V16B(), 1, MemOperand(x16)),
+ "ld3 {v1.b, v2.b, v3.b}[1], [x16]");
+ COMPARE_MACRO(Ld3(v2.V4H(), v3.V4H(), v4.V4H(), 2, MemOperand(x17)),
+ "ld3 {v2.h, v3.h, v4.h}[2], [x17]");
+ COMPARE_MACRO(Ld3(v3.V8H(), v4.V8H(), v5.V8H(), 3, MemOperand(x18)),
+ "ld3 {v3.h, v4.h, v5.h}[3], [x18]");
+ COMPARE_MACRO(Ld3(v4.V2S(), v5.V2S(), v6.V2S(), 0, MemOperand(x19)),
+ "ld3 {v4.s, v5.s, v6.s}[0], [x19]");
+ COMPARE_MACRO(Ld3(v5.V4S(), v6.V4S(), v7.V4S(), 1, MemOperand(x20)),
+ "ld3 {v5.s, v6.s, v7.s}[1], [x20]");
+ COMPARE_MACRO(Ld3(v6.V2D(), v7.V2D(), v8.V2D(), 0, MemOperand(x21)),
+ "ld3 {v6.d, v7.d, v8.d}[0], [x21]");
+ COMPARE_MACRO(Ld3(v7.B(), v8.B(), v9.B(), 7, MemOperand(x22)),
+ "ld3 {v7.b, v8.b, v9.b}[7], [x22]");
+ COMPARE_MACRO(Ld3(v8.B(), v9.B(), v10.B(), 15, MemOperand(x23)),
+ "ld3 {v8.b, v9.b, v10.b}[15], [x23]");
+ COMPARE_MACRO(Ld3(v9.H(), v10.H(), v11.H(), 3, MemOperand(x24)),
+ "ld3 {v9.h, v10.h, v11.h}[3], [x24]");
+ COMPARE_MACRO(Ld3(v10.H(), v11.H(), v12.H(), 7, MemOperand(x25)),
+ "ld3 {v10.h, v11.h, v12.h}[7], [x25]");
+ COMPARE_MACRO(Ld3(v11.S(), v12.S(), v13.S(), 1, MemOperand(x26)),
+ "ld3 {v11.s, v12.s, v13.s}[1], [x26]");
+ COMPARE_MACRO(Ld3(v12.S(), v13.S(), v14.S(), 3, MemOperand(x27)),
+ "ld3 {v12.s, v13.s, v14.s}[3], [x27]");
+ COMPARE_MACRO(Ld3(v13.D(), v14.D(), v15.D(), 1, MemOperand(sp)),
+ "ld3 {v13.d, v14.d, v15.d}[1], [sp]");
- COMPARE(Ld3(v0.V8B(), v1.V8B(), v2.V8B(), 0,
- MemOperand(x15, x0, PostIndex)),
- "ld3 {v0.b, v1.b, v2.b}[0], [x15], x0");
- COMPARE(Ld3(v1.V16B(), v2.V16B(), v3.V16B(), 1,
- MemOperand(x16, 3, PostIndex)),
- "ld3 {v1.b, v2.b, v3.b}[1], [x16], #3");
- COMPARE(Ld3(v2.V4H(), v3.V4H(), v4.V4H(), 2,
- MemOperand(x17, 6, PostIndex)),
- "ld3 {v2.h, v3.h, v4.h}[2], [x17], #6");
- COMPARE(Ld3(v3.V8H(), v4.V8H(), v5.V8H(), 3,
- MemOperand(x18, x1, PostIndex)),
- "ld3 {v3.h, v4.h, v5.h}[3], [x18], x1");
- COMPARE(Ld3(v4.V2S(), v5.V2S(), v6.V2S(), 0,
- MemOperand(x19, x2, PostIndex)),
- "ld3 {v4.s, v5.s, v6.s}[0], [x19], x2");
- COMPARE(Ld3(v5.V4S(), v6.V4S(), v7.V4S(), 1,
- MemOperand(x20, 12, PostIndex)),
- "ld3 {v5.s, v6.s, v7.s}[1], [x20], #12");
- COMPARE(Ld3(v6.V2D(), v7.V2D(), v8.V2D(), 0,
- MemOperand(x21, 24, PostIndex)),
- "ld3 {v6.d, v7.d, v8.d}[0], [x21], #24");
- COMPARE(Ld3(v7.B(), v8.B(), v9.B(), 7,
- MemOperand(x22, 3, PostIndex)),
- "ld3 {v7.b, v8.b, v9.b}[7], [x22], #3");
- COMPARE(Ld3(v8.B(), v9.B(), v10.B(), 15,
- MemOperand(x23, x3, PostIndex)),
- "ld3 {v8.b, v9.b, v10.b}[15], [x23], x3");
- COMPARE(Ld3(v9.H(), v10.H(), v11.H(), 3,
- MemOperand(x24, x4, PostIndex)),
- "ld3 {v9.h, v10.h, v11.h}[3], [x24], x4");
- COMPARE(Ld3(v10.H(), v11.H(), v12.H(), 7,
- MemOperand(x25, 6, PostIndex)),
- "ld3 {v10.h, v11.h, v12.h}[7], [x25], #6");
- COMPARE(Ld3(v11.S(), v12.S(), v13.S(), 1,
- MemOperand(x26, 12, PostIndex)),
- "ld3 {v11.s, v12.s, v13.s}[1], [x26], #12");
- COMPARE(Ld3(v12.S(), v13.S(), v14.S(), 3,
- MemOperand(x27, x5, PostIndex)),
- "ld3 {v12.s, v13.s, v14.s}[3], [x27], x5");
- COMPARE(Ld3(v12.S(), v13.S(), v14.S(), 3,
- MemOperand(x27, 12, PostIndex)),
- "ld3 {v12.s, v13.s, v14.s}[3], [x27], #12");
- COMPARE(Ld3(v13.D(), v14.D(), v15.D(), 1,
- MemOperand(sp, x6, PostIndex)),
- "ld3 {v13.d, v14.d, v15.d}[1], [sp], x6");
- COMPARE(Ld3(v13.D(), v14.D(), v15.D(), 1,
- MemOperand(sp, 24, PostIndex)),
- "ld3 {v13.d, v14.d, v15.d}[1], [sp], #24");
+ COMPARE_MACRO(Ld3(v0.V8B(),
+ v1.V8B(),
+ v2.V8B(),
+ 0,
+ MemOperand(x15, x0, PostIndex)),
+ "ld3 {v0.b, v1.b, v2.b}[0], [x15], x0");
+ COMPARE_MACRO(Ld3(v1.V16B(),
+ v2.V16B(),
+ v3.V16B(),
+ 1,
+ MemOperand(x16, 3, PostIndex)),
+ "ld3 {v1.b, v2.b, v3.b}[1], [x16], #3");
+ COMPARE_MACRO(Ld3(v2.V4H(),
+ v3.V4H(),
+ v4.V4H(),
+ 2,
+ MemOperand(x17, 6, PostIndex)),
+ "ld3 {v2.h, v3.h, v4.h}[2], [x17], #6");
+ COMPARE_MACRO(Ld3(v3.V8H(),
+ v4.V8H(),
+ v5.V8H(),
+ 3,
+ MemOperand(x18, x1, PostIndex)),
+ "ld3 {v3.h, v4.h, v5.h}[3], [x18], x1");
+ COMPARE_MACRO(Ld3(v4.V2S(),
+ v5.V2S(),
+ v6.V2S(),
+ 0,
+ MemOperand(x19, x2, PostIndex)),
+ "ld3 {v4.s, v5.s, v6.s}[0], [x19], x2");
+ COMPARE_MACRO(Ld3(v5.V4S(),
+ v6.V4S(),
+ v7.V4S(),
+ 1,
+ MemOperand(x20, 12, PostIndex)),
+ "ld3 {v5.s, v6.s, v7.s}[1], [x20], #12");
+ COMPARE_MACRO(Ld3(v6.V2D(),
+ v7.V2D(),
+ v8.V2D(),
+ 0,
+ MemOperand(x21, 24, PostIndex)),
+ "ld3 {v6.d, v7.d, v8.d}[0], [x21], #24");
+ COMPARE_MACRO(Ld3(v7.B(), v8.B(), v9.B(), 7, MemOperand(x22, 3, PostIndex)),
+ "ld3 {v7.b, v8.b, v9.b}[7], [x22], #3");
+ COMPARE_MACRO(Ld3(v8.B(),
+ v9.B(),
+ v10.B(),
+ 15,
+ MemOperand(x23, x3, PostIndex)),
+ "ld3 {v8.b, v9.b, v10.b}[15], [x23], x3");
+ COMPARE_MACRO(Ld3(v9.H(),
+ v10.H(),
+ v11.H(),
+ 3,
+ MemOperand(x24, x4, PostIndex)),
+ "ld3 {v9.h, v10.h, v11.h}[3], [x24], x4");
+ COMPARE_MACRO(Ld3(v10.H(),
+ v11.H(),
+ v12.H(),
+ 7,
+ MemOperand(x25, 6, PostIndex)),
+ "ld3 {v10.h, v11.h, v12.h}[7], [x25], #6");
+ COMPARE_MACRO(Ld3(v11.S(),
+ v12.S(),
+ v13.S(),
+ 1,
+ MemOperand(x26, 12, PostIndex)),
+ "ld3 {v11.s, v12.s, v13.s}[1], [x26], #12");
+ COMPARE_MACRO(Ld3(v12.S(),
+ v13.S(),
+ v14.S(),
+ 3,
+ MemOperand(x27, x5, PostIndex)),
+ "ld3 {v12.s, v13.s, v14.s}[3], [x27], x5");
+ COMPARE_MACRO(Ld3(v12.S(),
+ v13.S(),
+ v14.S(),
+ 3,
+ MemOperand(x27, 12, PostIndex)),
+ "ld3 {v12.s, v13.s, v14.s}[3], [x27], #12");
+ COMPARE_MACRO(Ld3(v13.D(),
+ v14.D(),
+ v15.D(),
+ 1,
+ MemOperand(sp, x6, PostIndex)),
+ "ld3 {v13.d, v14.d, v15.d}[1], [sp], x6");
+ COMPARE_MACRO(Ld3(v13.D(),
+ v14.D(),
+ v15.D(),
+ 1,
+ MemOperand(sp, 24, PostIndex)),
+ "ld3 {v13.d, v14.d, v15.d}[1], [sp], #24");
- COMPARE(Ld4(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(), 0,
- MemOperand(x15)),
- "ld4 {v0.b, v1.b, v2.b, v3.b}[0], [x15]");
- COMPARE(Ld4(v1.V16B(), v2.V16B(), v3.V16B(), v4.V16B(), 1,
- MemOperand(x16)),
- "ld4 {v1.b, v2.b, v3.b, v4.b}[1], [x16]");
- COMPARE(Ld4(v2.V4H(), v3.V4H(), v4.V4H(), v5.V4H(), 2,
- MemOperand(x17)),
- "ld4 {v2.h, v3.h, v4.h, v5.h}[2], [x17]");
- COMPARE(Ld4(v3.V8H(), v4.V8H(), v5.V8H(), v6.V8H(), 3,
- MemOperand(x18)),
- "ld4 {v3.h, v4.h, v5.h, v6.h}[3], [x18]");
- COMPARE(Ld4(v4.V2S(), v5.V2S(), v6.V2S(), v7.V2S(), 0,
- MemOperand(x19)),
- "ld4 {v4.s, v5.s, v6.s, v7.s}[0], [x19]");
- COMPARE(Ld4(v5.V4S(), v6.V4S(), v7.V4S(), v8.V4S(), 1,
- MemOperand(x20)),
- "ld4 {v5.s, v6.s, v7.s, v8.s}[1], [x20]");
- COMPARE(Ld4(v6.V2D(), v7.V2D(), v8.V2D(), v9.V2D(), 0,
- MemOperand(x21)),
- "ld4 {v6.d, v7.d, v8.d, v9.d}[0], [x21]");
- COMPARE(Ld4(v7.B(), v8.B(), v9.B(), v10.B(), 7,
- MemOperand(x22)),
- "ld4 {v7.b, v8.b, v9.b, v10.b}[7], [x22]");
- COMPARE(Ld4(v8.B(), v9.B(), v10.B(), v11.B(), 15,
- MemOperand(x23)),
- "ld4 {v8.b, v9.b, v10.b, v11.b}[15], [x23]");
- COMPARE(Ld4(v9.H(), v10.H(), v11.H(), v12.H(), 3,
- MemOperand(x24)),
- "ld4 {v9.h, v10.h, v11.h, v12.h}[3], [x24]");
- COMPARE(Ld4(v10.H(), v11.H(), v12.H(), v13.H(), 7,
- MemOperand(x25)),
- "ld4 {v10.h, v11.h, v12.h, v13.h}[7], [x25]");
- COMPARE(Ld4(v11.S(), v12.S(), v13.S(), v14.S(), 1,
- MemOperand(x26)),
- "ld4 {v11.s, v12.s, v13.s, v14.s}[1], [x26]");
- COMPARE(Ld4(v12.S(), v13.S(), v14.S(), v15.S(), 3,
- MemOperand(x27)),
- "ld4 {v12.s, v13.s, v14.s, v15.s}[3], [x27]");
- COMPARE(Ld4(v13.D(), v14.D(), v15.D(), v16.D(), 1,
- MemOperand(sp)),
- "ld4 {v13.d, v14.d, v15.d, v16.d}[1], [sp]");
+ COMPARE_MACRO(Ld4(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(), 0, MemOperand(x15)),
+ "ld4 {v0.b, v1.b, v2.b, v3.b}[0], [x15]");
+ COMPARE_MACRO(Ld4(v1.V16B(),
+ v2.V16B(),
+ v3.V16B(),
+ v4.V16B(),
+ 1,
+ MemOperand(x16)),
+ "ld4 {v1.b, v2.b, v3.b, v4.b}[1], [x16]");
+ COMPARE_MACRO(Ld4(v2.V4H(), v3.V4H(), v4.V4H(), v5.V4H(), 2, MemOperand(x17)),
+ "ld4 {v2.h, v3.h, v4.h, v5.h}[2], [x17]");
+ COMPARE_MACRO(Ld4(v3.V8H(), v4.V8H(), v5.V8H(), v6.V8H(), 3, MemOperand(x18)),
+ "ld4 {v3.h, v4.h, v5.h, v6.h}[3], [x18]");
+ COMPARE_MACRO(Ld4(v4.V2S(), v5.V2S(), v6.V2S(), v7.V2S(), 0, MemOperand(x19)),
+ "ld4 {v4.s, v5.s, v6.s, v7.s}[0], [x19]");
+ COMPARE_MACRO(Ld4(v5.V4S(), v6.V4S(), v7.V4S(), v8.V4S(), 1, MemOperand(x20)),
+ "ld4 {v5.s, v6.s, v7.s, v8.s}[1], [x20]");
+ COMPARE_MACRO(Ld4(v6.V2D(), v7.V2D(), v8.V2D(), v9.V2D(), 0, MemOperand(x21)),
+ "ld4 {v6.d, v7.d, v8.d, v9.d}[0], [x21]");
+ COMPARE_MACRO(Ld4(v7.B(), v8.B(), v9.B(), v10.B(), 7, MemOperand(x22)),
+ "ld4 {v7.b, v8.b, v9.b, v10.b}[7], [x22]");
+ COMPARE_MACRO(Ld4(v8.B(), v9.B(), v10.B(), v11.B(), 15, MemOperand(x23)),
+ "ld4 {v8.b, v9.b, v10.b, v11.b}[15], [x23]");
+ COMPARE_MACRO(Ld4(v9.H(), v10.H(), v11.H(), v12.H(), 3, MemOperand(x24)),
+ "ld4 {v9.h, v10.h, v11.h, v12.h}[3], [x24]");
+ COMPARE_MACRO(Ld4(v10.H(), v11.H(), v12.H(), v13.H(), 7, MemOperand(x25)),
+ "ld4 {v10.h, v11.h, v12.h, v13.h}[7], [x25]");
+ COMPARE_MACRO(Ld4(v11.S(), v12.S(), v13.S(), v14.S(), 1, MemOperand(x26)),
+ "ld4 {v11.s, v12.s, v13.s, v14.s}[1], [x26]");
+ COMPARE_MACRO(Ld4(v12.S(), v13.S(), v14.S(), v15.S(), 3, MemOperand(x27)),
+ "ld4 {v12.s, v13.s, v14.s, v15.s}[3], [x27]");
+ COMPARE_MACRO(Ld4(v13.D(), v14.D(), v15.D(), v16.D(), 1, MemOperand(sp)),
+ "ld4 {v13.d, v14.d, v15.d, v16.d}[1], [sp]");
- COMPARE(Ld4(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(), 0,
- MemOperand(x15, x0, PostIndex)),
- "ld4 {v0.b, v1.b, v2.b, v3.b}[0], [x15], x0");
- COMPARE(Ld4(v1.V16B(), v2.V16B(), v3.V16B(), v4.V16B(), 1,
- MemOperand(x16, 4, PostIndex)),
- "ld4 {v1.b, v2.b, v3.b, v4.b}[1], [x16], #4");
- COMPARE(Ld4(v2.V4H(), v3.V4H(), v4.V4H(), v5.V4H(), 2,
- MemOperand(x17, 8, PostIndex)),
- "ld4 {v2.h, v3.h, v4.h, v5.h}[2], [x17], #8");
- COMPARE(Ld4(v3.V8H(), v4.V8H(), v5.V8H(), v6.V8H(), 3,
- MemOperand(x18, x1, PostIndex)),
- "ld4 {v3.h, v4.h, v5.h, v6.h}[3], [x18], x1");
- COMPARE(Ld4(v4.V2S(), v5.V2S(), v6.V2S(), v7.V2S(), 0,
- MemOperand(x19, x2, PostIndex)),
- "ld4 {v4.s, v5.s, v6.s, v7.s}[0], [x19], x2");
- COMPARE(Ld4(v5.V4S(), v6.V4S(), v7.V4S(), v8.V4S(), 1,
- MemOperand(x20, 16, PostIndex)),
- "ld4 {v5.s, v6.s, v7.s, v8.s}[1], [x20], #16");
- COMPARE(Ld4(v6.V2D(), v7.V2D(), v8.V2D(), v9.V2D(), 0,
- MemOperand(x21, 32, PostIndex)),
- "ld4 {v6.d, v7.d, v8.d, v9.d}[0], [x21], #32");
- COMPARE(Ld4(v7.B(), v8.B(), v9.B(), v10.B(), 7,
- MemOperand(x22, 4, PostIndex)),
- "ld4 {v7.b, v8.b, v9.b, v10.b}[7], [x22], #4");
- COMPARE(Ld4(v8.B(), v9.B(), v10.B(), v11.B(), 15,
- MemOperand(x23, x3, PostIndex)),
- "ld4 {v8.b, v9.b, v10.b, v11.b}[15], [x23], x3");
- COMPARE(Ld4(v9.H(), v10.H(), v11.H(), v12.H(), 3,
- MemOperand(x24, x4, PostIndex)),
- "ld4 {v9.h, v10.h, v11.h, v12.h}[3], [x24], x4");
- COMPARE(Ld4(v10.H(), v11.H(), v12.H(), v13.H(), 7,
- MemOperand(x25, 8, PostIndex)),
- "ld4 {v10.h, v11.h, v12.h, v13.h}[7], [x25], #8");
- COMPARE(Ld4(v11.S(), v12.S(), v13.S(), v14.S(), 1,
- MemOperand(x26, 16, PostIndex)),
- "ld4 {v11.s, v12.s, v13.s, v14.s}[1], [x26], #16");
- COMPARE(Ld4(v12.S(), v13.S(), v14.S(), v15.S(), 3,
- MemOperand(x27, x5, PostIndex)),
- "ld4 {v12.s, v13.s, v14.s, v15.s}[3], [x27], x5");
- COMPARE(Ld4(v11.S(), v12.S(), v13.S(), v14.S(), 3,
- MemOperand(x26, 16, PostIndex)),
- "ld4 {v11.s, v12.s, v13.s, v14.s}[3], [x26], #16");
- COMPARE(Ld4(v13.D(), v14.D(), v15.D(), v16.D(), 1,
- MemOperand(sp, x6, PostIndex)),
- "ld4 {v13.d, v14.d, v15.d, v16.d}[1], [sp], x6");
- COMPARE(Ld4(v13.D(), v14.D(), v15.D(), v16.D(), 1,
- MemOperand(sp, 32, PostIndex)),
- "ld4 {v13.d, v14.d, v15.d, v16.d}[1], [sp], #32");
+ COMPARE_MACRO(Ld4(v0.V8B(),
+ v1.V8B(),
+ v2.V8B(),
+ v3.V8B(),
+ 0,
+ MemOperand(x15, x0, PostIndex)),
+ "ld4 {v0.b, v1.b, v2.b, v3.b}[0], [x15], x0");
+ COMPARE_MACRO(Ld4(v1.V16B(),
+ v2.V16B(),
+ v3.V16B(),
+ v4.V16B(),
+ 1,
+ MemOperand(x16, 4, PostIndex)),
+ "ld4 {v1.b, v2.b, v3.b, v4.b}[1], [x16], #4");
+ COMPARE_MACRO(Ld4(v2.V4H(),
+ v3.V4H(),
+ v4.V4H(),
+ v5.V4H(),
+ 2,
+ MemOperand(x17, 8, PostIndex)),
+ "ld4 {v2.h, v3.h, v4.h, v5.h}[2], [x17], #8");
+ COMPARE_MACRO(Ld4(v3.V8H(),
+ v4.V8H(),
+ v5.V8H(),
+ v6.V8H(),
+ 3,
+ MemOperand(x18, x1, PostIndex)),
+ "ld4 {v3.h, v4.h, v5.h, v6.h}[3], [x18], x1");
+ COMPARE_MACRO(Ld4(v4.V2S(),
+ v5.V2S(),
+ v6.V2S(),
+ v7.V2S(),
+ 0,
+ MemOperand(x19, x2, PostIndex)),
+ "ld4 {v4.s, v5.s, v6.s, v7.s}[0], [x19], x2");
+ COMPARE_MACRO(Ld4(v5.V4S(),
+ v6.V4S(),
+ v7.V4S(),
+ v8.V4S(),
+ 1,
+ MemOperand(x20, 16, PostIndex)),
+ "ld4 {v5.s, v6.s, v7.s, v8.s}[1], [x20], #16");
+ COMPARE_MACRO(Ld4(v6.V2D(),
+ v7.V2D(),
+ v8.V2D(),
+ v9.V2D(),
+ 0,
+ MemOperand(x21, 32, PostIndex)),
+ "ld4 {v6.d, v7.d, v8.d, v9.d}[0], [x21], #32");
+ COMPARE_MACRO(Ld4(v7.B(),
+ v8.B(),
+ v9.B(),
+ v10.B(),
+ 7,
+ MemOperand(x22, 4, PostIndex)),
+ "ld4 {v7.b, v8.b, v9.b, v10.b}[7], [x22], #4");
+ COMPARE_MACRO(Ld4(v8.B(),
+ v9.B(),
+ v10.B(),
+ v11.B(),
+ 15,
+ MemOperand(x23, x3, PostIndex)),
+ "ld4 {v8.b, v9.b, v10.b, v11.b}[15], [x23], x3");
+ COMPARE_MACRO(Ld4(v9.H(),
+ v10.H(),
+ v11.H(),
+ v12.H(),
+ 3,
+ MemOperand(x24, x4, PostIndex)),
+ "ld4 {v9.h, v10.h, v11.h, v12.h}[3], [x24], x4");
+ COMPARE_MACRO(Ld4(v10.H(),
+ v11.H(),
+ v12.H(),
+ v13.H(),
+ 7,
+ MemOperand(x25, 8, PostIndex)),
+ "ld4 {v10.h, v11.h, v12.h, v13.h}[7], [x25], #8");
+ COMPARE_MACRO(Ld4(v11.S(),
+ v12.S(),
+ v13.S(),
+ v14.S(),
+ 1,
+ MemOperand(x26, 16, PostIndex)),
+ "ld4 {v11.s, v12.s, v13.s, v14.s}[1], [x26], #16");
+ COMPARE_MACRO(Ld4(v12.S(),
+ v13.S(),
+ v14.S(),
+ v15.S(),
+ 3,
+ MemOperand(x27, x5, PostIndex)),
+ "ld4 {v12.s, v13.s, v14.s, v15.s}[3], [x27], x5");
+ COMPARE_MACRO(Ld4(v11.S(),
+ v12.S(),
+ v13.S(),
+ v14.S(),
+ 3,
+ MemOperand(x26, 16, PostIndex)),
+ "ld4 {v11.s, v12.s, v13.s, v14.s}[3], [x26], #16");
+ COMPARE_MACRO(Ld4(v13.D(),
+ v14.D(),
+ v15.D(),
+ v16.D(),
+ 1,
+ MemOperand(sp, x6, PostIndex)),
+ "ld4 {v13.d, v14.d, v15.d, v16.d}[1], [sp], x6");
+ COMPARE_MACRO(Ld4(v13.D(),
+ v14.D(),
+ v15.D(),
+ v16.D(),
+ 1,
+ MemOperand(sp, 32, PostIndex)),
+ "ld4 {v13.d, v14.d, v15.d, v16.d}[1], [sp], #32");
- COMPARE(St1(v0.V8B(), 0, MemOperand(x15)), "st1 {v0.b}[0], [x15]");
- COMPARE(St1(v1.V16B(), 1, MemOperand(x16)), "st1 {v1.b}[1], [x16]");
- COMPARE(St1(v2.V4H(), 2, MemOperand(x17)), "st1 {v2.h}[2], [x17]");
- COMPARE(St1(v3.V8H(), 3, MemOperand(x18)), "st1 {v3.h}[3], [x18]");
- COMPARE(St1(v4.V2S(), 0, MemOperand(x19)), "st1 {v4.s}[0], [x19]");
- COMPARE(St1(v5.V4S(), 1, MemOperand(x20)), "st1 {v5.s}[1], [x20]");
- COMPARE(St1(v6.V2D(), 0, MemOperand(x21)), "st1 {v6.d}[0], [x21]");
- COMPARE(St1(v7.B(), 7, MemOperand(x22)), "st1 {v7.b}[7], [x22]");
- COMPARE(St1(v8.B(), 15, MemOperand(x23)), "st1 {v8.b}[15], [x23]");
- COMPARE(St1(v9.H(), 3, MemOperand(x24)), "st1 {v9.h}[3], [x24]");
- COMPARE(St1(v10.H(), 7, MemOperand(x25)), "st1 {v10.h}[7], [x25]");
- COMPARE(St1(v11.S(), 1, MemOperand(x26)), "st1 {v11.s}[1], [x26]");
- COMPARE(St1(v12.S(), 3, MemOperand(x27)), "st1 {v12.s}[3], [x27]");
- COMPARE(St1(v13.D(), 1, MemOperand(sp)), "st1 {v13.d}[1], [sp]");
+ COMPARE_MACRO(St1(v0.V8B(), 0, MemOperand(x15)), "st1 {v0.b}[0], [x15]");
+ COMPARE_MACRO(St1(v1.V16B(), 1, MemOperand(x16)), "st1 {v1.b}[1], [x16]");
+ COMPARE_MACRO(St1(v2.V4H(), 2, MemOperand(x17)), "st1 {v2.h}[2], [x17]");
+ COMPARE_MACRO(St1(v3.V8H(), 3, MemOperand(x18)), "st1 {v3.h}[3], [x18]");
+ COMPARE_MACRO(St1(v4.V2S(), 0, MemOperand(x19)), "st1 {v4.s}[0], [x19]");
+ COMPARE_MACRO(St1(v5.V4S(), 1, MemOperand(x20)), "st1 {v5.s}[1], [x20]");
+ COMPARE_MACRO(St1(v6.V2D(), 0, MemOperand(x21)), "st1 {v6.d}[0], [x21]");
+ COMPARE_MACRO(St1(v7.B(), 7, MemOperand(x22)), "st1 {v7.b}[7], [x22]");
+ COMPARE_MACRO(St1(v8.B(), 15, MemOperand(x23)), "st1 {v8.b}[15], [x23]");
+ COMPARE_MACRO(St1(v9.H(), 3, MemOperand(x24)), "st1 {v9.h}[3], [x24]");
+ COMPARE_MACRO(St1(v10.H(), 7, MemOperand(x25)), "st1 {v10.h}[7], [x25]");
+ COMPARE_MACRO(St1(v11.S(), 1, MemOperand(x26)), "st1 {v11.s}[1], [x26]");
+ COMPARE_MACRO(St1(v12.S(), 3, MemOperand(x27)), "st1 {v12.s}[3], [x27]");
+ COMPARE_MACRO(St1(v13.D(), 1, MemOperand(sp)), "st1 {v13.d}[1], [sp]");
- COMPARE(St1(v0.V8B(), 0, MemOperand(x15, x0, PostIndex)),
- "st1 {v0.b}[0], [x15], x0");
- COMPARE(St1(v1.V16B(), 1, MemOperand(x16, 1, PostIndex)),
- "st1 {v1.b}[1], [x16], #1");
- COMPARE(St1(v2.V4H(), 2, MemOperand(x17, 2, PostIndex)),
- "st1 {v2.h}[2], [x17], #2");
- COMPARE(St1(v3.V8H(), 3, MemOperand(x18, x1, PostIndex)),
- "st1 {v3.h}[3], [x18], x1");
- COMPARE(St1(v4.V2S(), 0, MemOperand(x19, x2, PostIndex)),
- "st1 {v4.s}[0], [x19], x2");
- COMPARE(St1(v5.V4S(), 1, MemOperand(x20, 4, PostIndex)),
- "st1 {v5.s}[1], [x20], #4");
- COMPARE(St1(v6.V2D(), 0, MemOperand(x21, 8, PostIndex)),
- "st1 {v6.d}[0], [x21], #8");
- COMPARE(St1(v7.B(), 7, MemOperand(x22, 1, PostIndex)),
- "st1 {v7.b}[7], [x22], #1");
- COMPARE(St1(v8.B(), 15, MemOperand(x23, x3, PostIndex)),
- "st1 {v8.b}[15], [x23], x3");
- COMPARE(St1(v9.H(), 3, MemOperand(x24, x4, PostIndex)),
- "st1 {v9.h}[3], [x24], x4");
- COMPARE(St1(v10.H(), 7, MemOperand(x25, 2, PostIndex)),
- "st1 {v10.h}[7], [x25], #2");
- COMPARE(St1(v11.S(), 1, MemOperand(x26, 4, PostIndex)),
- "st1 {v11.s}[1], [x26], #4");
- COMPARE(St1(v12.S(), 3, MemOperand(x27, x5, PostIndex)),
- "st1 {v12.s}[3], [x27], x5");
- COMPARE(St1(v13.D(), 1, MemOperand(sp, x6, PostIndex)),
- "st1 {v13.d}[1], [sp], x6");
- COMPARE(St2(v0.V8B(), v1.V8B(), 0, MemOperand(x15, x0, PostIndex)),
- "st2 {v0.b, v1.b}[0], [x15], x0");
- COMPARE(St2(v1.V16B(), v2.V16B(), 1, MemOperand(x16, 2, PostIndex)),
- "st2 {v1.b, v2.b}[1], [x16], #2");
- COMPARE(St2(v2.V4H(), v3.V4H(), 2, MemOperand(x17, 4, PostIndex)),
- "st2 {v2.h, v3.h}[2], [x17], #4");
- COMPARE(St2(v3.V8H(), v4.V8H(), 3, MemOperand(x18, x1, PostIndex)),
- "st2 {v3.h, v4.h}[3], [x18], x1");
- COMPARE(St2(v4.V2S(), v5.V2S(), 0, MemOperand(x19, x2, PostIndex)),
- "st2 {v4.s, v5.s}[0], [x19], x2");
- COMPARE(St2(v5.V4S(), v6.V4S(), 1, MemOperand(x20, 8, PostIndex)),
- "st2 {v5.s, v6.s}[1], [x20], #8");
- COMPARE(St2(v6.V2D(), v7.V2D(), 0, MemOperand(x21, 16, PostIndex)),
- "st2 {v6.d, v7.d}[0], [x21], #16");
- COMPARE(St2(v7.B(), v8.B(), 7, MemOperand(x22, 2, PostIndex)),
- "st2 {v7.b, v8.b}[7], [x22], #2");
- COMPARE(St2(v8.B(), v9.B(), 15, MemOperand(x23, x3, PostIndex)),
- "st2 {v8.b, v9.b}[15], [x23], x3");
- COMPARE(St2(v9.H(), v10.H(), 3, MemOperand(x24, x4, PostIndex)),
- "st2 {v9.h, v10.h}[3], [x24], x4");
- COMPARE(St2(v10.H(), v11.H(), 7, MemOperand(x25, 4, PostIndex)),
- "st2 {v10.h, v11.h}[7], [x25], #4");
- COMPARE(St2(v11.S(), v12.S(), 1, MemOperand(x26, 8, PostIndex)),
- "st2 {v11.s, v12.s}[1], [x26], #8");
- COMPARE(St2(v12.S(), v13.S(), 3, MemOperand(x27, x5, PostIndex)),
- "st2 {v12.s, v13.s}[3], [x27], x5");
- COMPARE(St2(v13.D(), v14.D(), 1, MemOperand(sp, x6, PostIndex)),
- "st2 {v13.d, v14.d}[1], [sp], x6");
- COMPARE(St3(VLIST3(v0.V8B()), 0, MemOperand(x15, x0, PostIndex)),
- "st3 {v0.b, v1.b, v2.b}[0], [x15], x0");
- COMPARE(St3(VLIST3(v1.V16B()), 1, MemOperand(x16, 3, PostIndex)),
- "st3 {v1.b, v2.b, v3.b}[1], [x16], #3");
- COMPARE(St3(VLIST3(v2.V4H()), 2, MemOperand(x17, 6, PostIndex)),
- "st3 {v2.h, v3.h, v4.h}[2], [x17], #6");
- COMPARE(St3(VLIST3(v3.V8H()), 3, MemOperand(x18, x1, PostIndex)),
- "st3 {v3.h, v4.h, v5.h}[3], [x18], x1");
- COMPARE(St3(VLIST3(v4.V2S()), 0, MemOperand(x19, x2, PostIndex)),
- "st3 {v4.s, v5.s, v6.s}[0], [x19], x2");
- COMPARE(St3(VLIST3(v5.V4S()), 1, MemOperand(x20, 12, PostIndex)),
- "st3 {v5.s, v6.s, v7.s}[1], [x20], #12");
- COMPARE(St3(VLIST3(v6.V2D()), 0, MemOperand(x21, 24, PostIndex)),
- "st3 {v6.d, v7.d, v8.d}[0], [x21], #24");
- COMPARE(St3(VLIST3(v7.B()), 7, MemOperand(x22, 3, PostIndex)),
- "st3 {v7.b, v8.b, v9.b}[7], [x22], #3");
- COMPARE(St3(VLIST3(v8.B()), 15, MemOperand(x23, x3, PostIndex)),
- "st3 {v8.b, v9.b, v10.b}[15], [x23], x3");
- COMPARE(St3(VLIST3(v9.H()), 3, MemOperand(x24, x4, PostIndex)),
- "st3 {v9.h, v10.h, v11.h}[3], [x24], x4");
- COMPARE(St3(VLIST3(v10.H()), 7, MemOperand(x25, 6, PostIndex)),
- "st3 {v10.h, v11.h, v12.h}[7], [x25], #6");
- COMPARE(St3(VLIST3(v11.S()), 1, MemOperand(x26, 12, PostIndex)),
- "st3 {v11.s, v12.s, v13.s}[1], [x26], #12");
- COMPARE(St3(VLIST3(v12.S()), 3, MemOperand(x27, x5, PostIndex)),
- "st3 {v12.s, v13.s, v14.s}[3], [x27], x5");
- COMPARE(St3(VLIST3(v13.D()), 1, MemOperand(sp, x6, PostIndex)),
- "st3 {v13.d, v14.d, v15.d}[1], [sp], x6");
+ COMPARE_MACRO(St1(v0.V8B(), 0, MemOperand(x15, x0, PostIndex)),
+ "st1 {v0.b}[0], [x15], x0");
+ COMPARE_MACRO(St1(v1.V16B(), 1, MemOperand(x16, 1, PostIndex)),
+ "st1 {v1.b}[1], [x16], #1");
+ COMPARE_MACRO(St1(v2.V4H(), 2, MemOperand(x17, 2, PostIndex)),
+ "st1 {v2.h}[2], [x17], #2");
+ COMPARE_MACRO(St1(v3.V8H(), 3, MemOperand(x18, x1, PostIndex)),
+ "st1 {v3.h}[3], [x18], x1");
+ COMPARE_MACRO(St1(v4.V2S(), 0, MemOperand(x19, x2, PostIndex)),
+ "st1 {v4.s}[0], [x19], x2");
+ COMPARE_MACRO(St1(v5.V4S(), 1, MemOperand(x20, 4, PostIndex)),
+ "st1 {v5.s}[1], [x20], #4");
+ COMPARE_MACRO(St1(v6.V2D(), 0, MemOperand(x21, 8, PostIndex)),
+ "st1 {v6.d}[0], [x21], #8");
+ COMPARE_MACRO(St1(v7.B(), 7, MemOperand(x22, 1, PostIndex)),
+ "st1 {v7.b}[7], [x22], #1");
+ COMPARE_MACRO(St1(v8.B(), 15, MemOperand(x23, x3, PostIndex)),
+ "st1 {v8.b}[15], [x23], x3");
+ COMPARE_MACRO(St1(v9.H(), 3, MemOperand(x24, x4, PostIndex)),
+ "st1 {v9.h}[3], [x24], x4");
+ COMPARE_MACRO(St1(v10.H(), 7, MemOperand(x25, 2, PostIndex)),
+ "st1 {v10.h}[7], [x25], #2");
+ COMPARE_MACRO(St1(v11.S(), 1, MemOperand(x26, 4, PostIndex)),
+ "st1 {v11.s}[1], [x26], #4");
+ COMPARE_MACRO(St1(v12.S(), 3, MemOperand(x27, x5, PostIndex)),
+ "st1 {v12.s}[3], [x27], x5");
+ COMPARE_MACRO(St1(v13.D(), 1, MemOperand(sp, x6, PostIndex)),
+ "st1 {v13.d}[1], [sp], x6");
+ COMPARE_MACRO(St2(v0.V8B(), v1.V8B(), 0, MemOperand(x15, x0, PostIndex)),
+ "st2 {v0.b, v1.b}[0], [x15], x0");
+ COMPARE_MACRO(St2(v1.V16B(), v2.V16B(), 1, MemOperand(x16, 2, PostIndex)),
+ "st2 {v1.b, v2.b}[1], [x16], #2");
+ COMPARE_MACRO(St2(v2.V4H(), v3.V4H(), 2, MemOperand(x17, 4, PostIndex)),
+ "st2 {v2.h, v3.h}[2], [x17], #4");
+ COMPARE_MACRO(St2(v3.V8H(), v4.V8H(), 3, MemOperand(x18, x1, PostIndex)),
+ "st2 {v3.h, v4.h}[3], [x18], x1");
+ COMPARE_MACRO(St2(v4.V2S(), v5.V2S(), 0, MemOperand(x19, x2, PostIndex)),
+ "st2 {v4.s, v5.s}[0], [x19], x2");
+ COMPARE_MACRO(St2(v5.V4S(), v6.V4S(), 1, MemOperand(x20, 8, PostIndex)),
+ "st2 {v5.s, v6.s}[1], [x20], #8");
+ COMPARE_MACRO(St2(v6.V2D(), v7.V2D(), 0, MemOperand(x21, 16, PostIndex)),
+ "st2 {v6.d, v7.d}[0], [x21], #16");
+ COMPARE_MACRO(St2(v7.B(), v8.B(), 7, MemOperand(x22, 2, PostIndex)),
+ "st2 {v7.b, v8.b}[7], [x22], #2");
+ COMPARE_MACRO(St2(v8.B(), v9.B(), 15, MemOperand(x23, x3, PostIndex)),
+ "st2 {v8.b, v9.b}[15], [x23], x3");
+ COMPARE_MACRO(St2(v9.H(), v10.H(), 3, MemOperand(x24, x4, PostIndex)),
+ "st2 {v9.h, v10.h}[3], [x24], x4");
+ COMPARE_MACRO(St2(v10.H(), v11.H(), 7, MemOperand(x25, 4, PostIndex)),
+ "st2 {v10.h, v11.h}[7], [x25], #4");
+ COMPARE_MACRO(St2(v11.S(), v12.S(), 1, MemOperand(x26, 8, PostIndex)),
+ "st2 {v11.s, v12.s}[1], [x26], #8");
+ COMPARE_MACRO(St2(v12.S(), v13.S(), 3, MemOperand(x27, x5, PostIndex)),
+ "st2 {v12.s, v13.s}[3], [x27], x5");
+ COMPARE_MACRO(St2(v13.D(), v14.D(), 1, MemOperand(sp, x6, PostIndex)),
+ "st2 {v13.d, v14.d}[1], [sp], x6");
+ COMPARE_MACRO(St3(VLIST3(v0.V8B()), 0, MemOperand(x15, x0, PostIndex)),
+ "st3 {v0.b, v1.b, v2.b}[0], [x15], x0");
+ COMPARE_MACRO(St3(VLIST3(v1.V16B()), 1, MemOperand(x16, 3, PostIndex)),
+ "st3 {v1.b, v2.b, v3.b}[1], [x16], #3");
+ COMPARE_MACRO(St3(VLIST3(v2.V4H()), 2, MemOperand(x17, 6, PostIndex)),
+ "st3 {v2.h, v3.h, v4.h}[2], [x17], #6");
+ COMPARE_MACRO(St3(VLIST3(v3.V8H()), 3, MemOperand(x18, x1, PostIndex)),
+ "st3 {v3.h, v4.h, v5.h}[3], [x18], x1");
+ COMPARE_MACRO(St3(VLIST3(v4.V2S()), 0, MemOperand(x19, x2, PostIndex)),
+ "st3 {v4.s, v5.s, v6.s}[0], [x19], x2");
+ COMPARE_MACRO(St3(VLIST3(v5.V4S()), 1, MemOperand(x20, 12, PostIndex)),
+ "st3 {v5.s, v6.s, v7.s}[1], [x20], #12");
+ COMPARE_MACRO(St3(VLIST3(v6.V2D()), 0, MemOperand(x21, 24, PostIndex)),
+ "st3 {v6.d, v7.d, v8.d}[0], [x21], #24");
+ COMPARE_MACRO(St3(VLIST3(v7.B()), 7, MemOperand(x22, 3, PostIndex)),
+ "st3 {v7.b, v8.b, v9.b}[7], [x22], #3");
+ COMPARE_MACRO(St3(VLIST3(v8.B()), 15, MemOperand(x23, x3, PostIndex)),
+ "st3 {v8.b, v9.b, v10.b}[15], [x23], x3");
+ COMPARE_MACRO(St3(VLIST3(v9.H()), 3, MemOperand(x24, x4, PostIndex)),
+ "st3 {v9.h, v10.h, v11.h}[3], [x24], x4");
+ COMPARE_MACRO(St3(VLIST3(v10.H()), 7, MemOperand(x25, 6, PostIndex)),
+ "st3 {v10.h, v11.h, v12.h}[7], [x25], #6");
+ COMPARE_MACRO(St3(VLIST3(v11.S()), 1, MemOperand(x26, 12, PostIndex)),
+ "st3 {v11.s, v12.s, v13.s}[1], [x26], #12");
+ COMPARE_MACRO(St3(VLIST3(v12.S()), 3, MemOperand(x27, x5, PostIndex)),
+ "st3 {v12.s, v13.s, v14.s}[3], [x27], x5");
+ COMPARE_MACRO(St3(VLIST3(v13.D()), 1, MemOperand(sp, x6, PostIndex)),
+ "st3 {v13.d, v14.d, v15.d}[1], [sp], x6");
- COMPARE(St4(VLIST4(v0.V8B()), 0, MemOperand(x15, x0, PostIndex)),
- "st4 {v0.b, v1.b, v2.b, v3.b}[0], [x15], x0");
- COMPARE(St4(VLIST4(v1.V16B()), 1, MemOperand(x16, 4, PostIndex)),
- "st4 {v1.b, v2.b, v3.b, v4.b}[1], [x16], #4");
- COMPARE(St4(VLIST4(v2.V4H()), 2, MemOperand(x17, 8, PostIndex)),
- "st4 {v2.h, v3.h, v4.h, v5.h}[2], [x17], #8");
- COMPARE(St4(VLIST4(v3.V8H()), 3, MemOperand(x18, x1, PostIndex)),
- "st4 {v3.h, v4.h, v5.h, v6.h}[3], [x18], x1");
- COMPARE(St4(VLIST4(v4.V2S()), 0, MemOperand(x19, x2, PostIndex)),
- "st4 {v4.s, v5.s, v6.s, v7.s}[0], [x19], x2");
- COMPARE(St4(VLIST4(v5.V4S()), 1, MemOperand(x20, 16, PostIndex)),
- "st4 {v5.s, v6.s, v7.s, v8.s}[1], [x20], #16");
- COMPARE(St4(VLIST4(v6.V2D()), 0, MemOperand(x21, 32, PostIndex)),
- "st4 {v6.d, v7.d, v8.d, v9.d}[0], [x21], #32");
- COMPARE(St4(VLIST4(v7.B()), 7, MemOperand(x22, 4, PostIndex)),
- "st4 {v7.b, v8.b, v9.b, v10.b}[7], [x22], #4");
- COMPARE(St4(VLIST4(v8.B()), 15, MemOperand(x23, x3, PostIndex)),
- "st4 {v8.b, v9.b, v10.b, v11.b}[15], [x23], x3");
- COMPARE(St4(VLIST4(v9.H()), 3, MemOperand(x24, x4, PostIndex)),
- "st4 {v9.h, v10.h, v11.h, v12.h}[3], [x24], x4");
- COMPARE(St4(VLIST4(v10.H()), 7, MemOperand(x25, 8, PostIndex)),
- "st4 {v10.h, v11.h, v12.h, v13.h}[7], [x25], #8");
- COMPARE(St4(VLIST4(v11.S()), 1, MemOperand(x26, 16, PostIndex)),
- "st4 {v11.s, v12.s, v13.s, v14.s}[1], [x26], #16");
- COMPARE(St4(VLIST4(v12.S()), 3, MemOperand(x27, x5, PostIndex)),
- "st4 {v12.s, v13.s, v14.s, v15.s}[3], [x27], x5");
- COMPARE(St4(VLIST4(v13.D()), 1, MemOperand(sp, x6, PostIndex)),
- "st4 {v13.d, v14.d, v15.d, v16.d}[1], [sp], x6");
+ COMPARE_MACRO(St4(VLIST4(v0.V8B()), 0, MemOperand(x15, x0, PostIndex)),
+ "st4 {v0.b, v1.b, v2.b, v3.b}[0], [x15], x0");
+ COMPARE_MACRO(St4(VLIST4(v1.V16B()), 1, MemOperand(x16, 4, PostIndex)),
+ "st4 {v1.b, v2.b, v3.b, v4.b}[1], [x16], #4");
+ COMPARE_MACRO(St4(VLIST4(v2.V4H()), 2, MemOperand(x17, 8, PostIndex)),
+ "st4 {v2.h, v3.h, v4.h, v5.h}[2], [x17], #8");
+ COMPARE_MACRO(St4(VLIST4(v3.V8H()), 3, MemOperand(x18, x1, PostIndex)),
+ "st4 {v3.h, v4.h, v5.h, v6.h}[3], [x18], x1");
+ COMPARE_MACRO(St4(VLIST4(v4.V2S()), 0, MemOperand(x19, x2, PostIndex)),
+ "st4 {v4.s, v5.s, v6.s, v7.s}[0], [x19], x2");
+ COMPARE_MACRO(St4(VLIST4(v5.V4S()), 1, MemOperand(x20, 16, PostIndex)),
+ "st4 {v5.s, v6.s, v7.s, v8.s}[1], [x20], #16");
+ COMPARE_MACRO(St4(VLIST4(v6.V2D()), 0, MemOperand(x21, 32, PostIndex)),
+ "st4 {v6.d, v7.d, v8.d, v9.d}[0], [x21], #32");
+ COMPARE_MACRO(St4(VLIST4(v7.B()), 7, MemOperand(x22, 4, PostIndex)),
+ "st4 {v7.b, v8.b, v9.b, v10.b}[7], [x22], #4");
+ COMPARE_MACRO(St4(VLIST4(v8.B()), 15, MemOperand(x23, x3, PostIndex)),
+ "st4 {v8.b, v9.b, v10.b, v11.b}[15], [x23], x3");
+ COMPARE_MACRO(St4(VLIST4(v9.H()), 3, MemOperand(x24, x4, PostIndex)),
+ "st4 {v9.h, v10.h, v11.h, v12.h}[3], [x24], x4");
+ COMPARE_MACRO(St4(VLIST4(v10.H()), 7, MemOperand(x25, 8, PostIndex)),
+ "st4 {v10.h, v11.h, v12.h, v13.h}[7], [x25], #8");
+ COMPARE_MACRO(St4(VLIST4(v11.S()), 1, MemOperand(x26, 16, PostIndex)),
+ "st4 {v11.s, v12.s, v13.s, v14.s}[1], [x26], #16");
+ COMPARE_MACRO(St4(VLIST4(v12.S()), 3, MemOperand(x27, x5, PostIndex)),
+ "st4 {v12.s, v13.s, v14.s, v15.s}[3], [x27], x5");
+ COMPARE_MACRO(St4(VLIST4(v13.D()), 1, MemOperand(sp, x6, PostIndex)),
+ "st4 {v13.d, v14.d, v15.d, v16.d}[1], [sp], x6");
CLEANUP();
}
@@ -3822,148 +4006,197 @@
TEST(neon_load_all_lanes) {
- SETUP_MACRO();
+ SETUP();
- COMPARE(Ld1r(v14.V8B(), MemOperand(x0)), "ld1r {v14.8b}, [x0]");
- COMPARE(Ld1r(v15.V16B(), MemOperand(x1)), "ld1r {v15.16b}, [x1]");
- COMPARE(Ld1r(v16.V4H(), MemOperand(x2)), "ld1r {v16.4h}, [x2]");
- COMPARE(Ld1r(v17.V8H(), MemOperand(x3)), "ld1r {v17.8h}, [x3]");
- COMPARE(Ld1r(v18.V2S(), MemOperand(x4)), "ld1r {v18.2s}, [x4]");
- COMPARE(Ld1r(v19.V4S(), MemOperand(x5)), "ld1r {v19.4s}, [x5]");
- COMPARE(Ld1r(v20.V2D(), MemOperand(sp)), "ld1r {v20.2d}, [sp]");
- COMPARE(Ld1r(v21.V1D(), MemOperand(x30)), "ld1r {v21.1d}, [x30]");
+ COMPARE_MACRO(Ld1r(v14.V8B(), MemOperand(x0)), "ld1r {v14.8b}, [x0]");
+ COMPARE_MACRO(Ld1r(v15.V16B(), MemOperand(x1)), "ld1r {v15.16b}, [x1]");
+ COMPARE_MACRO(Ld1r(v16.V4H(), MemOperand(x2)), "ld1r {v16.4h}, [x2]");
+ COMPARE_MACRO(Ld1r(v17.V8H(), MemOperand(x3)), "ld1r {v17.8h}, [x3]");
+ COMPARE_MACRO(Ld1r(v18.V2S(), MemOperand(x4)), "ld1r {v18.2s}, [x4]");
+ COMPARE_MACRO(Ld1r(v19.V4S(), MemOperand(x5)), "ld1r {v19.4s}, [x5]");
+ COMPARE_MACRO(Ld1r(v20.V2D(), MemOperand(sp)), "ld1r {v20.2d}, [sp]");
+ COMPARE_MACRO(Ld1r(v21.V1D(), MemOperand(x30)), "ld1r {v21.1d}, [x30]");
- COMPARE(Ld1r(v22.V8B(), MemOperand(x6, 1, PostIndex)),
- "ld1r {v22.8b}, [x6], #1");
- COMPARE(Ld1r(v23.V16B(), MemOperand(x7, x16, PostIndex)),
- "ld1r {v23.16b}, [x7], x16");
- COMPARE(Ld1r(v24.V4H(), MemOperand(x8, x17, PostIndex)),
- "ld1r {v24.4h}, [x8], x17");
- COMPARE(Ld1r(v25.V8H(), MemOperand(x9, 2, PostIndex)),
- "ld1r {v25.8h}, [x9], #2");
- COMPARE(Ld1r(v26.V2S(), MemOperand(x10, 4, PostIndex)),
- "ld1r {v26.2s}, [x10], #4");
- COMPARE(Ld1r(v27.V4S(), MemOperand(x11, x18, PostIndex)),
- "ld1r {v27.4s}, [x11], x18");
- COMPARE(Ld1r(v28.V2D(), MemOperand(x12, 8, PostIndex)),
- "ld1r {v28.2d}, [x12], #8");
- COMPARE(Ld1r(v29.V1D(), MemOperand(x13, 8, PostIndex)),
- "ld1r {v29.1d}, [x13], #8");
+ COMPARE_MACRO(Ld1r(v22.V8B(), MemOperand(x6, 1, PostIndex)),
+ "ld1r {v22.8b}, [x6], #1");
+ COMPARE_MACRO(Ld1r(v23.V16B(), MemOperand(x7, x16, PostIndex)),
+ "ld1r {v23.16b}, [x7], x16");
+ COMPARE_MACRO(Ld1r(v24.V4H(), MemOperand(x8, x17, PostIndex)),
+ "ld1r {v24.4h}, [x8], x17");
+ COMPARE_MACRO(Ld1r(v25.V8H(), MemOperand(x9, 2, PostIndex)),
+ "ld1r {v25.8h}, [x9], #2");
+ COMPARE_MACRO(Ld1r(v26.V2S(), MemOperand(x10, 4, PostIndex)),
+ "ld1r {v26.2s}, [x10], #4");
+ COMPARE_MACRO(Ld1r(v27.V4S(), MemOperand(x11, x18, PostIndex)),
+ "ld1r {v27.4s}, [x11], x18");
+ COMPARE_MACRO(Ld1r(v28.V2D(), MemOperand(x12, 8, PostIndex)),
+ "ld1r {v28.2d}, [x12], #8");
+ COMPARE_MACRO(Ld1r(v29.V1D(), MemOperand(x13, 8, PostIndex)),
+ "ld1r {v29.1d}, [x13], #8");
- COMPARE(Ld2r(v14.V8B(), v15.V8B(), MemOperand(x0)),
- "ld2r {v14.8b, v15.8b}, [x0]");
- COMPARE(Ld2r(v15.V16B(), v16.V16B(), MemOperand(x1)),
- "ld2r {v15.16b, v16.16b}, [x1]");
- COMPARE(Ld2r(v16.V4H(), v17.V4H(), MemOperand(x2)),
- "ld2r {v16.4h, v17.4h}, [x2]");
- COMPARE(Ld2r(v17.V8H(), v18.V8H(), MemOperand(x3)),
- "ld2r {v17.8h, v18.8h}, [x3]");
- COMPARE(Ld2r(v18.V2S(), v19.V2S(), MemOperand(x4)),
- "ld2r {v18.2s, v19.2s}, [x4]");
- COMPARE(Ld2r(v19.V4S(), v20.V4S(), MemOperand(x5)),
- "ld2r {v19.4s, v20.4s}, [x5]");
- COMPARE(Ld2r(v20.V2D(), v21.V2D(), MemOperand(sp)),
- "ld2r {v20.2d, v21.2d}, [sp]");
- COMPARE(Ld2r(v21.V8B(), v22.V8B(), MemOperand(x6, 2, PostIndex)),
- "ld2r {v21.8b, v22.8b}, [x6], #2");
- COMPARE(Ld2r(v22.V16B(), v23.V16B(), MemOperand(x7, x16, PostIndex)),
- "ld2r {v22.16b, v23.16b}, [x7], x16");
- COMPARE(Ld2r(v23.V4H(), v24.V4H(), MemOperand(x8, x17, PostIndex)),
- "ld2r {v23.4h, v24.4h}, [x8], x17");
- COMPARE(Ld2r(v24.V8H(), v25.V8H(), MemOperand(x9, 4, PostIndex)),
- "ld2r {v24.8h, v25.8h}, [x9], #4");
- COMPARE(Ld2r(v25.V2S(), v26.V2S(), MemOperand(x10, 8, PostIndex)),
- "ld2r {v25.2s, v26.2s}, [x10], #8");
- COMPARE(Ld2r(v26.V4S(), v27.V4S(), MemOperand(x11, x18, PostIndex)),
- "ld2r {v26.4s, v27.4s}, [x11], x18");
- COMPARE(Ld2r(v27.V2D(), v28.V2D(), MemOperand(x12, 16, PostIndex)),
- "ld2r {v27.2d, v28.2d}, [x12], #16");
+ COMPARE_MACRO(Ld2r(v14.V8B(), v15.V8B(), MemOperand(x0)),
+ "ld2r {v14.8b, v15.8b}, [x0]");
+ COMPARE_MACRO(Ld2r(v15.V16B(), v16.V16B(), MemOperand(x1)),
+ "ld2r {v15.16b, v16.16b}, [x1]");
+ COMPARE_MACRO(Ld2r(v16.V4H(), v17.V4H(), MemOperand(x2)),
+ "ld2r {v16.4h, v17.4h}, [x2]");
+ COMPARE_MACRO(Ld2r(v17.V8H(), v18.V8H(), MemOperand(x3)),
+ "ld2r {v17.8h, v18.8h}, [x3]");
+ COMPARE_MACRO(Ld2r(v18.V2S(), v19.V2S(), MemOperand(x4)),
+ "ld2r {v18.2s, v19.2s}, [x4]");
+ COMPARE_MACRO(Ld2r(v19.V4S(), v20.V4S(), MemOperand(x5)),
+ "ld2r {v19.4s, v20.4s}, [x5]");
+ COMPARE_MACRO(Ld2r(v20.V2D(), v21.V2D(), MemOperand(sp)),
+ "ld2r {v20.2d, v21.2d}, [sp]");
+ COMPARE_MACRO(Ld2r(v21.V8B(), v22.V8B(), MemOperand(x6, 2, PostIndex)),
+ "ld2r {v21.8b, v22.8b}, [x6], #2");
+ COMPARE_MACRO(Ld2r(v22.V16B(), v23.V16B(), MemOperand(x7, x16, PostIndex)),
+ "ld2r {v22.16b, v23.16b}, [x7], x16");
+ COMPARE_MACRO(Ld2r(v23.V4H(), v24.V4H(), MemOperand(x8, x17, PostIndex)),
+ "ld2r {v23.4h, v24.4h}, [x8], x17");
+ COMPARE_MACRO(Ld2r(v24.V8H(), v25.V8H(), MemOperand(x9, 4, PostIndex)),
+ "ld2r {v24.8h, v25.8h}, [x9], #4");
+ COMPARE_MACRO(Ld2r(v25.V2S(), v26.V2S(), MemOperand(x10, 8, PostIndex)),
+ "ld2r {v25.2s, v26.2s}, [x10], #8");
+ COMPARE_MACRO(Ld2r(v26.V4S(), v27.V4S(), MemOperand(x11, x18, PostIndex)),
+ "ld2r {v26.4s, v27.4s}, [x11], x18");
+ COMPARE_MACRO(Ld2r(v27.V2D(), v28.V2D(), MemOperand(x12, 16, PostIndex)),
+ "ld2r {v27.2d, v28.2d}, [x12], #16");
- COMPARE(Ld3r(v14.V8B(), v15.V8B(), v16.V8B(),
- MemOperand(x0)),
- "ld3r {v14.8b, v15.8b, v16.8b}, [x0]");
- COMPARE(Ld3r(v15.V16B(), v16.V16B(), v17.V16B(),
- MemOperand(x1)),
- "ld3r {v15.16b, v16.16b, v17.16b}, [x1]");
- COMPARE(Ld3r(v16.V4H(), v17.V4H(), v18.V4H(),
- MemOperand(x2)),
- "ld3r {v16.4h, v17.4h, v18.4h}, [x2]");
- COMPARE(Ld3r(v17.V8H(), v18.V8H(), v19.V8H(),
- MemOperand(x3)),
- "ld3r {v17.8h, v18.8h, v19.8h}, [x3]");
- COMPARE(Ld3r(v18.V2S(), v19.V2S(), v20.V2S(),
- MemOperand(x4)),
- "ld3r {v18.2s, v19.2s, v20.2s}, [x4]");
- COMPARE(Ld3r(v19.V4S(), v20.V4S(), v21.V4S(),
- MemOperand(x5)),
- "ld3r {v19.4s, v20.4s, v21.4s}, [x5]");
- COMPARE(Ld3r(v20.V2D(), v21.V2D(), v22.V2D(),
- MemOperand(sp)),
- "ld3r {v20.2d, v21.2d, v22.2d}, [sp]");
- COMPARE(Ld3r(v21.V8B(), v22.V8B(), v23.V8B(),
- MemOperand(x6, 3, PostIndex)),
- "ld3r {v21.8b, v22.8b, v23.8b}, [x6], #3");
- COMPARE(Ld3r(v22.V16B(), v23.V16B(), v24.V16B(),
- MemOperand(x7, x16, PostIndex)),
- "ld3r {v22.16b, v23.16b, v24.16b}, [x7], x16");
- COMPARE(Ld3r(v23.V4H(), v24.V4H(), v25.V4H(),
- MemOperand(x8, x17, PostIndex)),
- "ld3r {v23.4h, v24.4h, v25.4h}, [x8], x17");
- COMPARE(Ld3r(v24.V8H(), v25.V8H(), v26.V8H(),
- MemOperand(x9, 6, PostIndex)),
- "ld3r {v24.8h, v25.8h, v26.8h}, [x9], #6");
- COMPARE(Ld3r(v25.V2S(), v26.V2S(), v27.V2S(),
- MemOperand(x10, 12, PostIndex)),
- "ld3r {v25.2s, v26.2s, v27.2s}, [x10], #12");
- COMPARE(Ld3r(v26.V4S(), v27.V4S(), v28.V4S(),
- MemOperand(x11, x18, PostIndex)),
- "ld3r {v26.4s, v27.4s, v28.4s}, [x11], x18");
- COMPARE(Ld3r(v27.V2D(), v28.V2D(), v29.V2D(),
- MemOperand(x12, 24, PostIndex)),
- "ld3r {v27.2d, v28.2d, v29.2d}, [x12], #24");
+ COMPARE_MACRO(Ld3r(v14.V8B(), v15.V8B(), v16.V8B(), MemOperand(x0)),
+ "ld3r {v14.8b, v15.8b, v16.8b}, [x0]");
+ COMPARE_MACRO(Ld3r(v15.V16B(), v16.V16B(), v17.V16B(), MemOperand(x1)),
+ "ld3r {v15.16b, v16.16b, v17.16b}, [x1]");
+ COMPARE_MACRO(Ld3r(v16.V4H(), v17.V4H(), v18.V4H(), MemOperand(x2)),
+ "ld3r {v16.4h, v17.4h, v18.4h}, [x2]");
+ COMPARE_MACRO(Ld3r(v17.V8H(), v18.V8H(), v19.V8H(), MemOperand(x3)),
+ "ld3r {v17.8h, v18.8h, v19.8h}, [x3]");
+ COMPARE_MACRO(Ld3r(v18.V2S(), v19.V2S(), v20.V2S(), MemOperand(x4)),
+ "ld3r {v18.2s, v19.2s, v20.2s}, [x4]");
+ COMPARE_MACRO(Ld3r(v19.V4S(), v20.V4S(), v21.V4S(), MemOperand(x5)),
+ "ld3r {v19.4s, v20.4s, v21.4s}, [x5]");
+ COMPARE_MACRO(Ld3r(v20.V2D(), v21.V2D(), v22.V2D(), MemOperand(sp)),
+ "ld3r {v20.2d, v21.2d, v22.2d}, [sp]");
+ COMPARE_MACRO(Ld3r(v21.V8B(),
+ v22.V8B(),
+ v23.V8B(),
+ MemOperand(x6, 3, PostIndex)),
+ "ld3r {v21.8b, v22.8b, v23.8b}, [x6], #3");
+ COMPARE_MACRO(Ld3r(v22.V16B(),
+ v23.V16B(),
+ v24.V16B(),
+ MemOperand(x7, x16, PostIndex)),
+ "ld3r {v22.16b, v23.16b, v24.16b}, [x7], x16");
+ COMPARE_MACRO(Ld3r(v23.V4H(),
+ v24.V4H(),
+ v25.V4H(),
+ MemOperand(x8, x17, PostIndex)),
+ "ld3r {v23.4h, v24.4h, v25.4h}, [x8], x17");
+ COMPARE_MACRO(Ld3r(v24.V8H(),
+ v25.V8H(),
+ v26.V8H(),
+ MemOperand(x9, 6, PostIndex)),
+ "ld3r {v24.8h, v25.8h, v26.8h}, [x9], #6");
+ COMPARE_MACRO(Ld3r(v25.V2S(),
+ v26.V2S(),
+ v27.V2S(),
+ MemOperand(x10, 12, PostIndex)),
+ "ld3r {v25.2s, v26.2s, v27.2s}, [x10], #12");
+ COMPARE_MACRO(Ld3r(v26.V4S(),
+ v27.V4S(),
+ v28.V4S(),
+ MemOperand(x11, x18, PostIndex)),
+ "ld3r {v26.4s, v27.4s, v28.4s}, [x11], x18");
+ COMPARE_MACRO(Ld3r(v27.V2D(),
+ v28.V2D(),
+ v29.V2D(),
+ MemOperand(x12, 24, PostIndex)),
+ "ld3r {v27.2d, v28.2d, v29.2d}, [x12], #24");
- COMPARE(Ld4r(v14.V8B(), v15.V8B(), v16.V8B(), v17.V8B(),
- MemOperand(x0)),
- "ld4r {v14.8b, v15.8b, v16.8b, v17.8b}, [x0]");
- COMPARE(Ld4r(v15.V16B(), v16.V16B(), v17.V16B(), v18.V16B(),
- MemOperand(x1)),
- "ld4r {v15.16b, v16.16b, v17.16b, v18.16b}, [x1]");
- COMPARE(Ld4r(v16.V4H(), v17.V4H(), v18.V4H(), v19.V4H(),
- MemOperand(x2)),
- "ld4r {v16.4h, v17.4h, v18.4h, v19.4h}, [x2]");
- COMPARE(Ld4r(v17.V8H(), v18.V8H(), v19.V8H(), v20.V8H(),
- MemOperand(x3)),
- "ld4r {v17.8h, v18.8h, v19.8h, v20.8h}, [x3]");
- COMPARE(Ld4r(v18.V2S(), v19.V2S(), v20.V2S(), v21.V2S(),
- MemOperand(x4)),
- "ld4r {v18.2s, v19.2s, v20.2s, v21.2s}, [x4]");
- COMPARE(Ld4r(v19.V4S(), v20.V4S(), v21.V4S(), v22.V4S(),
- MemOperand(x5)),
- "ld4r {v19.4s, v20.4s, v21.4s, v22.4s}, [x5]");
- COMPARE(Ld4r(v20.V2D(), v21.V2D(), v22.V2D(), v23.V2D(),
- MemOperand(sp)),
- "ld4r {v20.2d, v21.2d, v22.2d, v23.2d}, [sp]");
- COMPARE(Ld4r(v21.V8B(), v22.V8B(), v23.V8B(), v24.V8B(),
- MemOperand(x6, 4, PostIndex)),
- "ld4r {v21.8b, v22.8b, v23.8b, v24.8b}, [x6], #4");
- COMPARE(Ld4r(v22.V16B(), v23.V16B(), v24.V16B(), v25.V16B(),
- MemOperand(x7, x16, PostIndex)),
- "ld4r {v22.16b, v23.16b, v24.16b, v25.16b}, [x7], x16");
- COMPARE(Ld4r(v23.V4H(), v24.V4H(), v25.V4H(), v26.V4H(),
- MemOperand(x8, x17, PostIndex)),
- "ld4r {v23.4h, v24.4h, v25.4h, v26.4h}, [x8], x17");
- COMPARE(Ld4r(v24.V8H(), v25.V8H(), v26.V8H(), v27.V8H(),
- MemOperand(x9, 8, PostIndex)),
- "ld4r {v24.8h, v25.8h, v26.8h, v27.8h}, [x9], #8");
- COMPARE(Ld4r(v25.V2S(), v26.V2S(), v27.V2S(), v28.V2S(),
- MemOperand(x10, 16, PostIndex)),
- "ld4r {v25.2s, v26.2s, v27.2s, v28.2s}, [x10], #16");
- COMPARE(Ld4r(v26.V4S(), v27.V4S(), v28.V4S(), v29.V4S(),
- MemOperand(x11, x18, PostIndex)),
- "ld4r {v26.4s, v27.4s, v28.4s, v29.4s}, [x11], x18");
- COMPARE(Ld4r(v27.V2D(), v28.V2D(), v29.V2D(), v30.V2D(),
- MemOperand(x12, 32, PostIndex)),
- "ld4r {v27.2d, v28.2d, v29.2d, v30.2d}, [x12], #32");
+ COMPARE_MACRO(Ld4r(v14.V8B(),
+ v15.V8B(),
+ v16.V8B(),
+ v17.V8B(),
+ MemOperand(x0)),
+ "ld4r {v14.8b, v15.8b, v16.8b, v17.8b}, [x0]");
+ COMPARE_MACRO(Ld4r(v15.V16B(),
+ v16.V16B(),
+ v17.V16B(),
+ v18.V16B(),
+ MemOperand(x1)),
+ "ld4r {v15.16b, v16.16b, v17.16b, v18.16b}, [x1]");
+ COMPARE_MACRO(Ld4r(v16.V4H(),
+ v17.V4H(),
+ v18.V4H(),
+ v19.V4H(),
+ MemOperand(x2)),
+ "ld4r {v16.4h, v17.4h, v18.4h, v19.4h}, [x2]");
+ COMPARE_MACRO(Ld4r(v17.V8H(),
+ v18.V8H(),
+ v19.V8H(),
+ v20.V8H(),
+ MemOperand(x3)),
+ "ld4r {v17.8h, v18.8h, v19.8h, v20.8h}, [x3]");
+ COMPARE_MACRO(Ld4r(v18.V2S(),
+ v19.V2S(),
+ v20.V2S(),
+ v21.V2S(),
+ MemOperand(x4)),
+ "ld4r {v18.2s, v19.2s, v20.2s, v21.2s}, [x4]");
+ COMPARE_MACRO(Ld4r(v19.V4S(),
+ v20.V4S(),
+ v21.V4S(),
+ v22.V4S(),
+ MemOperand(x5)),
+ "ld4r {v19.4s, v20.4s, v21.4s, v22.4s}, [x5]");
+ COMPARE_MACRO(Ld4r(v20.V2D(),
+ v21.V2D(),
+ v22.V2D(),
+ v23.V2D(),
+ MemOperand(sp)),
+ "ld4r {v20.2d, v21.2d, v22.2d, v23.2d}, [sp]");
+ COMPARE_MACRO(Ld4r(v21.V8B(),
+ v22.V8B(),
+ v23.V8B(),
+ v24.V8B(),
+ MemOperand(x6, 4, PostIndex)),
+ "ld4r {v21.8b, v22.8b, v23.8b, v24.8b}, [x6], #4");
+ COMPARE_MACRO(Ld4r(v22.V16B(),
+ v23.V16B(),
+ v24.V16B(),
+ v25.V16B(),
+ MemOperand(x7, x16, PostIndex)),
+ "ld4r {v22.16b, v23.16b, v24.16b, v25.16b}, [x7], x16");
+ COMPARE_MACRO(Ld4r(v23.V4H(),
+ v24.V4H(),
+ v25.V4H(),
+ v26.V4H(),
+ MemOperand(x8, x17, PostIndex)),
+ "ld4r {v23.4h, v24.4h, v25.4h, v26.4h}, [x8], x17");
+ COMPARE_MACRO(Ld4r(v24.V8H(),
+ v25.V8H(),
+ v26.V8H(),
+ v27.V8H(),
+ MemOperand(x9, 8, PostIndex)),
+ "ld4r {v24.8h, v25.8h, v26.8h, v27.8h}, [x9], #8");
+ COMPARE_MACRO(Ld4r(v25.V2S(),
+ v26.V2S(),
+ v27.V2S(),
+ v28.V2S(),
+ MemOperand(x10, 16, PostIndex)),
+ "ld4r {v25.2s, v26.2s, v27.2s, v28.2s}, [x10], #16");
+ COMPARE_MACRO(Ld4r(v26.V4S(),
+ v27.V4S(),
+ v28.V4S(),
+ v29.V4S(),
+ MemOperand(x11, x18, PostIndex)),
+ "ld4r {v26.4s, v27.4s, v28.4s, v29.4s}, [x11], x18");
+ COMPARE_MACRO(Ld4r(v27.V2D(),
+ v28.V2D(),
+ v29.V2D(),
+ v30.V2D(),
+ MemOperand(x12, 32, PostIndex)),
+ "ld4r {v27.2d, v28.2d, v29.2d, v30.2d}, [x12], #32");
CLEANUP();
}
@@ -4005,1169 +4238,1350 @@
TEST(neon_3same) {
- SETUP_MACRO();
+ SETUP();
- #define DISASM_INST(M, S) \
- COMPARE(Cmeq(v0.M, v1.M, v2.M), "cmeq v0." S ", v1." S ", v2." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Cmeq(v0.M, v1.M, v2.M), "cmeq v0." S ", v1." S ", v2." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Cmge(v0.M, v1.M, v2.M), "cmge v0." S ", v1." S ", v2." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Cmge(v0.M, v1.M, v2.M), "cmge v0." S ", v1." S ", v2." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Cmgt(v0.M, v1.M, v2.M), "cmgt v0." S ", v1." S ", v2." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Cmgt(v0.M, v1.M, v2.M), "cmgt v0." S ", v1." S ", v2." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Cmhi(v0.M, v1.M, v2.M), "cmhi v0." S ", v1." S ", v2." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Cmhi(v0.M, v1.M, v2.M), "cmhi v0." S ", v1." S ", v2." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Cmhs(v0.M, v1.M, v2.M), "cmhs v0." S ", v1." S ", v2." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Cmhs(v0.M, v1.M, v2.M), "cmhs v0." S ", v1." S ", v2." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Cmtst(v0.M, v1.M, v2.M), "cmtst v0." S ", v1." S ", v2." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Cmtst(v0.M, v1.M, v2.M), "cmtst v0." S ", v1." S ", v2." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Add(v0.M, v1.M, v2.M), "add v0." S ", v1." S ", v2." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Add(v0.M, v1.M, v2.M), "add v0." S ", v1." S ", v2." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Sub(v3.M, v4.M, v5.M), "sub v3." S ", v4." S ", v5." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Sub(v3.M, v4.M, v5.M), "sub v3." S ", v4." S ", v5." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Sabd(v3.M, v4.M, v5.M), "sabd v3." S ", v4." S ", v5." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Sabd(v3.M, v4.M, v5.M), "sabd v3." S ", v4." S ", v5." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Uabd(v3.M, v4.M, v5.M), "uabd v3." S ", v4." S ", v5." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Uabd(v3.M, v4.M, v5.M), "uabd v3." S ", v4." S ", v5." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Saba(v3.M, v4.M, v5.M), "saba v3." S ", v4." S ", v5." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Saba(v3.M, v4.M, v5.M), "saba v3." S ", v4." S ", v5." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Uaba(v3.M, v4.M, v5.M), "uaba v3." S ", v4." S ", v5." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Uaba(v3.M, v4.M, v5.M), "uaba v3." S ", v4." S ", v5." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Smax(v3.M, v4.M, v5.M), "smax v3." S ", v4." S ", v5." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Smax(v3.M, v4.M, v5.M), "smax v3." S ", v4." S ", v5." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Smin(v3.M, v4.M, v5.M), "smin v3." S ", v4." S ", v5." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Smin(v3.M, v4.M, v5.M), "smin v3." S ", v4." S ", v5." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Umax(v3.M, v4.M, v5.M), "umax v3." S ", v4." S ", v5." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Umax(v3.M, v4.M, v5.M), "umax v3." S ", v4." S ", v5." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Umin(v3.M, v4.M, v5.M), "umin v3." S ", v4." S ", v5." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Umin(v3.M, v4.M, v5.M), "umin v3." S ", v4." S ", v5." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Smaxp(v3.M, v4.M, v5.M), "smaxp v3." S ", v4." S ", v5." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Smaxp(v3.M, v4.M, v5.M), "smaxp v3." S ", v4." S ", v5." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Sminp(v3.M, v4.M, v5.M), "sminp v3." S ", v4." S ", v5." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Sminp(v3.M, v4.M, v5.M), "sminp v3." S ", v4." S ", v5." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Umaxp(v3.M, v4.M, v5.M), "umaxp v3." S ", v4." S ", v5." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Umaxp(v3.M, v4.M, v5.M), "umaxp v3." S ", v4." S ", v5." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Uminp(v3.M, v4.M, v5.M), "uminp v3." S ", v4." S ", v5." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Uminp(v3.M, v4.M, v5.M), "uminp v3." S ", v4." S ", v5." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Uqadd(v6.M, v7.M, v8.M), "uqadd v6." S ", v7." S ", v8." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Uqadd(v6.M, v7.M, v8.M), "uqadd v6." S ", v7." S ", v8." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Sqadd(v9.M, v10.M, v11.M), "sqadd v9." S ", v10." S ", v11." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Sqadd(v9.M, v10.M, v11.M), \
+ "sqadd v9." S ", v10." S \
+ ", " \
+ "v11." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Uqsub(v6.M, v7.M, v8.M), "uqsub v6." S ", v7." S ", v8." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Uqsub(v6.M, v7.M, v8.M), "uqsub v6." S ", v7." S ", v8." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Sqsub(v9.M, v10.M, v11.M), "sqsub v9." S ", v10." S ", v11." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Sqsub(v9.M, v10.M, v11.M), \
+ "sqsub v9." S ", v10." S \
+ ", " \
+ "v11." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Sshl(v12.M, v13.M, v14.M), "sshl v12." S ", v13." S ", v14." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Sshl(v12.M, v13.M, v14.M), \
+ "sshl v12." S ", v13." S \
+ ", " \
+ "v14." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Ushl(v15.M, v16.M, v17.M), "ushl v15." S ", v16." S ", v17." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Ushl(v15.M, v16.M, v17.M), \
+ "ushl v15." S ", v16." S \
+ ", " \
+ "v17." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Sqshl(v18.M, v19.M, v20.M), "sqshl v18." S ", v19." S ", v20." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Sqshl(v18.M, v19.M, v20.M), \
+ "sqshl v18." S ", v19." S ", v20." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Uqshl(v21.M, v22.M, v23.M), "uqshl v21." S ", v22." S ", v23." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Uqshl(v21.M, v22.M, v23.M), \
+ "uqshl v21." S ", v22." S ", v23." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Srshl(v24.M, v25.M, v26.M), "srshl v24." S ", v25." S ", v26." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Srshl(v24.M, v25.M, v26.M), \
+ "srshl v24." S ", v25." S ", v26." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Urshl(v27.M, v28.M, v29.M), "urshl v27." S ", v28." S ", v29." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Urshl(v27.M, v28.M, v29.M), \
+ "urshl v27." S ", v28." S ", v29." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Sqrshl(v30.M, v31.M, v0.M), "sqrshl v30." S ", v31." S ", v0." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Sqrshl(v30.M, v31.M, v0.M), \
+ "sqrshl v30." S ", v31." S ", v0." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Uqrshl(v1.M, v2.M, v3.M), "uqrshl v1." S ", v2." S ", v3." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Uqrshl(v1.M, v2.M, v3.M), "uqrshl v1." S ", v2." S ", v3." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Shadd(v4.M, v5.M, v6.M), "shadd v4." S ", v5." S ", v6." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Shadd(v4.M, v5.M, v6.M), "shadd v4." S ", v5." S ", v6." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Uhadd(v7.M, v8.M, v9.M), "uhadd v7." S ", v8." S ", v9." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Uhadd(v7.M, v8.M, v9.M), "uhadd v7." S ", v8." S ", v9." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Srhadd(v10.M, v11.M, v12.M), "srhadd v10." S ", v11." S ", v12." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Srhadd(v10.M, v11.M, v12.M), \
+ "srhadd v10." S ", v11." S ", v12." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Urhadd(v13.M, v14.M, v15.M), "urhadd v13." S ", v14." S ", v15." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Urhadd(v13.M, v14.M, v15.M), \
+ "urhadd v13." S ", v14." S ", v15." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Shsub(v16.M, v17.M, v18.M), "shsub v16." S ", v17." S ", v18." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Shsub(v16.M, v17.M, v18.M), \
+ "shsub v16." S ", v17." S ", v18." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Uhsub(v19.M, v20.M, v21.M), "uhsub v19." S ", v20." S ", v21." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Uhsub(v19.M, v20.M, v21.M), \
+ "uhsub v19." S ", v20." S ", v21." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Addp(v19.M, v20.M, v21.M), "addp v19." S ", v20." S ", v21." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Addp(v19.M, v20.M, v21.M), \
+ "addp v19." S ", v20." S \
+ ", " \
+ "v21." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Mla(v19.M, v20.M, v21.M), "mla v19." S ", v20." S ", v21." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Mla(v19.M, v20.M, v21.M), "mla v19." S ", v20." S ", v21." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Mls(v19.M, v20.M, v21.M), "mls v19." S ", v20." S ", v21." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Mls(v19.M, v20.M, v21.M), "mls v19." S ", v20." S ", v21." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Mul(v19.M, v20.M, v21.M), "mul v19." S ", v20." S ", v21." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Mul(v19.M, v20.M, v21.M), "mul v19." S ", v20." S ", v21." S);
NEON_FORMAT_LIST_BHS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Sqdmulh(v1.M, v2.M, v3.M), "sqdmulh v1." S ", v2." S ", v3." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Sqdmulh(v1.M, v2.M, v3.M), \
+ "sqdmulh v1." S ", v2." S \
+ ", " \
+ "v3." S);
NEON_FORMAT_LIST_HS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Sqrdmulh(v1.M, v2.M, v3.M), "sqrdmulh v1." S ", v2." S ", v3." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Sqrdmulh(v1.M, v2.M, v3.M), \
+ "sqrdmulh v1." S ", v2." S ", v3." S);
NEON_FORMAT_LIST_HS(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- COMPARE(And(v6.V8B(), v7.V8B(), v8.V8B()), "and v6.8b, v7.8b, v8.8b");
- COMPARE(And(v6.V16B(), v7.V16B(), v8.V16B()), "and v6.16b, v7.16b, v8.16b");
+ COMPARE_MACRO(And(v6.V8B(), v7.V8B(), v8.V8B()), "and v6.8b, v7.8b, v8.8b");
+ COMPARE_MACRO(And(v6.V16B(), v7.V16B(), v8.V16B()),
+ "and v6.16b, v7.16b, v8.16b");
- COMPARE(Bic(v6.V8B(), v7.V8B(), v8.V8B()), "bic v6.8b, v7.8b, v8.8b");
- COMPARE(Bic(v6.V16B(), v7.V16B(), v8.V16B()), "bic v6.16b, v7.16b, v8.16b");
+ COMPARE_MACRO(Bic(v6.V8B(), v7.V8B(), v8.V8B()), "bic v6.8b, v7.8b, v8.8b");
+ COMPARE_MACRO(Bic(v6.V16B(), v7.V16B(), v8.V16B()),
+ "bic v6.16b, v7.16b, v8.16b");
- COMPARE(Orr(v6.V8B(), v7.V8B(), v8.V8B()), "orr v6.8b, v7.8b, v8.8b");
- COMPARE(Orr(v6.V16B(), v7.V16B(), v8.V16B()), "orr v6.16b, v7.16b, v8.16b");
+ COMPARE_MACRO(Orr(v6.V8B(), v7.V8B(), v8.V8B()), "orr v6.8b, v7.8b, v8.8b");
+ COMPARE_MACRO(Orr(v6.V16B(), v7.V16B(), v8.V16B()),
+ "orr v6.16b, v7.16b, v8.16b");
- COMPARE(Orr(v6.V8B(), v7.V8B(), v7.V8B()), "mov v6.8b, v7.8b");
- COMPARE(Orr(v6.V16B(), v7.V16B(), v7.V16B()), "mov v6.16b, v7.16b");
+ COMPARE_MACRO(Orr(v6.V8B(), v7.V8B(), v7.V8B()), "mov v6.8b, v7.8b");
+ COMPARE_MACRO(Orr(v6.V16B(), v7.V16B(), v7.V16B()), "mov v6.16b, v7.16b");
- COMPARE(Mov(v6.V8B(), v8.V8B()), "mov v6.8b, v8.8b");
- COMPARE(Mov(v6.V16B(), v8.V16B()), "mov v6.16b, v8.16b");
+ COMPARE_MACRO(Mov(v6.V8B(), v8.V8B()), "mov v6.8b, v8.8b");
+ COMPARE_MACRO(Mov(v6.V16B(), v8.V16B()), "mov v6.16b, v8.16b");
- COMPARE(Orn(v6.V8B(), v7.V8B(), v8.V8B()), "orn v6.8b, v7.8b, v8.8b");
- COMPARE(Orn(v6.V16B(), v7.V16B(), v8.V16B()), "orn v6.16b, v7.16b, v8.16b");
+ COMPARE_MACRO(Orn(v6.V8B(), v7.V8B(), v8.V8B()), "orn v6.8b, v7.8b, v8.8b");
+ COMPARE_MACRO(Orn(v6.V16B(), v7.V16B(), v8.V16B()),
+ "orn v6.16b, v7.16b, v8.16b");
- COMPARE(Eor(v6.V8B(), v7.V8B(), v8.V8B()), "eor v6.8b, v7.8b, v8.8b");
- COMPARE(Eor(v6.V16B(), v7.V16B(), v8.V16B()), "eor v6.16b, v7.16b, v8.16b");
+ COMPARE_MACRO(Eor(v6.V8B(), v7.V8B(), v8.V8B()), "eor v6.8b, v7.8b, v8.8b");
+ COMPARE_MACRO(Eor(v6.V16B(), v7.V16B(), v8.V16B()),
+ "eor v6.16b, v7.16b, v8.16b");
- COMPARE(Bif(v6.V8B(), v7.V8B(), v8.V8B()), "bif v6.8b, v7.8b, v8.8b");
- COMPARE(Bif(v6.V16B(), v7.V16B(), v8.V16B()), "bif v6.16b, v7.16b, v8.16b");
+ COMPARE_MACRO(Bif(v6.V8B(), v7.V8B(), v8.V8B()), "bif v6.8b, v7.8b, v8.8b");
+ COMPARE_MACRO(Bif(v6.V16B(), v7.V16B(), v8.V16B()),
+ "bif v6.16b, v7.16b, v8.16b");
- COMPARE(Bit(v6.V8B(), v7.V8B(), v8.V8B()), "bit v6.8b, v7.8b, v8.8b");
- COMPARE(Bit(v6.V16B(), v7.V16B(), v8.V16B()), "bit v6.16b, v7.16b, v8.16b");
+ COMPARE_MACRO(Bit(v6.V8B(), v7.V8B(), v8.V8B()), "bit v6.8b, v7.8b, v8.8b");
+ COMPARE_MACRO(Bit(v6.V16B(), v7.V16B(), v8.V16B()),
+ "bit v6.16b, v7.16b, v8.16b");
- COMPARE(Bsl(v6.V8B(), v7.V8B(), v8.V8B()), "bsl v6.8b, v7.8b, v8.8b");
- COMPARE(Bsl(v6.V16B(), v7.V16B(), v8.V16B()), "bsl v6.16b, v7.16b, v8.16b");
+ COMPARE_MACRO(Bsl(v6.V8B(), v7.V8B(), v8.V8B()), "bsl v6.8b, v7.8b, v8.8b");
+ COMPARE_MACRO(Bsl(v6.V16B(), v7.V16B(), v8.V16B()),
+ "bsl v6.16b, v7.16b, v8.16b");
- COMPARE(Pmul(v6.V8B(), v7.V8B(), v8.V8B()), "pmul v6.8b, v7.8b, v8.8b");
- COMPARE(Pmul(v6.V16B(), v7.V16B(), v8.V16B()), "pmul v6.16b, v7.16b, v8.16b");
+ COMPARE_MACRO(Pmul(v6.V8B(), v7.V8B(), v8.V8B()), "pmul v6.8b, v7.8b, v8.8b");
+ COMPARE_MACRO(Pmul(v6.V16B(), v7.V16B(), v8.V16B()),
+ "pmul v6.16b, v7.16b, v8.16b");
CLEANUP();
}
-#define NEON_FORMAT_LIST_FP(V) \
- V(V2S(), "2s") \
- V(V4S(), "4s") \
+#define NEON_FORMAT_LIST_FP(V) \
+ V(V2S(), "2s") \
+ V(V4S(), "4s") \
V(V2D(), "2d")
TEST(neon_fp_3same) {
- SETUP_MACRO();
+ SETUP();
- #define DISASM_INST(M, S) \
- COMPARE(Fadd(v0.M, v1.M, v2.M), "fadd v0." S ", v1." S ", v2." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fadd(v0.M, v1.M, v2.M), "fadd v0." S ", v1." S ", v2." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Fsub(v3.M, v4.M, v5.M), "fsub v3." S ", v4." S ", v5." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fsub(v3.M, v4.M, v5.M), "fsub v3." S ", v4." S ", v5." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Fmul(v6.M, v7.M, v8.M), "fmul v6." S ", v7." S ", v8." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fmul(v6.M, v7.M, v8.M), "fmul v6." S ", v7." S ", v8." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Fdiv(v9.M, v10.M, v11.M), "fdiv v9." S ", v10." S ", v11." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fdiv(v9.M, v10.M, v11.M), "fdiv v9." S ", v10." S ", v11." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Fmin(v12.M, v13.M, v14.M), "fmin v12." S ", v13." S ", v14." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fmin(v12.M, v13.M, v14.M), \
+ "fmin v12." S ", v13." S \
+ ", " \
+ "v14." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Fminnm(v15.M, v16.M, v17.M), "fminnm v15." S ", v16." S ", v17." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fminnm(v15.M, v16.M, v17.M), \
+ "fminnm v15." S ", v16." S ", v17." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Fmax(v18.M, v19.M, v20.M), "fmax v18." S ", v19." S ", v20." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fmax(v18.M, v19.M, v20.M), \
+ "fmax v18." S ", v19." S \
+ ", " \
+ "v20." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Fmaxnm(v21.M, v22.M, v23.M), "fmaxnm v21." S ", v22." S ", v23." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fmaxnm(v21.M, v22.M, v23.M), \
+ "fmaxnm v21." S ", v22." S ", v23." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Frecps(v24.M, v25.M, v26.M), "frecps v24." S ", v25." S ", v26." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Frecps(v24.M, v25.M, v26.M), \
+ "frecps v24." S ", v25." S ", v26." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Frsqrts(v27.M, v28.M, v29.M), "frsqrts v27." S ", v28." S ", v29." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Frsqrts(v27.M, v28.M, v29.M), \
+ "frsqrts v27." S ", v28." S ", v29." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Fmulx(v30.M, v31.M, v0.M), "fmulx v30." S ", v31." S ", v0." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fmulx(v30.M, v31.M, v0.M), \
+ "fmulx v30." S ", v31." S \
+ ", " \
+ "v0." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Fmla(v1.M, v2.M, v3.M), "fmla v1." S ", v2." S ", v3." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fmla(v1.M, v2.M, v3.M), "fmla v1." S ", v2." S ", v3." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Fmls(v4.M, v5.M, v6.M), "fmls v4." S ", v5." S ", v6." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fmls(v4.M, v5.M, v6.M), "fmls v4." S ", v5." S ", v6." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Fabd(v7.M, v8.M, v9.M), "fabd v7." S ", v8." S ", v9." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fabd(v7.M, v8.M, v9.M), "fabd v7." S ", v8." S ", v9." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Faddp(v10.M, v11.M, v12.M), "faddp v10." S ", v11." S ", v12." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Faddp(v10.M, v11.M, v12.M), \
+ "faddp v10." S ", v11." S ", v12." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Fmaxp(v13.M, v14.M, v15.M), "fmaxp v13." S ", v14." S ", v15." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fmaxp(v13.M, v14.M, v15.M), \
+ "fmaxp v13." S ", v14." S ", v15." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Fminp(v16.M, v17.M, v18.M), "fminp v16." S ", v17." S ", v18." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fminp(v16.M, v17.M, v18.M), \
+ "fminp v16." S ", v17." S ", v18." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Fmaxnmp(v19.M, v20.M, v21.M), "fmaxnmp v19." S ", v20." S ", v21." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fmaxnmp(v19.M, v20.M, v21.M), \
+ "fmaxnmp v19." S ", v20." S ", v21." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Fminnmp(v22.M, v23.M, v24.M), "fminnmp v22." S ", v23." S ", v24." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fminnmp(v22.M, v23.M, v24.M), \
+ "fminnmp v22." S ", v23." S ", v24." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Fcmeq(v25.M, v26.M, v27.M), "fcmeq v25." S ", v26." S ", v27." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fcmeq(v25.M, v26.M, v27.M), \
+ "fcmeq v25." S ", v26." S ", v27." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Fcmge(v25.M, v26.M, v27.M), "fcmge v25." S ", v26." S ", v27." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fcmge(v25.M, v26.M, v27.M), \
+ "fcmge v25." S ", v26." S ", v27." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Fcmgt(v25.M, v26.M, v27.M), "fcmgt v25." S ", v26." S ", v27." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fcmgt(v25.M, v26.M, v27.M), \
+ "fcmgt v25." S ", v26." S ", v27." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Facge(v25.M, v26.M, v27.M), "facge v25." S ", v26." S ", v27." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Facge(v25.M, v26.M, v27.M), \
+ "facge v25." S ", v26." S ", v27." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Facgt(v25.M, v26.M, v27.M), "facgt v25." S ", v26." S ", v27." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Facgt(v25.M, v26.M, v27.M), \
+ "facgt v25." S ", v26." S ", v27." S);
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
CLEANUP();
}
-#define NEON_SCALAR_FORMAT_LIST(V) \
- V(B(), "b") \
- V(H(), "h") \
- V(S(), "s") \
+#define NEON_SCALAR_FORMAT_LIST(V) \
+ V(B(), "b") \
+ V(H(), "h") \
+ V(S(), "s") \
V(D(), "d")
TEST(neon_scalar_3same) {
- SETUP_MACRO();
+ SETUP();
// Instructions that only support D-sized scalar operations.
- COMPARE(Add(v0.D(), v1.D(), v2.D()), "add d0, d1, d2");
- COMPARE(Sub(v3.D(), v4.D(), v5.D()), "sub d3, d4, d5");
- COMPARE(Cmeq(v0.D(), v1.D(), v2.D()), "cmeq d0, d1, d2");
- COMPARE(Cmge(v3.D(), v4.D(), v5.D()), "cmge d3, d4, d5");
- COMPARE(Cmgt(v6.D(), v7.D(), v8.D()), "cmgt d6, d7, d8");
- COMPARE(Cmhi(v0.D(), v1.D(), v2.D()), "cmhi d0, d1, d2");
- COMPARE(Cmhs(v3.D(), v4.D(), v5.D()), "cmhs d3, d4, d5");
- COMPARE(Cmtst(v6.D(), v7.D(), v8.D()), "cmtst d6, d7, d8");
- COMPARE(Ushl(v6.D(), v7.D(), v8.D()), "ushl d6, d7, d8");
- COMPARE(Sshl(v6.D(), v7.D(), v8.D()), "sshl d6, d7, d8");
- COMPARE(Urshl(v9.D(), v10.D(), v11.D()), "urshl d9, d10, d11");
- COMPARE(Srshl(v9.D(), v10.D(), v11.D()), "srshl d9, d10, d11");
+ COMPARE_MACRO(Add(v0.D(), v1.D(), v2.D()), "add d0, d1, d2");
+ COMPARE_MACRO(Sub(v3.D(), v4.D(), v5.D()), "sub d3, d4, d5");
+ COMPARE_MACRO(Cmeq(v0.D(), v1.D(), v2.D()), "cmeq d0, d1, d2");
+ COMPARE_MACRO(Cmge(v3.D(), v4.D(), v5.D()), "cmge d3, d4, d5");
+ COMPARE_MACRO(Cmgt(v6.D(), v7.D(), v8.D()), "cmgt d6, d7, d8");
+ COMPARE_MACRO(Cmhi(v0.D(), v1.D(), v2.D()), "cmhi d0, d1, d2");
+ COMPARE_MACRO(Cmhs(v3.D(), v4.D(), v5.D()), "cmhs d3, d4, d5");
+ COMPARE_MACRO(Cmtst(v6.D(), v7.D(), v8.D()), "cmtst d6, d7, d8");
+ COMPARE_MACRO(Ushl(v6.D(), v7.D(), v8.D()), "ushl d6, d7, d8");
+ COMPARE_MACRO(Sshl(v6.D(), v7.D(), v8.D()), "sshl d6, d7, d8");
+ COMPARE_MACRO(Urshl(v9.D(), v10.D(), v11.D()), "urshl d9, d10, d11");
+ COMPARE_MACRO(Srshl(v9.D(), v10.D(), v11.D()), "srshl d9, d10, d11");
// Instructions that support S and D-sized scalar operations.
- COMPARE(Frecps(v12.S(), v13.S(), v14.S()), "frecps s12, s13, s14");
- COMPARE(Frecps(v15.D(), v16.D(), v17.D()), "frecps d15, d16, d17");
- COMPARE(Frsqrts(v18.S(), v19.S(), v20.S()), "frsqrts s18, s19, s20");
- COMPARE(Frsqrts(v21.D(), v22.D(), v23.D()), "frsqrts d21, d22, d23");
- COMPARE(Fmulx(v12.S(), v13.S(), v14.S()), "fmulx s12, s13, s14");
- COMPARE(Fmulx(v15.D(), v16.D(), v17.D()), "fmulx d15, d16, d17");
- COMPARE(Fcmeq(v12.S(), v13.S(), v14.S()), "fcmeq s12, s13, s14");
- COMPARE(Fcmeq(v15.D(), v16.D(), v17.D()), "fcmeq d15, d16, d17");
- COMPARE(Fcmge(v12.S(), v13.S(), v14.S()), "fcmge s12, s13, s14");
- COMPARE(Fcmge(v15.D(), v16.D(), v17.D()), "fcmge d15, d16, d17");
- COMPARE(Fcmgt(v12.S(), v13.S(), v14.S()), "fcmgt s12, s13, s14");
- COMPARE(Fcmgt(v15.D(), v16.D(), v17.D()), "fcmgt d15, d16, d17");
- COMPARE(Fcmge(v12.S(), v13.S(), v14.S()), "fcmge s12, s13, s14");
- COMPARE(Fcmge(v15.D(), v16.D(), v17.D()), "fcmge d15, d16, d17");
- COMPARE(Facgt(v12.S(), v13.S(), v14.S()), "facgt s12, s13, s14");
- COMPARE(Facgt(v15.D(), v16.D(), v17.D()), "facgt d15, d16, d17");
+ COMPARE_MACRO(Frecps(v12.S(), v13.S(), v14.S()), "frecps s12, s13, s14");
+ COMPARE_MACRO(Frecps(v15.D(), v16.D(), v17.D()), "frecps d15, d16, d17");
+ COMPARE_MACRO(Frsqrts(v18.S(), v19.S(), v20.S()), "frsqrts s18, s19, s20");
+ COMPARE_MACRO(Frsqrts(v21.D(), v22.D(), v23.D()), "frsqrts d21, d22, d23");
+ COMPARE_MACRO(Fmulx(v12.S(), v13.S(), v14.S()), "fmulx s12, s13, s14");
+ COMPARE_MACRO(Fmulx(v15.D(), v16.D(), v17.D()), "fmulx d15, d16, d17");
+ COMPARE_MACRO(Fcmeq(v12.S(), v13.S(), v14.S()), "fcmeq s12, s13, s14");
+ COMPARE_MACRO(Fcmeq(v15.D(), v16.D(), v17.D()), "fcmeq d15, d16, d17");
+ COMPARE_MACRO(Fcmge(v12.S(), v13.S(), v14.S()), "fcmge s12, s13, s14");
+ COMPARE_MACRO(Fcmge(v15.D(), v16.D(), v17.D()), "fcmge d15, d16, d17");
+ COMPARE_MACRO(Fcmgt(v12.S(), v13.S(), v14.S()), "fcmgt s12, s13, s14");
+ COMPARE_MACRO(Fcmgt(v15.D(), v16.D(), v17.D()), "fcmgt d15, d16, d17");
+ COMPARE_MACRO(Fcmge(v12.S(), v13.S(), v14.S()), "fcmge s12, s13, s14");
+ COMPARE_MACRO(Fcmge(v15.D(), v16.D(), v17.D()), "fcmge d15, d16, d17");
+ COMPARE_MACRO(Facgt(v12.S(), v13.S(), v14.S()), "facgt s12, s13, s14");
+ COMPARE_MACRO(Facgt(v15.D(), v16.D(), v17.D()), "facgt d15, d16, d17");
// Instructions that support H and S-sized scalar operations.
- COMPARE(Sqdmulh(v12.S(), v13.S(), v14.S()), "sqdmulh s12, s13, s14");
- COMPARE(Sqdmulh(v15.H(), v16.H(), v17.H()), "sqdmulh h15, h16, h17");
- COMPARE(Sqrdmulh(v12.S(), v13.S(), v14.S()), "sqrdmulh s12, s13, s14");
- COMPARE(Sqrdmulh(v15.H(), v16.H(), v17.H()), "sqrdmulh h15, h16, h17");
+ COMPARE_MACRO(Sqdmulh(v12.S(), v13.S(), v14.S()), "sqdmulh s12, s13, s14");
+ COMPARE_MACRO(Sqdmulh(v15.H(), v16.H(), v17.H()), "sqdmulh h15, h16, h17");
+ COMPARE_MACRO(Sqrdmulh(v12.S(), v13.S(), v14.S()), "sqrdmulh s12, s13, s14");
+ COMPARE_MACRO(Sqrdmulh(v15.H(), v16.H(), v17.H()), "sqrdmulh h15, h16, h17");
- #define DISASM_INST(M, R) \
- COMPARE(Uqadd(v6.M, v7.M, v8.M), "uqadd " R "6, " R "7, " R "8");
+#define DISASM_INST(M, R) \
+ COMPARE_MACRO(Uqadd(v6.M, v7.M, v8.M), "uqadd " R "6, " R "7, " R "8");
NEON_SCALAR_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, R) \
- COMPARE(Uqsub(v9.M, v10.M, v11.M), "uqsub " R "9, " R "10, " R "11");
+#define DISASM_INST(M, R) \
+ COMPARE_MACRO(Uqsub(v9.M, v10.M, v11.M), "uqsub " R "9, " R "10, " R "11");
NEON_SCALAR_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, R) \
- COMPARE(Sqadd(v12.M, v13.M, v14.M), "sqadd " R "12, " R "13, " R "14");
+#define DISASM_INST(M, R) \
+ COMPARE_MACRO(Sqadd(v12.M, v13.M, v14.M), \
+ "sqadd " R "12, " R "13, " R \
+ "1" \
+ "4");
NEON_SCALAR_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, R) \
- COMPARE(Sqsub(v15.M, v16.M, v17.M), "sqsub " R "15, " R "16, " R "17");
+#define DISASM_INST(M, R) \
+ COMPARE_MACRO(Sqsub(v15.M, v16.M, v17.M), \
+ "sqsub " R "15, " R "16, " R \
+ "1" \
+ "7");
NEON_SCALAR_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, R) \
- COMPARE(Uqshl(v18.M, v19.M, v20.M), "uqshl " R "18, " R "19, " R "20");
+#define DISASM_INST(M, R) \
+ COMPARE_MACRO(Uqshl(v18.M, v19.M, v20.M), \
+ "uqshl " R "18, " R "19, " R \
+ "2" \
+ "0");
NEON_SCALAR_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, R) \
- COMPARE(Sqshl(v21.M, v22.M, v23.M), "sqshl " R "21, " R "22, " R "23");
+#define DISASM_INST(M, R) \
+ COMPARE_MACRO(Sqshl(v21.M, v22.M, v23.M), \
+ "sqshl " R "21, " R "22, " R \
+ "2" \
+ "3");
NEON_SCALAR_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, R) \
- COMPARE(Uqrshl(v30.M, v31.M, v0.M), "uqrshl " R "30, " R "31, " R "0");
+#define DISASM_INST(M, R) \
+ COMPARE_MACRO(Uqrshl(v30.M, v31.M, v0.M), "uqrshl " R "30, " R "31, " R "0");
NEON_SCALAR_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, R) \
- COMPARE(Sqrshl(v1.M, v2.M, v3.M), "sqrshl " R "1, " R "2, " R "3");
+#define DISASM_INST(M, R) \
+ COMPARE_MACRO(Sqrshl(v1.M, v2.M, v3.M), "sqrshl " R "1, " R "2, " R "3");
NEON_SCALAR_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
CLEANUP();
}
TEST(neon_byelement) {
- SETUP_MACRO();
+ SETUP();
- COMPARE(Mul(v0.V4H(), v1.V4H(), v2.H(), 0), "mul v0.4h, v1.4h, v2.h[0]");
- COMPARE(Mul(v2.V8H(), v3.V8H(), v15.H(), 7), "mul v2.8h, v3.8h, v15.h[7]");
- COMPARE(Mul(v0.V2S(), v1.V2S(), v2.S(), 0), "mul v0.2s, v1.2s, v2.s[0]");
- COMPARE(Mul(v2.V4S(), v3.V4S(), v15.S(), 3), "mul v2.4s, v3.4s, v15.s[3]");
+ COMPARE_MACRO(Mul(v0.V4H(), v1.V4H(), v2.H(), 0),
+ "mul v0.4h, v1.4h, v2.h[0]");
+ COMPARE_MACRO(Mul(v2.V8H(), v3.V8H(), v15.H(), 7),
+ "mul v2.8h, v3.8h, v15.h[7]");
+ COMPARE_MACRO(Mul(v0.V2S(), v1.V2S(), v2.S(), 0),
+ "mul v0.2s, v1.2s, v2.s[0]");
+ COMPARE_MACRO(Mul(v2.V4S(), v3.V4S(), v15.S(), 3),
+ "mul v2.4s, v3.4s, v15.s[3]");
- COMPARE(Mla(v0.V4H(), v1.V4H(), v2.H(), 0), "mla v0.4h, v1.4h, v2.h[0]");
- COMPARE(Mla(v2.V8H(), v3.V8H(), v15.H(), 7), "mla v2.8h, v3.8h, v15.h[7]");
- COMPARE(Mla(v0.V2S(), v1.V2S(), v2.S(), 0), "mla v0.2s, v1.2s, v2.s[0]");
- COMPARE(Mla(v2.V4S(), v3.V4S(), v15.S(), 3), "mla v2.4s, v3.4s, v15.s[3]");
+ COMPARE_MACRO(Mla(v0.V4H(), v1.V4H(), v2.H(), 0),
+ "mla v0.4h, v1.4h, v2.h[0]");
+ COMPARE_MACRO(Mla(v2.V8H(), v3.V8H(), v15.H(), 7),
+ "mla v2.8h, v3.8h, v15.h[7]");
+ COMPARE_MACRO(Mla(v0.V2S(), v1.V2S(), v2.S(), 0),
+ "mla v0.2s, v1.2s, v2.s[0]");
+ COMPARE_MACRO(Mla(v2.V4S(), v3.V4S(), v15.S(), 3),
+ "mla v2.4s, v3.4s, v15.s[3]");
- COMPARE(Mls(v0.V4H(), v1.V4H(), v2.H(), 0), "mls v0.4h, v1.4h, v2.h[0]");
- COMPARE(Mls(v2.V8H(), v3.V8H(), v15.H(), 7), "mls v2.8h, v3.8h, v15.h[7]");
- COMPARE(Mls(v0.V2S(), v1.V2S(), v2.S(), 0), "mls v0.2s, v1.2s, v2.s[0]");
- COMPARE(Mls(v2.V4S(), v3.V4S(), v15.S(), 3), "mls v2.4s, v3.4s, v15.s[3]");
+ COMPARE_MACRO(Mls(v0.V4H(), v1.V4H(), v2.H(), 0),
+ "mls v0.4h, v1.4h, v2.h[0]");
+ COMPARE_MACRO(Mls(v2.V8H(), v3.V8H(), v15.H(), 7),
+ "mls v2.8h, v3.8h, v15.h[7]");
+ COMPARE_MACRO(Mls(v0.V2S(), v1.V2S(), v2.S(), 0),
+ "mls v0.2s, v1.2s, v2.s[0]");
+ COMPARE_MACRO(Mls(v2.V4S(), v3.V4S(), v15.S(), 3),
+ "mls v2.4s, v3.4s, v15.s[3]");
- COMPARE(Sqdmulh(v0.V4H(), v1.V4H(), v2.H(), 0),
- "sqdmulh v0.4h, v1.4h, v2.h[0]");
- COMPARE(Sqdmulh(v2.V8H(), v3.V8H(), v15.H(), 7),
- "sqdmulh v2.8h, v3.8h, v15.h[7]");
- COMPARE(Sqdmulh(v0.V2S(), v1.V2S(), v2.S(), 0),
- "sqdmulh v0.2s, v1.2s, v2.s[0]");
- COMPARE(Sqdmulh(v2.V4S(), v3.V4S(), v15.S(), 3),
- "sqdmulh v2.4s, v3.4s, v15.s[3]");
- COMPARE(Sqdmulh(h0, h1, v2.H(), 0), "sqdmulh h0, h1, v2.h[0]");
- COMPARE(Sqdmulh(s0, s1, v2.S(), 0), "sqdmulh s0, s1, v2.s[0]");
+ COMPARE_MACRO(Sqdmulh(v0.V4H(), v1.V4H(), v2.H(), 0),
+ "sqdmulh v0.4h, v1.4h, v2.h[0]");
+ COMPARE_MACRO(Sqdmulh(v2.V8H(), v3.V8H(), v15.H(), 7),
+ "sqdmulh v2.8h, v3.8h, v15.h[7]");
+ COMPARE_MACRO(Sqdmulh(v0.V2S(), v1.V2S(), v2.S(), 0),
+ "sqdmulh v0.2s, v1.2s, v2.s[0]");
+ COMPARE_MACRO(Sqdmulh(v2.V4S(), v3.V4S(), v15.S(), 3),
+ "sqdmulh v2.4s, v3.4s, v15.s[3]");
+ COMPARE_MACRO(Sqdmulh(h0, h1, v2.H(), 0), "sqdmulh h0, h1, v2.h[0]");
+ COMPARE_MACRO(Sqdmulh(s0, s1, v2.S(), 0), "sqdmulh s0, s1, v2.s[0]");
- COMPARE(Sqrdmulh(v0.V4H(), v1.V4H(), v2.H(), 0),
- "sqrdmulh v0.4h, v1.4h, v2.h[0]");
- COMPARE(Sqrdmulh(v2.V8H(), v3.V8H(), v15.H(), 7),
- "sqrdmulh v2.8h, v3.8h, v15.h[7]");
- COMPARE(Sqrdmulh(v0.V2S(), v1.V2S(), v2.S(), 0),
- "sqrdmulh v0.2s, v1.2s, v2.s[0]");
- COMPARE(Sqrdmulh(v2.V4S(), v3.V4S(), v15.S(), 3),
- "sqrdmulh v2.4s, v3.4s, v15.s[3]");
- COMPARE(Sqrdmulh(h0, h1, v2.H(), 0), "sqrdmulh h0, h1, v2.h[0]");
- COMPARE(Sqrdmulh(s0, s1, v2.S(), 0), "sqrdmulh s0, s1, v2.s[0]");
+ COMPARE_MACRO(Sqrdmulh(v0.V4H(), v1.V4H(), v2.H(), 0),
+ "sqrdmulh v0.4h, v1.4h, v2.h[0]");
+ COMPARE_MACRO(Sqrdmulh(v2.V8H(), v3.V8H(), v15.H(), 7),
+ "sqrdmulh v2.8h, v3.8h, v15.h[7]");
+ COMPARE_MACRO(Sqrdmulh(v0.V2S(), v1.V2S(), v2.S(), 0),
+ "sqrdmulh v0.2s, v1.2s, v2.s[0]");
+ COMPARE_MACRO(Sqrdmulh(v2.V4S(), v3.V4S(), v15.S(), 3),
+ "sqrdmulh v2.4s, v3.4s, v15.s[3]");
+ COMPARE_MACRO(Sqrdmulh(h0, h1, v2.H(), 0), "sqrdmulh h0, h1, v2.h[0]");
+ COMPARE_MACRO(Sqrdmulh(s0, s1, v2.S(), 0), "sqrdmulh s0, s1, v2.s[0]");
- COMPARE(Smull(v0.V4S(), v1.V4H(), v2.H(), 0), "smull v0.4s, v1.4h, v2.h[0]");
- COMPARE(Smull2(v2.V4S(), v3.V8H(), v4.H(), 7),
- "smull2 v2.4s, v3.8h, v4.h[7]");
- COMPARE(Smull(v0.V2D(), v1.V2S(), v2.S(), 0), "smull v0.2d, v1.2s, v2.s[0]");
- COMPARE(Smull2(v2.V2D(), v3.V4S(), v4.S(), 3),
- "smull2 v2.2d, v3.4s, v4.s[3]");
+ COMPARE_MACRO(Smull(v0.V4S(), v1.V4H(), v2.H(), 0),
+ "smull v0.4s, v1.4h, v2.h[0]");
+ COMPARE_MACRO(Smull2(v2.V4S(), v3.V8H(), v4.H(), 7),
+ "smull2 v2.4s, v3.8h, v4.h[7]");
+ COMPARE_MACRO(Smull(v0.V2D(), v1.V2S(), v2.S(), 0),
+ "smull v0.2d, v1.2s, v2.s[0]");
+ COMPARE_MACRO(Smull2(v2.V2D(), v3.V4S(), v4.S(), 3),
+ "smull2 v2.2d, v3.4s, v4.s[3]");
- COMPARE(Umull(v0.V4S(), v1.V4H(), v2.H(), 0), "umull v0.4s, v1.4h, v2.h[0]");
- COMPARE(Umull2(v2.V4S(), v3.V8H(), v4.H(), 7),
- "umull2 v2.4s, v3.8h, v4.h[7]");
- COMPARE(Umull(v0.V2D(), v1.V2S(), v2.S(), 0), "umull v0.2d, v1.2s, v2.s[0]");
- COMPARE(Umull2(v2.V2D(), v3.V4S(), v4.S(), 3),
- "umull2 v2.2d, v3.4s, v4.s[3]");
+ COMPARE_MACRO(Umull(v0.V4S(), v1.V4H(), v2.H(), 0),
+ "umull v0.4s, v1.4h, v2.h[0]");
+ COMPARE_MACRO(Umull2(v2.V4S(), v3.V8H(), v4.H(), 7),
+ "umull2 v2.4s, v3.8h, v4.h[7]");
+ COMPARE_MACRO(Umull(v0.V2D(), v1.V2S(), v2.S(), 0),
+ "umull v0.2d, v1.2s, v2.s[0]");
+ COMPARE_MACRO(Umull2(v2.V2D(), v3.V4S(), v4.S(), 3),
+ "umull2 v2.2d, v3.4s, v4.s[3]");
- COMPARE(Smlal(v0.V4S(), v1.V4H(), v2.H(), 0), "smlal v0.4s, v1.4h, v2.h[0]");
- COMPARE(Smlal2(v2.V4S(), v3.V8H(), v4.H(), 7),
- "smlal2 v2.4s, v3.8h, v4.h[7]");
- COMPARE(Smlal(v0.V2D(), v1.V2S(), v2.S(), 0), "smlal v0.2d, v1.2s, v2.s[0]");
- COMPARE(Smlal2(v2.V2D(), v3.V4S(), v4.S(), 3),
- "smlal2 v2.2d, v3.4s, v4.s[3]");
+ COMPARE_MACRO(Smlal(v0.V4S(), v1.V4H(), v2.H(), 0),
+ "smlal v0.4s, v1.4h, v2.h[0]");
+ COMPARE_MACRO(Smlal2(v2.V4S(), v3.V8H(), v4.H(), 7),
+ "smlal2 v2.4s, v3.8h, v4.h[7]");
+ COMPARE_MACRO(Smlal(v0.V2D(), v1.V2S(), v2.S(), 0),
+ "smlal v0.2d, v1.2s, v2.s[0]");
+ COMPARE_MACRO(Smlal2(v2.V2D(), v3.V4S(), v4.S(), 3),
+ "smlal2 v2.2d, v3.4s, v4.s[3]");
- COMPARE(Umlal(v0.V4S(), v1.V4H(), v2.H(), 0), "umlal v0.4s, v1.4h, v2.h[0]");
- COMPARE(Umlal2(v2.V4S(), v3.V8H(), v4.H(), 7),
- "umlal2 v2.4s, v3.8h, v4.h[7]");
- COMPARE(Umlal(v0.V2D(), v1.V2S(), v2.S(), 0), "umlal v0.2d, v1.2s, v2.s[0]");
- COMPARE(Umlal2(v2.V2D(), v3.V4S(), v4.S(), 3),
- "umlal2 v2.2d, v3.4s, v4.s[3]");
+ COMPARE_MACRO(Umlal(v0.V4S(), v1.V4H(), v2.H(), 0),
+ "umlal v0.4s, v1.4h, v2.h[0]");
+ COMPARE_MACRO(Umlal2(v2.V4S(), v3.V8H(), v4.H(), 7),
+ "umlal2 v2.4s, v3.8h, v4.h[7]");
+ COMPARE_MACRO(Umlal(v0.V2D(), v1.V2S(), v2.S(), 0),
+ "umlal v0.2d, v1.2s, v2.s[0]");
+ COMPARE_MACRO(Umlal2(v2.V2D(), v3.V4S(), v4.S(), 3),
+ "umlal2 v2.2d, v3.4s, v4.s[3]");
- COMPARE(Smlsl(v0.V4S(), v1.V4H(), v2.H(), 0), "smlsl v0.4s, v1.4h, v2.h[0]");
- COMPARE(Smlsl2(v2.V4S(), v3.V8H(), v4.H(), 7),
- "smlsl2 v2.4s, v3.8h, v4.h[7]");
- COMPARE(Smlsl(v0.V2D(), v1.V2S(), v2.S(), 0), "smlsl v0.2d, v1.2s, v2.s[0]");
- COMPARE(Smlsl2(v2.V2D(), v3.V4S(), v4.S(), 3),
- "smlsl2 v2.2d, v3.4s, v4.s[3]");
+ COMPARE_MACRO(Smlsl(v0.V4S(), v1.V4H(), v2.H(), 0),
+ "smlsl v0.4s, v1.4h, v2.h[0]");
+ COMPARE_MACRO(Smlsl2(v2.V4S(), v3.V8H(), v4.H(), 7),
+ "smlsl2 v2.4s, v3.8h, v4.h[7]");
+ COMPARE_MACRO(Smlsl(v0.V2D(), v1.V2S(), v2.S(), 0),
+ "smlsl v0.2d, v1.2s, v2.s[0]");
+ COMPARE_MACRO(Smlsl2(v2.V2D(), v3.V4S(), v4.S(), 3),
+ "smlsl2 v2.2d, v3.4s, v4.s[3]");
- COMPARE(Umlsl(v0.V4S(), v1.V4H(), v2.H(), 0), "umlsl v0.4s, v1.4h, v2.h[0]");
- COMPARE(Umlsl2(v2.V4S(), v3.V8H(), v4.H(), 7),
- "umlsl2 v2.4s, v3.8h, v4.h[7]");
- COMPARE(Umlsl(v0.V2D(), v1.V2S(), v2.S(), 0), "umlsl v0.2d, v1.2s, v2.s[0]");
- COMPARE(Umlsl2(v2.V2D(), v3.V4S(), v4.S(), 3),
- "umlsl2 v2.2d, v3.4s, v4.s[3]");
+ COMPARE_MACRO(Umlsl(v0.V4S(), v1.V4H(), v2.H(), 0),
+ "umlsl v0.4s, v1.4h, v2.h[0]");
+ COMPARE_MACRO(Umlsl2(v2.V4S(), v3.V8H(), v4.H(), 7),
+ "umlsl2 v2.4s, v3.8h, v4.h[7]");
+ COMPARE_MACRO(Umlsl(v0.V2D(), v1.V2S(), v2.S(), 0),
+ "umlsl v0.2d, v1.2s, v2.s[0]");
+ COMPARE_MACRO(Umlsl2(v2.V2D(), v3.V4S(), v4.S(), 3),
+ "umlsl2 v2.2d, v3.4s, v4.s[3]");
- COMPARE(Sqdmull(v0.V4S(), v1.V4H(), v2.H(), 0),
- "sqdmull v0.4s, v1.4h, v2.h[0]");
- COMPARE(Sqdmull2(v2.V4S(), v3.V8H(), v4.H(), 7),
- "sqdmull2 v2.4s, v3.8h, v4.h[7]");
- COMPARE(Sqdmull(v0.V2D(), v1.V2S(), v2.S(), 0),
- "sqdmull v0.2d, v1.2s, v2.s[0]");
- COMPARE(Sqdmull2(v2.V2D(), v3.V4S(), v4.S(), 3),
- "sqdmull2 v2.2d, v3.4s, v4.s[3]");
- COMPARE(Sqdmull(s0, h1, v2.H(), 0), "sqdmull s0, h1, v2.h[0]");
- COMPARE(Sqdmull(d0, s1, v2.S(), 0), "sqdmull d0, s1, v2.s[0]");
+ COMPARE_MACRO(Sqdmull(v0.V4S(), v1.V4H(), v2.H(), 0),
+ "sqdmull v0.4s, v1.4h, v2.h[0]");
+ COMPARE_MACRO(Sqdmull2(v2.V4S(), v3.V8H(), v4.H(), 7),
+ "sqdmull2 v2.4s, v3.8h, v4.h[7]");
+ COMPARE_MACRO(Sqdmull(v0.V2D(), v1.V2S(), v2.S(), 0),
+ "sqdmull v0.2d, v1.2s, v2.s[0]");
+ COMPARE_MACRO(Sqdmull2(v2.V2D(), v3.V4S(), v4.S(), 3),
+ "sqdmull2 v2.2d, v3.4s, v4.s[3]");
+ COMPARE_MACRO(Sqdmull(s0, h1, v2.H(), 0), "sqdmull s0, h1, v2.h[0]");
+ COMPARE_MACRO(Sqdmull(d0, s1, v2.S(), 0), "sqdmull d0, s1, v2.s[0]");
- COMPARE(Sqdmlal(v0.V4S(), v1.V4H(), v2.H(), 0),
- "sqdmlal v0.4s, v1.4h, v2.h[0]");
- COMPARE(Sqdmlal2(v2.V4S(), v3.V8H(), v4.H(), 7),
- "sqdmlal2 v2.4s, v3.8h, v4.h[7]");
- COMPARE(Sqdmlal(v0.V2D(), v1.V2S(), v2.S(), 0),
- "sqdmlal v0.2d, v1.2s, v2.s[0]");
- COMPARE(Sqdmlal2(v2.V2D(), v3.V4S(), v4.S(), 3),
- "sqdmlal2 v2.2d, v3.4s, v4.s[3]");
- COMPARE(Sqdmlal(s0, h1, v2.H(), 0), "sqdmlal s0, h1, v2.h[0]");
- COMPARE(Sqdmlal(d0, s1, v2.S(), 0), "sqdmlal d0, s1, v2.s[0]");
+ COMPARE_MACRO(Sqdmlal(v0.V4S(), v1.V4H(), v2.H(), 0),
+ "sqdmlal v0.4s, v1.4h, v2.h[0]");
+ COMPARE_MACRO(Sqdmlal2(v2.V4S(), v3.V8H(), v4.H(), 7),
+ "sqdmlal2 v2.4s, v3.8h, v4.h[7]");
+ COMPARE_MACRO(Sqdmlal(v0.V2D(), v1.V2S(), v2.S(), 0),
+ "sqdmlal v0.2d, v1.2s, v2.s[0]");
+ COMPARE_MACRO(Sqdmlal2(v2.V2D(), v3.V4S(), v4.S(), 3),
+ "sqdmlal2 v2.2d, v3.4s, v4.s[3]");
+ COMPARE_MACRO(Sqdmlal(s0, h1, v2.H(), 0), "sqdmlal s0, h1, v2.h[0]");
+ COMPARE_MACRO(Sqdmlal(d0, s1, v2.S(), 0), "sqdmlal d0, s1, v2.s[0]");
- COMPARE(Sqdmlsl(v0.V4S(), v1.V4H(), v2.H(), 0),
- "sqdmlsl v0.4s, v1.4h, v2.h[0]");
- COMPARE(Sqdmlsl2(v2.V4S(), v3.V8H(), v4.H(), 7),
- "sqdmlsl2 v2.4s, v3.8h, v4.h[7]");
- COMPARE(Sqdmlsl(v0.V2D(), v1.V2S(), v2.S(), 0),
- "sqdmlsl v0.2d, v1.2s, v2.s[0]");
- COMPARE(Sqdmlsl2(v2.V2D(), v3.V4S(), v4.S(), 3),
- "sqdmlsl2 v2.2d, v3.4s, v4.s[3]");
- COMPARE(Sqdmlsl(s0, h1, v2.H(), 0), "sqdmlsl s0, h1, v2.h[0]");
- COMPARE(Sqdmlsl(d0, s1, v2.S(), 0), "sqdmlsl d0, s1, v2.s[0]");
+ COMPARE_MACRO(Sqdmlsl(v0.V4S(), v1.V4H(), v2.H(), 0),
+ "sqdmlsl v0.4s, v1.4h, v2.h[0]");
+ COMPARE_MACRO(Sqdmlsl2(v2.V4S(), v3.V8H(), v4.H(), 7),
+ "sqdmlsl2 v2.4s, v3.8h, v4.h[7]");
+ COMPARE_MACRO(Sqdmlsl(v0.V2D(), v1.V2S(), v2.S(), 0),
+ "sqdmlsl v0.2d, v1.2s, v2.s[0]");
+ COMPARE_MACRO(Sqdmlsl2(v2.V2D(), v3.V4S(), v4.S(), 3),
+ "sqdmlsl2 v2.2d, v3.4s, v4.s[3]");
+ COMPARE_MACRO(Sqdmlsl(s0, h1, v2.H(), 0), "sqdmlsl s0, h1, v2.h[0]");
+ COMPARE_MACRO(Sqdmlsl(d0, s1, v2.S(), 0), "sqdmlsl d0, s1, v2.s[0]");
CLEANUP();
}
TEST(neon_fp_byelement) {
- SETUP_MACRO();
+ SETUP();
- COMPARE(Fmul(v0.V2S(), v1.V2S(), v2.S(), 0), "fmul v0.2s, v1.2s, v2.s[0]");
- COMPARE(Fmul(v2.V4S(), v3.V4S(), v15.S(), 3), "fmul v2.4s, v3.4s, v15.s[3]");
- COMPARE(Fmul(v0.V2D(), v1.V2D(), v2.D(), 0), "fmul v0.2d, v1.2d, v2.d[0]");
- COMPARE(Fmul(d0, d1, v2.D(), 0), "fmul d0, d1, v2.d[0]");
- COMPARE(Fmul(s0, s1, v2.S(), 0), "fmul s0, s1, v2.s[0]");
+ COMPARE_MACRO(Fmul(v0.V2S(), v1.V2S(), v2.S(), 0),
+ "fmul v0.2s, v1.2s, v2.s[0]");
+ COMPARE_MACRO(Fmul(v2.V4S(), v3.V4S(), v15.S(), 3),
+ "fmul v2.4s, v3.4s, v15.s[3]");
+ COMPARE_MACRO(Fmul(v0.V2D(), v1.V2D(), v2.D(), 0),
+ "fmul v0.2d, v1.2d, v2.d[0]");
+ COMPARE_MACRO(Fmul(d0, d1, v2.D(), 0), "fmul d0, d1, v2.d[0]");
+ COMPARE_MACRO(Fmul(s0, s1, v2.S(), 0), "fmul s0, s1, v2.s[0]");
- COMPARE(Fmla(v0.V2S(), v1.V2S(), v2.S(), 0), "fmla v0.2s, v1.2s, v2.s[0]");
- COMPARE(Fmla(v2.V4S(), v3.V4S(), v15.S(), 3), "fmla v2.4s, v3.4s, v15.s[3]");
- COMPARE(Fmla(v0.V2D(), v1.V2D(), v2.D(), 0), "fmla v0.2d, v1.2d, v2.d[0]");
- COMPARE(Fmla(d0, d1, v2.D(), 0), "fmla d0, d1, v2.d[0]");
- COMPARE(Fmla(s0, s1, v2.S(), 0), "fmla s0, s1, v2.s[0]");
+ COMPARE_MACRO(Fmla(v0.V2S(), v1.V2S(), v2.S(), 0),
+ "fmla v0.2s, v1.2s, v2.s[0]");
+ COMPARE_MACRO(Fmla(v2.V4S(), v3.V4S(), v15.S(), 3),
+ "fmla v2.4s, v3.4s, v15.s[3]");
+ COMPARE_MACRO(Fmla(v0.V2D(), v1.V2D(), v2.D(), 0),
+ "fmla v0.2d, v1.2d, v2.d[0]");
+ COMPARE_MACRO(Fmla(d0, d1, v2.D(), 0), "fmla d0, d1, v2.d[0]");
+ COMPARE_MACRO(Fmla(s0, s1, v2.S(), 0), "fmla s0, s1, v2.s[0]");
- COMPARE(Fmls(v0.V2S(), v1.V2S(), v2.S(), 0), "fmls v0.2s, v1.2s, v2.s[0]");
- COMPARE(Fmls(v2.V4S(), v3.V4S(), v15.S(), 3), "fmls v2.4s, v3.4s, v15.s[3]");
- COMPARE(Fmls(v0.V2D(), v1.V2D(), v2.D(), 0), "fmls v0.2d, v1.2d, v2.d[0]");
- COMPARE(Fmls(d0, d1, v2.D(), 0), "fmls d0, d1, v2.d[0]");
- COMPARE(Fmls(s0, s1, v2.S(), 0), "fmls s0, s1, v2.s[0]");
+ COMPARE_MACRO(Fmls(v0.V2S(), v1.V2S(), v2.S(), 0),
+ "fmls v0.2s, v1.2s, v2.s[0]");
+ COMPARE_MACRO(Fmls(v2.V4S(), v3.V4S(), v15.S(), 3),
+ "fmls v2.4s, v3.4s, v15.s[3]");
+ COMPARE_MACRO(Fmls(v0.V2D(), v1.V2D(), v2.D(), 0),
+ "fmls v0.2d, v1.2d, v2.d[0]");
+ COMPARE_MACRO(Fmls(d0, d1, v2.D(), 0), "fmls d0, d1, v2.d[0]");
+ COMPARE_MACRO(Fmls(s0, s1, v2.S(), 0), "fmls s0, s1, v2.s[0]");
- COMPARE(Fmulx(v0.V2S(), v1.V2S(), v2.S(), 0), "fmulx v0.2s, v1.2s, v2.s[0]");
- COMPARE(Fmulx(v2.V4S(), v3.V4S(), v8.S(), 3), "fmulx v2.4s, v3.4s, v8.s[3]");
- COMPARE(Fmulx(v0.V2D(), v1.V2D(), v2.D(), 0), "fmulx v0.2d, v1.2d, v2.d[0]");
- COMPARE(Fmulx(d0, d1, v2.D(), 0), "fmulx d0, d1, v2.d[0]");
- COMPARE(Fmulx(s0, s1, v2.S(), 0), "fmulx s0, s1, v2.s[0]");
+ COMPARE_MACRO(Fmulx(v0.V2S(), v1.V2S(), v2.S(), 0),
+ "fmulx v0.2s, v1.2s, v2.s[0]");
+ COMPARE_MACRO(Fmulx(v2.V4S(), v3.V4S(), v8.S(), 3),
+ "fmulx v2.4s, v3.4s, v8.s[3]");
+ COMPARE_MACRO(Fmulx(v0.V2D(), v1.V2D(), v2.D(), 0),
+ "fmulx v0.2d, v1.2d, v2.d[0]");
+ COMPARE_MACRO(Fmulx(d0, d1, v2.D(), 0), "fmulx d0, d1, v2.d[0]");
+ COMPARE_MACRO(Fmulx(s0, s1, v2.S(), 0), "fmulx s0, s1, v2.s[0]");
CLEANUP();
}
TEST(neon_3different) {
- SETUP_MACRO();
+ SETUP();
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Uaddl(v0.TA, v1.TB, v2.TB), "uaddl v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Uaddl(v0.TA, v1.TB, v2.TB), \
+ "uaddl v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Uaddl2(v0.TA, v1.TB, v2.TB), \
- "uaddl2 v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Uaddl2(v0.TA, v1.TB, v2.TB), \
+ "uaddl2 v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW2(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Uaddw(v0.TA, v1.TA, v2.TB), "uaddw v0." TAS ", v1." TAS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Uaddw(v0.TA, v1.TA, v2.TB), \
+ "uaddw v0." TAS ", v1." TAS ", v2." TBS);
NEON_FORMAT_LIST_LW(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Uaddw2(v0.TA, v1.TA, v2.TB), \
- "uaddw2 v0." TAS ", v1." TAS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Uaddw2(v0.TA, v1.TA, v2.TB), \
+ "uaddw2 v0." TAS ", v1." TAS ", v2." TBS);
NEON_FORMAT_LIST_LW2(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Saddl(v0.TA, v1.TB, v2.TB), "saddl v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Saddl(v0.TA, v1.TB, v2.TB), \
+ "saddl v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Saddl2(v0.TA, v1.TB, v2.TB), \
- "saddl2 v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Saddl2(v0.TA, v1.TB, v2.TB), \
+ "saddl2 v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW2(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Saddw(v0.TA, v1.TA, v2.TB), "saddw v0." TAS ", v1." TAS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Saddw(v0.TA, v1.TA, v2.TB), \
+ "saddw v0." TAS ", v1." TAS ", v2." TBS);
NEON_FORMAT_LIST_LW(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Saddw2(v0.TA, v1.TA, v2.TB), \
- "saddw2 v0." TAS ", v1." TAS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Saddw2(v0.TA, v1.TA, v2.TB), \
+ "saddw2 v0." TAS ", v1." TAS ", v2." TBS);
NEON_FORMAT_LIST_LW2(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Usubl(v0.TA, v1.TB, v2.TB), "usubl v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Usubl(v0.TA, v1.TB, v2.TB), \
+ "usubl v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Usubl2(v0.TA, v1.TB, v2.TB), \
- "usubl2 v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Usubl2(v0.TA, v1.TB, v2.TB), \
+ "usubl2 v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW2(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Usubw(v0.TA, v1.TA, v2.TB), "usubw v0." TAS ", v1." TAS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Usubw(v0.TA, v1.TA, v2.TB), \
+ "usubw v0." TAS ", v1." TAS ", v2." TBS);
NEON_FORMAT_LIST_LW(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Usubw2(v0.TA, v1.TA, v2.TB), \
- "usubw2 v0." TAS ", v1." TAS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Usubw2(v0.TA, v1.TA, v2.TB), \
+ "usubw2 v0." TAS ", v1." TAS ", v2." TBS);
NEON_FORMAT_LIST_LW2(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Ssubl(v0.TA, v1.TB, v2.TB), "ssubl v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Ssubl(v0.TA, v1.TB, v2.TB), \
+ "ssubl v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Ssubl2(v0.TA, v1.TB, v2.TB), \
- "ssubl2 v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Ssubl2(v0.TA, v1.TB, v2.TB), \
+ "ssubl2 v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW2(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Ssubw(v0.TA, v1.TA, v2.TB), "ssubw v0." TAS ", v1." TAS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Ssubw(v0.TA, v1.TA, v2.TB), \
+ "ssubw v0." TAS ", v1." TAS ", v2." TBS);
NEON_FORMAT_LIST_LW(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Ssubw2(v0.TA, v1.TA, v2.TB), \
- "ssubw2 v0." TAS ", v1." TAS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Ssubw2(v0.TA, v1.TA, v2.TB), \
+ "ssubw2 v0." TAS ", v1." TAS ", v2." TBS);
NEON_FORMAT_LIST_LW2(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Sabal(v0.TA, v1.TB, v2.TB), "sabal v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Sabal(v0.TA, v1.TB, v2.TB), \
+ "sabal v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Sabal2(v0.TA, v1.TB, v2.TB), \
- "sabal2 v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Sabal2(v0.TA, v1.TB, v2.TB), \
+ "sabal2 v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW2(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Uabal(v0.TA, v1.TB, v2.TB), "uabal v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Uabal(v0.TA, v1.TB, v2.TB), \
+ "uabal v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Uabal2(v0.TA, v1.TB, v2.TB), \
- "uabal2 v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Uabal2(v0.TA, v1.TB, v2.TB), \
+ "uabal2 v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW2(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Sabdl(v0.TA, v1.TB, v2.TB), "sabdl v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Sabdl(v0.TA, v1.TB, v2.TB), \
+ "sabdl v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Sabdl2(v0.TA, v1.TB, v2.TB), \
- "sabdl2 v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Sabdl2(v0.TA, v1.TB, v2.TB), \
+ "sabdl2 v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW2(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Uabdl(v0.TA, v1.TB, v2.TB), "uabdl v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Uabdl(v0.TA, v1.TB, v2.TB), \
+ "uabdl v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Uabdl2(v0.TA, v1.TB, v2.TB), \
- "uabdl2 v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Uabdl2(v0.TA, v1.TB, v2.TB), \
+ "uabdl2 v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW2(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Smlal(v0.TA, v1.TB, v2.TB), "smlal v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Smlal(v0.TA, v1.TB, v2.TB), \
+ "smlal v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Smlal2(v0.TA, v1.TB, v2.TB), \
- "smlal2 v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Smlal2(v0.TA, v1.TB, v2.TB), \
+ "smlal2 v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW2(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Umlsl(v0.TA, v1.TB, v2.TB), "umlsl v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Umlsl(v0.TA, v1.TB, v2.TB), \
+ "umlsl v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Umlsl2(v0.TA, v1.TB, v2.TB), \
- "umlsl2 v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Umlsl2(v0.TA, v1.TB, v2.TB), \
+ "umlsl2 v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW2(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Smlsl(v0.TA, v1.TB, v2.TB), "smlsl v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Smlsl(v0.TA, v1.TB, v2.TB), \
+ "smlsl v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Smlsl2(v0.TA, v1.TB, v2.TB), \
- "smlsl2 v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Smlsl2(v0.TA, v1.TB, v2.TB), \
+ "smlsl2 v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW2(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Umlsl(v0.TA, v1.TB, v2.TB), "umlsl v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Umlsl(v0.TA, v1.TB, v2.TB), \
+ "umlsl v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Umlsl2(v0.TA, v1.TB, v2.TB), \
- "umlsl2 v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Umlsl2(v0.TA, v1.TB, v2.TB), \
+ "umlsl2 v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW2(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Smull(v0.TA, v1.TB, v2.TB), "smull v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Smull(v0.TA, v1.TB, v2.TB), \
+ "smull v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Smull2(v0.TA, v1.TB, v2.TB), \
- "smull2 v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Smull2(v0.TA, v1.TB, v2.TB), \
+ "smull2 v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW2(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Umull(v0.TA, v1.TB, v2.TB), "umull v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Umull(v0.TA, v1.TB, v2.TB), \
+ "umull v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Umull2(v0.TA, v1.TB, v2.TB), \
- "umull2 v0." TAS ", v1." TBS ", v2." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Umull2(v0.TA, v1.TB, v2.TB), \
+ "umull2 v0." TAS ", v1." TBS ", v2." TBS);
NEON_FORMAT_LIST_LW2(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- COMPARE(Sqdmull(v0.V4S(), v1.V4H(), v2.V4H()), "sqdmull v0.4s, v1.4h, v2.4h");
- COMPARE(Sqdmull(v1.V2D(), v2.V2S(), v3.V2S()), "sqdmull v1.2d, v2.2s, v3.2s");
- COMPARE(Sqdmull2(v2.V4S(), v3.V8H(), v4.V8H()),
- "sqdmull2 v2.4s, v3.8h, v4.8h");
- COMPARE(Sqdmull2(v3.V2D(), v4.V4S(), v5.V4S()),
- "sqdmull2 v3.2d, v4.4s, v5.4s");
- COMPARE(Sqdmull(s0, h1, h2), "sqdmull s0, h1, h2");
- COMPARE(Sqdmull(d1, s2, s3), "sqdmull d1, s2, s3");
+ COMPARE_MACRO(Sqdmull(v0.V4S(), v1.V4H(), v2.V4H()),
+ "sqdmull v0.4s, v1.4h, v2.4h");
+ COMPARE_MACRO(Sqdmull(v1.V2D(), v2.V2S(), v3.V2S()),
+ "sqdmull v1.2d, v2.2s, v3.2s");
+ COMPARE_MACRO(Sqdmull2(v2.V4S(), v3.V8H(), v4.V8H()),
+ "sqdmull2 v2.4s, v3.8h, v4.8h");
+ COMPARE_MACRO(Sqdmull2(v3.V2D(), v4.V4S(), v5.V4S()),
+ "sqdmull2 v3.2d, v4.4s, v5.4s");
+ COMPARE_MACRO(Sqdmull(s0, h1, h2), "sqdmull s0, h1, h2");
+ COMPARE_MACRO(Sqdmull(d1, s2, s3), "sqdmull d1, s2, s3");
- COMPARE(Sqdmlal(v0.V4S(), v1.V4H(), v2.V4H()), "sqdmlal v0.4s, v1.4h, v2.4h");
- COMPARE(Sqdmlal(v1.V2D(), v2.V2S(), v3.V2S()), "sqdmlal v1.2d, v2.2s, v3.2s");
- COMPARE(Sqdmlal2(v2.V4S(), v3.V8H(), v4.V8H()),
- "sqdmlal2 v2.4s, v3.8h, v4.8h");
- COMPARE(Sqdmlal2(v3.V2D(), v4.V4S(), v5.V4S()),
- "sqdmlal2 v3.2d, v4.4s, v5.4s");
- COMPARE(Sqdmlal(s0, h1, h2), "sqdmlal s0, h1, h2");
- COMPARE(Sqdmlal(d1, s2, s3), "sqdmlal d1, s2, s3");
+ COMPARE_MACRO(Sqdmlal(v0.V4S(), v1.V4H(), v2.V4H()),
+ "sqdmlal v0.4s, v1.4h, v2.4h");
+ COMPARE_MACRO(Sqdmlal(v1.V2D(), v2.V2S(), v3.V2S()),
+ "sqdmlal v1.2d, v2.2s, v3.2s");
+ COMPARE_MACRO(Sqdmlal2(v2.V4S(), v3.V8H(), v4.V8H()),
+ "sqdmlal2 v2.4s, v3.8h, v4.8h");
+ COMPARE_MACRO(Sqdmlal2(v3.V2D(), v4.V4S(), v5.V4S()),
+ "sqdmlal2 v3.2d, v4.4s, v5.4s");
+ COMPARE_MACRO(Sqdmlal(s0, h1, h2), "sqdmlal s0, h1, h2");
+ COMPARE_MACRO(Sqdmlal(d1, s2, s3), "sqdmlal d1, s2, s3");
- COMPARE(Sqdmlsl(v0.V4S(), v1.V4H(), v2.V4H()), "sqdmlsl v0.4s, v1.4h, v2.4h");
- COMPARE(Sqdmlsl(v1.V2D(), v2.V2S(), v3.V2S()), "sqdmlsl v1.2d, v2.2s, v3.2s");
- COMPARE(Sqdmlsl2(v2.V4S(), v3.V8H(), v4.V8H()),
- "sqdmlsl2 v2.4s, v3.8h, v4.8h");
- COMPARE(Sqdmlsl2(v3.V2D(), v4.V4S(), v5.V4S()),
- "sqdmlsl2 v3.2d, v4.4s, v5.4s");
- COMPARE(Sqdmlsl(s0, h1, h2), "sqdmlsl s0, h1, h2");
- COMPARE(Sqdmlsl(d1, s2, s3), "sqdmlsl d1, s2, s3");
+ COMPARE_MACRO(Sqdmlsl(v0.V4S(), v1.V4H(), v2.V4H()),
+ "sqdmlsl v0.4s, v1.4h, v2.4h");
+ COMPARE_MACRO(Sqdmlsl(v1.V2D(), v2.V2S(), v3.V2S()),
+ "sqdmlsl v1.2d, v2.2s, v3.2s");
+ COMPARE_MACRO(Sqdmlsl2(v2.V4S(), v3.V8H(), v4.V8H()),
+ "sqdmlsl2 v2.4s, v3.8h, v4.8h");
+ COMPARE_MACRO(Sqdmlsl2(v3.V2D(), v4.V4S(), v5.V4S()),
+ "sqdmlsl2 v3.2d, v4.4s, v5.4s");
+ COMPARE_MACRO(Sqdmlsl(s0, h1, h2), "sqdmlsl s0, h1, h2");
+ COMPARE_MACRO(Sqdmlsl(d1, s2, s3), "sqdmlsl d1, s2, s3");
- COMPARE(Addhn(v0.V8B(), v1.V8H(), v2.V8H()), "addhn v0.8b, v1.8h, v2.8h");
- COMPARE(Addhn(v1.V4H(), v2.V4S(), v3.V4S()), "addhn v1.4h, v2.4s, v3.4s");
- COMPARE(Addhn(v2.V2S(), v3.V2D(), v4.V2D()), "addhn v2.2s, v3.2d, v4.2d");
- COMPARE(Addhn2(v0.V16B(), v1.V8H(), v5.V8H()), "addhn2 v0.16b, v1.8h, v5.8h");
- COMPARE(Addhn2(v1.V8H(), v2.V4S(), v6.V4S()), "addhn2 v1.8h, v2.4s, v6.4s");
- COMPARE(Addhn2(v2.V4S(), v3.V2D(), v7.V2D()), "addhn2 v2.4s, v3.2d, v7.2d");
+ COMPARE_MACRO(Addhn(v0.V8B(), v1.V8H(), v2.V8H()),
+ "addhn v0.8b, v1.8h, v2.8h");
+ COMPARE_MACRO(Addhn(v1.V4H(), v2.V4S(), v3.V4S()),
+ "addhn v1.4h, v2.4s, v3.4s");
+ COMPARE_MACRO(Addhn(v2.V2S(), v3.V2D(), v4.V2D()),
+ "addhn v2.2s, v3.2d, v4.2d");
+ COMPARE_MACRO(Addhn2(v0.V16B(), v1.V8H(), v5.V8H()),
+ "addhn2 v0.16b, v1.8h, v5.8h");
+ COMPARE_MACRO(Addhn2(v1.V8H(), v2.V4S(), v6.V4S()),
+ "addhn2 v1.8h, v2.4s, v6.4s");
+ COMPARE_MACRO(Addhn2(v2.V4S(), v3.V2D(), v7.V2D()),
+ "addhn2 v2.4s, v3.2d, v7.2d");
- COMPARE(Raddhn(v0.V8B(), v1.V8H(), v2.V8H()), "raddhn v0.8b, v1.8h, v2.8h");
- COMPARE(Raddhn(v1.V4H(), v2.V4S(), v3.V4S()), "raddhn v1.4h, v2.4s, v3.4s");
- COMPARE(Raddhn(v2.V2S(), v3.V2D(), v4.V2D()), "raddhn v2.2s, v3.2d, v4.2d");
- COMPARE(Raddhn2(v0.V16B(), v1.V8H(), v5.V8H()),
- "raddhn2 v0.16b, v1.8h, v5.8h");
- COMPARE(Raddhn2(v1.V8H(), v2.V4S(), v6.V4S()), "raddhn2 v1.8h, v2.4s, v6.4s");
- COMPARE(Raddhn2(v2.V4S(), v3.V2D(), v7.V2D()), "raddhn2 v2.4s, v3.2d, v7.2d");
+ COMPARE_MACRO(Raddhn(v0.V8B(), v1.V8H(), v2.V8H()),
+ "raddhn v0.8b, v1.8h, v2.8h");
+ COMPARE_MACRO(Raddhn(v1.V4H(), v2.V4S(), v3.V4S()),
+ "raddhn v1.4h, v2.4s, v3.4s");
+ COMPARE_MACRO(Raddhn(v2.V2S(), v3.V2D(), v4.V2D()),
+ "raddhn v2.2s, v3.2d, v4.2d");
+ COMPARE_MACRO(Raddhn2(v0.V16B(), v1.V8H(), v5.V8H()),
+ "raddhn2 v0.16b, v1.8h, v5.8h");
+ COMPARE_MACRO(Raddhn2(v1.V8H(), v2.V4S(), v6.V4S()),
+ "raddhn2 v1.8h, v2.4s, v6.4s");
+ COMPARE_MACRO(Raddhn2(v2.V4S(), v3.V2D(), v7.V2D()),
+ "raddhn2 v2.4s, v3.2d, v7.2d");
- COMPARE(Subhn(v1.V4H(), v2.V4S(), v3.V4S()), "subhn v1.4h, v2.4s, v3.4s");
- COMPARE(Subhn(v2.V2S(), v3.V2D(), v4.V2D()), "subhn v2.2s, v3.2d, v4.2d");
- COMPARE(Subhn2(v0.V16B(), v1.V8H(), v5.V8H()), "subhn2 v0.16b, v1.8h, v5.8h");
- COMPARE(Subhn2(v1.V8H(), v2.V4S(), v6.V4S()), "subhn2 v1.8h, v2.4s, v6.4s");
- COMPARE(Subhn2(v2.V4S(), v3.V2D(), v7.V2D()), "subhn2 v2.4s, v3.2d, v7.2d");
+ COMPARE_MACRO(Subhn(v1.V4H(), v2.V4S(), v3.V4S()),
+ "subhn v1.4h, v2.4s, v3.4s");
+ COMPARE_MACRO(Subhn(v2.V2S(), v3.V2D(), v4.V2D()),
+ "subhn v2.2s, v3.2d, v4.2d");
+ COMPARE_MACRO(Subhn2(v0.V16B(), v1.V8H(), v5.V8H()),
+ "subhn2 v0.16b, v1.8h, v5.8h");
+ COMPARE_MACRO(Subhn2(v1.V8H(), v2.V4S(), v6.V4S()),
+ "subhn2 v1.8h, v2.4s, v6.4s");
+ COMPARE_MACRO(Subhn2(v2.V4S(), v3.V2D(), v7.V2D()),
+ "subhn2 v2.4s, v3.2d, v7.2d");
- COMPARE(Rsubhn(v0.V8B(), v1.V8H(), v2.V8H()), "rsubhn v0.8b, v1.8h, v2.8h");
- COMPARE(Rsubhn(v1.V4H(), v2.V4S(), v3.V4S()), "rsubhn v1.4h, v2.4s, v3.4s");
- COMPARE(Rsubhn(v2.V2S(), v3.V2D(), v4.V2D()), "rsubhn v2.2s, v3.2d, v4.2d");
- COMPARE(Rsubhn2(v0.V16B(), v1.V8H(), v5.V8H()),
- "rsubhn2 v0.16b, v1.8h, v5.8h");
- COMPARE(Rsubhn2(v1.V8H(), v2.V4S(), v6.V4S()), "rsubhn2 v1.8h, v2.4s, v6.4s");
- COMPARE(Rsubhn2(v2.V4S(), v3.V2D(), v7.V2D()), "rsubhn2 v2.4s, v3.2d, v7.2d");
+ COMPARE_MACRO(Rsubhn(v0.V8B(), v1.V8H(), v2.V8H()),
+ "rsubhn v0.8b, v1.8h, v2.8h");
+ COMPARE_MACRO(Rsubhn(v1.V4H(), v2.V4S(), v3.V4S()),
+ "rsubhn v1.4h, v2.4s, v3.4s");
+ COMPARE_MACRO(Rsubhn(v2.V2S(), v3.V2D(), v4.V2D()),
+ "rsubhn v2.2s, v3.2d, v4.2d");
+ COMPARE_MACRO(Rsubhn2(v0.V16B(), v1.V8H(), v5.V8H()),
+ "rsubhn2 v0.16b, v1.8h, v5.8h");
+ COMPARE_MACRO(Rsubhn2(v1.V8H(), v2.V4S(), v6.V4S()),
+ "rsubhn2 v1.8h, v2.4s, v6.4s");
+ COMPARE_MACRO(Rsubhn2(v2.V4S(), v3.V2D(), v7.V2D()),
+ "rsubhn2 v2.4s, v3.2d, v7.2d");
- COMPARE(Pmull(v0.V8H(), v1.V8B(), v2.V8B()), "pmull v0.8h, v1.8b, v2.8b");
- COMPARE(Pmull2(v2.V8H(), v3.V16B(), v4.V16B()),
- "pmull2 v2.8h, v3.16b, v4.16b");
+ COMPARE_MACRO(Pmull(v0.V8H(), v1.V8B(), v2.V8B()),
+ "pmull v0.8h, v1.8b, v2.8b");
+ COMPARE_MACRO(Pmull2(v2.V8H(), v3.V16B(), v4.V16B()),
+ "pmull2 v2.8h, v3.16b, v4.16b");
CLEANUP();
}
TEST(neon_perm) {
- SETUP_MACRO();
+ SETUP();
- #define DISASM_INST(M, S) \
- COMPARE(Trn1(v0.M, v1.M, v2.M), "trn1 v0." S ", v1." S ", v2." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Trn1(v0.M, v1.M, v2.M), "trn1 v0." S ", v1." S ", v2." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Trn2(v0.M, v1.M, v2.M), "trn2 v0." S ", v1." S ", v2." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Trn2(v0.M, v1.M, v2.M), "trn2 v0." S ", v1." S ", v2." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Uzp1(v0.M, v1.M, v2.M), "uzp1 v0." S ", v1." S ", v2." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Uzp1(v0.M, v1.M, v2.M), "uzp1 v0." S ", v1." S ", v2." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Uzp2(v0.M, v1.M, v2.M), "uzp2 v0." S ", v1." S ", v2." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Uzp2(v0.M, v1.M, v2.M), "uzp2 v0." S ", v1." S ", v2." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Zip1(v0.M, v1.M, v2.M), "zip1 v0." S ", v1." S ", v2." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Zip1(v0.M, v1.M, v2.M), "zip1 v0." S ", v1." S ", v2." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Zip2(v0.M, v1.M, v2.M), "zip2 v0." S ", v1." S ", v2." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Zip2(v0.M, v1.M, v2.M), "zip2 v0." S ", v1." S ", v2." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
CLEANUP();
}
TEST(neon_copy) {
- SETUP_MACRO();
+ SETUP();
- COMPARE(Ins(v1.V16B(), 4, v5.V16B(), 0), "mov v1.b[4], v5.b[0]");
- COMPARE(Ins(v2.V8B(), 5, v6.V8B(), 1), "mov v2.b[5], v6.b[1]");
- COMPARE(Ins(v3.B(), 6, v7.B(), 2), "mov v3.b[6], v7.b[2]");
- COMPARE(Ins(v4.V8H(), 7, v8.V8H(), 3), "mov v4.h[7], v8.h[3]");
- COMPARE(Ins(v5.V4H(), 3, v9.V4H(), 0), "mov v5.h[3], v9.h[0]");
- COMPARE(Ins(v6.H(), 6, v1.H(), 1), "mov v6.h[6], v1.h[1]");
- COMPARE(Ins(v7.V4S(), 2, v2.V4S(), 2), "mov v7.s[2], v2.s[2]");
- COMPARE(Ins(v8.V2S(), 1, v3.V2S(), 0), "mov v8.s[1], v3.s[0]");
- COMPARE(Ins(v9.S(), 0, v4.S(), 1), "mov v9.s[0], v4.s[1]");
- COMPARE(Ins(v1.V2D(), 1, v5.V2D(), 0), "mov v1.d[1], v5.d[0]");
- COMPARE(Ins(v2.D(), 0, v6.D(), 1), "mov v2.d[0], v6.d[1]");
+ COMPARE_MACRO(Ins(v1.V16B(), 4, v5.V16B(), 0), "mov v1.b[4], v5.b[0]");
+ COMPARE_MACRO(Ins(v2.V8B(), 5, v6.V8B(), 1), "mov v2.b[5], v6.b[1]");
+ COMPARE_MACRO(Ins(v3.B(), 6, v7.B(), 2), "mov v3.b[6], v7.b[2]");
+ COMPARE_MACRO(Ins(v4.V8H(), 7, v8.V8H(), 3), "mov v4.h[7], v8.h[3]");
+ COMPARE_MACRO(Ins(v5.V4H(), 3, v9.V4H(), 0), "mov v5.h[3], v9.h[0]");
+ COMPARE_MACRO(Ins(v6.H(), 6, v1.H(), 1), "mov v6.h[6], v1.h[1]");
+ COMPARE_MACRO(Ins(v7.V4S(), 2, v2.V4S(), 2), "mov v7.s[2], v2.s[2]");
+ COMPARE_MACRO(Ins(v8.V2S(), 1, v3.V2S(), 0), "mov v8.s[1], v3.s[0]");
+ COMPARE_MACRO(Ins(v9.S(), 0, v4.S(), 1), "mov v9.s[0], v4.s[1]");
+ COMPARE_MACRO(Ins(v1.V2D(), 1, v5.V2D(), 0), "mov v1.d[1], v5.d[0]");
+ COMPARE_MACRO(Ins(v2.D(), 0, v6.D(), 1), "mov v2.d[0], v6.d[1]");
- COMPARE(Mov(v3.V16B(), 4, v7.V16B(), 0), "mov v3.b[4], v7.b[0]");
- COMPARE(Mov(v4.V8B(), 5, v8.V8B(), 1), "mov v4.b[5], v8.b[1]");
- COMPARE(Mov(v5.B(), 6, v9.B(), 2), "mov v5.b[6], v9.b[2]");
- COMPARE(Mov(v6.V8H(), 7, v1.V8H(), 3), "mov v6.h[7], v1.h[3]");
- COMPARE(Mov(v7.V4H(), 0, v2.V4H(), 0), "mov v7.h[0], v2.h[0]");
- COMPARE(Mov(v8.H(), 1, v3.H(), 1), "mov v8.h[1], v3.h[1]");
- COMPARE(Mov(v9.V4S(), 2, v4.V4S(), 2), "mov v9.s[2], v4.s[2]");
- COMPARE(Mov(v1.V2S(), 3, v5.V2S(), 0), "mov v1.s[3], v5.s[0]");
- COMPARE(Mov(v2.S(), 0, v6.S(), 1), "mov v2.s[0], v6.s[1]");
- COMPARE(Mov(v3.V2D(), 1, v7.V2D(), 0), "mov v3.d[1], v7.d[0]");
- COMPARE(Mov(v4.D(), 0, v8.D(), 1), "mov v4.d[0], v8.d[1]");
+ COMPARE_MACRO(Mov(v3.V16B(), 4, v7.V16B(), 0), "mov v3.b[4], v7.b[0]");
+ COMPARE_MACRO(Mov(v4.V8B(), 5, v8.V8B(), 1), "mov v4.b[5], v8.b[1]");
+ COMPARE_MACRO(Mov(v5.B(), 6, v9.B(), 2), "mov v5.b[6], v9.b[2]");
+ COMPARE_MACRO(Mov(v6.V8H(), 7, v1.V8H(), 3), "mov v6.h[7], v1.h[3]");
+ COMPARE_MACRO(Mov(v7.V4H(), 0, v2.V4H(), 0), "mov v7.h[0], v2.h[0]");
+ COMPARE_MACRO(Mov(v8.H(), 1, v3.H(), 1), "mov v8.h[1], v3.h[1]");
+ COMPARE_MACRO(Mov(v9.V4S(), 2, v4.V4S(), 2), "mov v9.s[2], v4.s[2]");
+ COMPARE_MACRO(Mov(v1.V2S(), 3, v5.V2S(), 0), "mov v1.s[3], v5.s[0]");
+ COMPARE_MACRO(Mov(v2.S(), 0, v6.S(), 1), "mov v2.s[0], v6.s[1]");
+ COMPARE_MACRO(Mov(v3.V2D(), 1, v7.V2D(), 0), "mov v3.d[1], v7.d[0]");
+ COMPARE_MACRO(Mov(v4.D(), 0, v8.D(), 1), "mov v4.d[0], v8.d[1]");
- COMPARE(Ins(v1.V16B(), 4, w0), "mov v1.b[4], w0");
- COMPARE(Ins(v2.V8B(), 5, w1), "mov v2.b[5], w1");
- COMPARE(Ins(v3.B(), 6, w2), "mov v3.b[6], w2");
- COMPARE(Ins(v4.V8H(), 7, w3), "mov v4.h[7], w3");
- COMPARE(Ins(v5.V4H(), 3, w0), "mov v5.h[3], w0");
- COMPARE(Ins(v6.H(), 6, w1), "mov v6.h[6], w1");
- COMPARE(Ins(v7.V4S(), 2, w2), "mov v7.s[2], w2");
- COMPARE(Ins(v8.V2S(), 1, w0), "mov v8.s[1], w0");
- COMPARE(Ins(v9.S(), 0, w1), "mov v9.s[0], w1");
- COMPARE(Ins(v1.V2D(), 1, x0), "mov v1.d[1], x0");
- COMPARE(Ins(v2.D(), 0, x1), "mov v2.d[0], x1");
+ COMPARE_MACRO(Ins(v1.V16B(), 4, w0), "mov v1.b[4], w0");
+ COMPARE_MACRO(Ins(v2.V8B(), 5, w1), "mov v2.b[5], w1");
+ COMPARE_MACRO(Ins(v3.B(), 6, w2), "mov v3.b[6], w2");
+ COMPARE_MACRO(Ins(v4.V8H(), 7, w3), "mov v4.h[7], w3");
+ COMPARE_MACRO(Ins(v5.V4H(), 3, w0), "mov v5.h[3], w0");
+ COMPARE_MACRO(Ins(v6.H(), 6, w1), "mov v6.h[6], w1");
+ COMPARE_MACRO(Ins(v7.V4S(), 2, w2), "mov v7.s[2], w2");
+ COMPARE_MACRO(Ins(v8.V2S(), 1, w0), "mov v8.s[1], w0");
+ COMPARE_MACRO(Ins(v9.S(), 0, w1), "mov v9.s[0], w1");
+ COMPARE_MACRO(Ins(v1.V2D(), 1, x0), "mov v1.d[1], x0");
+ COMPARE_MACRO(Ins(v2.D(), 0, x1), "mov v2.d[0], x1");
- COMPARE(Mov(v1.V16B(), 4, w0), "mov v1.b[4], w0");
- COMPARE(Mov(v2.V8B(), 5, w1), "mov v2.b[5], w1");
- COMPARE(Mov(v3.B(), 6, w2), "mov v3.b[6], w2");
- COMPARE(Mov(v4.V8H(), 7, w3), "mov v4.h[7], w3");
- COMPARE(Mov(v5.V4H(), 3, w0), "mov v5.h[3], w0");
- COMPARE(Mov(v6.H(), 6, w1), "mov v6.h[6], w1");
- COMPARE(Mov(v7.V4S(), 2, w2), "mov v7.s[2], w2");
- COMPARE(Mov(v8.V2S(), 1, w0), "mov v8.s[1], w0");
- COMPARE(Mov(v9.S(), 0, w1), "mov v9.s[0], w1");
- COMPARE(Mov(v1.V2D(), 1, x0), "mov v1.d[1], x0");
- COMPARE(Mov(v2.D(), 0, x1), "mov v2.d[0], x1");
+ COMPARE_MACRO(Mov(v1.V16B(), 4, w0), "mov v1.b[4], w0");
+ COMPARE_MACRO(Mov(v2.V8B(), 5, w1), "mov v2.b[5], w1");
+ COMPARE_MACRO(Mov(v3.B(), 6, w2), "mov v3.b[6], w2");
+ COMPARE_MACRO(Mov(v4.V8H(), 7, w3), "mov v4.h[7], w3");
+ COMPARE_MACRO(Mov(v5.V4H(), 3, w0), "mov v5.h[3], w0");
+ COMPARE_MACRO(Mov(v6.H(), 6, w1), "mov v6.h[6], w1");
+ COMPARE_MACRO(Mov(v7.V4S(), 2, w2), "mov v7.s[2], w2");
+ COMPARE_MACRO(Mov(v8.V2S(), 1, w0), "mov v8.s[1], w0");
+ COMPARE_MACRO(Mov(v9.S(), 0, w1), "mov v9.s[0], w1");
+ COMPARE_MACRO(Mov(v1.V2D(), 1, x0), "mov v1.d[1], x0");
+ COMPARE_MACRO(Mov(v2.D(), 0, x1), "mov v2.d[0], x1");
- COMPARE(Dup(v5.V8B(), v9.V8B(), 6), "dup v5.8b, v9.b[6]");
- COMPARE(Dup(v6.V16B(), v1.V16B(), 5), "dup v6.16b, v1.b[5]");
- COMPARE(Dup(v7.V4H(), v2.V4H(), 4), "dup v7.4h, v2.h[4]");
- COMPARE(Dup(v8.V8H(), v3.V8H(), 3), "dup v8.8h, v3.h[3]");
- COMPARE(Dup(v9.V2S(), v4.V2S(), 2), "dup v9.2s, v4.s[2]");
- COMPARE(Dup(v1.V4S(), v5.V4S(), 1), "dup v1.4s, v5.s[1]");
- COMPARE(Dup(v2.V2D(), v6.V2D(), 0), "dup v2.2d, v6.d[0]");
+ COMPARE_MACRO(Dup(v5.V8B(), v9.V8B(), 6), "dup v5.8b, v9.b[6]");
+ COMPARE_MACRO(Dup(v6.V16B(), v1.V16B(), 5), "dup v6.16b, v1.b[5]");
+ COMPARE_MACRO(Dup(v7.V4H(), v2.V4H(), 4), "dup v7.4h, v2.h[4]");
+ COMPARE_MACRO(Dup(v8.V8H(), v3.V8H(), 3), "dup v8.8h, v3.h[3]");
+ COMPARE_MACRO(Dup(v9.V2S(), v4.V2S(), 2), "dup v9.2s, v4.s[2]");
+ COMPARE_MACRO(Dup(v1.V4S(), v5.V4S(), 1), "dup v1.4s, v5.s[1]");
+ COMPARE_MACRO(Dup(v2.V2D(), v6.V2D(), 0), "dup v2.2d, v6.d[0]");
- COMPARE(Dup(v5.B(), v9.B(), 6), "mov b5, v9.b[6]");
- COMPARE(Dup(v7.H(), v2.H(), 4), "mov h7, v2.h[4]");
- COMPARE(Dup(v9.S(), v4.S(), 2), "mov s9, v4.s[2]");
- COMPARE(Dup(v2.D(), v6.D(), 0), "mov d2, v6.d[0]");
+ COMPARE_MACRO(Dup(v5.B(), v9.B(), 6), "mov b5, v9.b[6]");
+ COMPARE_MACRO(Dup(v7.H(), v2.H(), 4), "mov h7, v2.h[4]");
+ COMPARE_MACRO(Dup(v9.S(), v4.S(), 2), "mov s9, v4.s[2]");
+ COMPARE_MACRO(Dup(v2.D(), v6.D(), 0), "mov d2, v6.d[0]");
- COMPARE(Mov(v5.B(), v9.B(), 6), "mov b5, v9.b[6]");
- COMPARE(Mov(v7.H(), v2.H(), 4), "mov h7, v2.h[4]");
- COMPARE(Mov(v9.S(), v4.S(), 2), "mov s9, v4.s[2]");
- COMPARE(Mov(v2.D(), v6.D(), 0), "mov d2, v6.d[0]");
+ COMPARE_MACRO(Mov(v5.B(), v9.B(), 6), "mov b5, v9.b[6]");
+ COMPARE_MACRO(Mov(v7.H(), v2.H(), 4), "mov h7, v2.h[4]");
+ COMPARE_MACRO(Mov(v9.S(), v4.S(), 2), "mov s9, v4.s[2]");
+ COMPARE_MACRO(Mov(v2.D(), v6.D(), 0), "mov d2, v6.d[0]");
- COMPARE(Mov(v0.B(), v1.V8B(), 7), "mov b0, v1.b[7]");
- COMPARE(Mov(b2, v3.V16B(), 15), "mov b2, v3.b[15]");
- COMPARE(Mov(v4.H(), v5.V4H(), 3), "mov h4, v5.h[3]");
- COMPARE(Mov(h6, v7.V8H(), 7), "mov h6, v7.h[7]");
- COMPARE(Mov(v8.S(), v9.V2S(), 1), "mov s8, v9.s[1]");
- COMPARE(Mov(s10, v11.V4S(), 3), "mov s10, v11.s[3]");
- COMPARE(Mov(v12.D(), v13.V2D(), 1), "mov d12, v13.d[1]");
+ COMPARE_MACRO(Mov(v0.B(), v1.V8B(), 7), "mov b0, v1.b[7]");
+ COMPARE_MACRO(Mov(b2, v3.V16B(), 15), "mov b2, v3.b[15]");
+ COMPARE_MACRO(Mov(v4.H(), v5.V4H(), 3), "mov h4, v5.h[3]");
+ COMPARE_MACRO(Mov(h6, v7.V8H(), 7), "mov h6, v7.h[7]");
+ COMPARE_MACRO(Mov(v8.S(), v9.V2S(), 1), "mov s8, v9.s[1]");
+ COMPARE_MACRO(Mov(s10, v11.V4S(), 3), "mov s10, v11.s[3]");
+ COMPARE_MACRO(Mov(v12.D(), v13.V2D(), 1), "mov d12, v13.d[1]");
- COMPARE(Dup(v5.V8B(), w0), "dup v5.8b, w0");
- COMPARE(Dup(v6.V16B(), w1), "dup v6.16b, w1");
- COMPARE(Dup(v7.V4H(), w2), "dup v7.4h, w2");
- COMPARE(Dup(v8.V8H(), w3), "dup v8.8h, w3");
- COMPARE(Dup(v9.V2S(), w4), "dup v9.2s, w4");
- COMPARE(Dup(v1.V4S(), w5), "dup v1.4s, w5");
- COMPARE(Dup(v2.V2D(), x6), "dup v2.2d, x6");
+ COMPARE_MACRO(Dup(v5.V8B(), w0), "dup v5.8b, w0");
+ COMPARE_MACRO(Dup(v6.V16B(), w1), "dup v6.16b, w1");
+ COMPARE_MACRO(Dup(v7.V4H(), w2), "dup v7.4h, w2");
+ COMPARE_MACRO(Dup(v8.V8H(), w3), "dup v8.8h, w3");
+ COMPARE_MACRO(Dup(v9.V2S(), w4), "dup v9.2s, w4");
+ COMPARE_MACRO(Dup(v1.V4S(), w5), "dup v1.4s, w5");
+ COMPARE_MACRO(Dup(v2.V2D(), x6), "dup v2.2d, x6");
- COMPARE(Smov(w0, v1.V16B(), 4), "smov w0, v1.b[4]");
- COMPARE(Smov(w1, v2.V8B(), 5), "smov w1, v2.b[5]");
- COMPARE(Smov(w2, v3.B(), 6), "smov w2, v3.b[6]");
- COMPARE(Smov(w3, v4.V8H(), 7), "smov w3, v4.h[7]");
- COMPARE(Smov(w0, v5.V4H(), 3), "smov w0, v5.h[3]");
- COMPARE(Smov(w1, v6.H(), 6), "smov w1, v6.h[6]");
+ COMPARE_MACRO(Smov(w0, v1.V16B(), 4), "smov w0, v1.b[4]");
+ COMPARE_MACRO(Smov(w1, v2.V8B(), 5), "smov w1, v2.b[5]");
+ COMPARE_MACRO(Smov(w2, v3.B(), 6), "smov w2, v3.b[6]");
+ COMPARE_MACRO(Smov(w3, v4.V8H(), 7), "smov w3, v4.h[7]");
+ COMPARE_MACRO(Smov(w0, v5.V4H(), 3), "smov w0, v5.h[3]");
+ COMPARE_MACRO(Smov(w1, v6.H(), 6), "smov w1, v6.h[6]");
- COMPARE(Smov(x0, v1.V16B(), 4), "smov x0, v1.b[4]");
- COMPARE(Smov(x1, v2.V8B(), 5), "smov x1, v2.b[5]");
- COMPARE(Smov(x2, v3.B(), 6), "smov x2, v3.b[6]");
- COMPARE(Smov(x3, v4.V8H(), 7), "smov x3, v4.h[7]");
- COMPARE(Smov(x0, v5.V4H(), 3), "smov x0, v5.h[3]");
- COMPARE(Smov(x1, v6.H(), 6), "smov x1, v6.h[6]");
- COMPARE(Smov(x2, v7.V4S(), 2), "smov x2, v7.s[2]");
- COMPARE(Smov(x0, v8.V2S(), 1), "smov x0, v8.s[1]");
- COMPARE(Smov(x1, v9.S(), 0), "smov x1, v9.s[0]");
+ COMPARE_MACRO(Smov(x0, v1.V16B(), 4), "smov x0, v1.b[4]");
+ COMPARE_MACRO(Smov(x1, v2.V8B(), 5), "smov x1, v2.b[5]");
+ COMPARE_MACRO(Smov(x2, v3.B(), 6), "smov x2, v3.b[6]");
+ COMPARE_MACRO(Smov(x3, v4.V8H(), 7), "smov x3, v4.h[7]");
+ COMPARE_MACRO(Smov(x0, v5.V4H(), 3), "smov x0, v5.h[3]");
+ COMPARE_MACRO(Smov(x1, v6.H(), 6), "smov x1, v6.h[6]");
+ COMPARE_MACRO(Smov(x2, v7.V4S(), 2), "smov x2, v7.s[2]");
+ COMPARE_MACRO(Smov(x0, v8.V2S(), 1), "smov x0, v8.s[1]");
+ COMPARE_MACRO(Smov(x1, v9.S(), 0), "smov x1, v9.s[0]");
- COMPARE(Umov(w0, v1.V16B(), 4), "umov w0, v1.b[4]");
- COMPARE(Umov(w1, v2.V8B(), 5), "umov w1, v2.b[5]");
- COMPARE(Umov(w2, v3.B(), 6), "umov w2, v3.b[6]");
- COMPARE(Umov(w3, v4.V8H(), 7), "umov w3, v4.h[7]");
- COMPARE(Umov(w0, v5.V4H(), 3), "umov w0, v5.h[3]");
- COMPARE(Umov(w1, v6.H(), 6), "umov w1, v6.h[6]");
- COMPARE(Umov(w2, v7.V4S(), 2), "mov w2, v7.s[2]");
- COMPARE(Umov(w0, v8.V2S(), 1), "mov w0, v8.s[1]");
- COMPARE(Umov(w1, v9.S(), 0), "mov w1, v9.s[0]");
- COMPARE(Umov(x0, v1.V2D(), 1), "mov x0, v1.d[1]");
- COMPARE(Umov(x1, v2.D(), 0), "mov x1, v2.d[0]");
+ COMPARE_MACRO(Umov(w0, v1.V16B(), 4), "umov w0, v1.b[4]");
+ COMPARE_MACRO(Umov(w1, v2.V8B(), 5), "umov w1, v2.b[5]");
+ COMPARE_MACRO(Umov(w2, v3.B(), 6), "umov w2, v3.b[6]");
+ COMPARE_MACRO(Umov(w3, v4.V8H(), 7), "umov w3, v4.h[7]");
+ COMPARE_MACRO(Umov(w0, v5.V4H(), 3), "umov w0, v5.h[3]");
+ COMPARE_MACRO(Umov(w1, v6.H(), 6), "umov w1, v6.h[6]");
+ COMPARE_MACRO(Umov(w2, v7.V4S(), 2), "mov w2, v7.s[2]");
+ COMPARE_MACRO(Umov(w0, v8.V2S(), 1), "mov w0, v8.s[1]");
+ COMPARE_MACRO(Umov(w1, v9.S(), 0), "mov w1, v9.s[0]");
+ COMPARE_MACRO(Umov(x0, v1.V2D(), 1), "mov x0, v1.d[1]");
+ COMPARE_MACRO(Umov(x1, v2.D(), 0), "mov x1, v2.d[0]");
- COMPARE(Mov(w2, v7.V4S(), 2), "mov w2, v7.s[2]");
- COMPARE(Mov(w0, v8.V2S(), 1), "mov w0, v8.s[1]");
- COMPARE(Mov(w1, v9.S(), 0), "mov w1, v9.s[0]");
- COMPARE(Mov(x0, v1.V2D(), 1), "mov x0, v1.d[1]");
- COMPARE(Mov(x1, v2.D(), 0), "mov x1, v2.d[0]");
+ COMPARE_MACRO(Mov(w2, v7.V4S(), 2), "mov w2, v7.s[2]");
+ COMPARE_MACRO(Mov(w0, v8.V2S(), 1), "mov w0, v8.s[1]");
+ COMPARE_MACRO(Mov(w1, v9.S(), 0), "mov w1, v9.s[0]");
+ COMPARE_MACRO(Mov(x0, v1.V2D(), 1), "mov x0, v1.d[1]");
+ COMPARE_MACRO(Mov(x1, v2.D(), 0), "mov x1, v2.d[0]");
CLEANUP();
}
TEST(neon_table) {
- SETUP_MACRO();
+ SETUP();
- COMPARE(Tbl(v0.V8B(), v1.V16B(), v2.V8B()), "tbl v0.8b, {v1.16b}, v2.8b");
- COMPARE(Tbl(v3.V8B(), v4.V16B(), v5.V16B(), v6.V8B()),
- "tbl v3.8b, {v4.16b, v5.16b}, v6.8b");
- COMPARE(Tbl(v7.V8B(), v8.V16B(), v9.V16B(), v10.V16B(), v11.V8B()),
- "tbl v7.8b, {v8.16b, v9.16b, v10.16b}, v11.8b");
- COMPARE(Tbl(v12.V8B(), v13.V16B(), v14.V16B(), v15.V16B(), v16.V16B(), v17.V8B()),
- "tbl v12.8b, {v13.16b, v14.16b, v15.16b, v16.16b}, v17.8b");
- COMPARE(Tbl(v18.V16B(), v19.V16B(), v20.V16B()), "tbl v18.16b, {v19.16b}, v20.16b");
- COMPARE(Tbl(v21.V16B(), v22.V16B(), v23.V16B(), v24.V16B()),
- "tbl v21.16b, {v22.16b, v23.16b}, v24.16b");
- COMPARE(Tbl(v25.V16B(), v26.V16B(), v27.V16B(), v28.V16B(), v29.V16B()),
- "tbl v25.16b, {v26.16b, v27.16b, v28.16b}, v29.16b");
- COMPARE(Tbl(v30.V16B(), v31.V16B(), v0.V16B(), v1.V16B(), v2.V16B(), v3.V16B()),
- "tbl v30.16b, {v31.16b, v0.16b, v1.16b, v2.16b}, v3.16b");
+ COMPARE_MACRO(Tbl(v0.V8B(), v1.V16B(), v2.V8B()),
+ "tbl v0.8b, {v1.16b}, v2.8b");
+ COMPARE_MACRO(Tbl(v3.V8B(), v4.V16B(), v5.V16B(), v6.V8B()),
+ "tbl v3.8b, {v4.16b, v5.16b}, v6.8b");
+ COMPARE_MACRO(Tbl(v7.V8B(), v8.V16B(), v9.V16B(), v10.V16B(), v11.V8B()),
+ "tbl v7.8b, {v8.16b, v9.16b, v10.16b}, v11.8b");
+ COMPARE_MACRO(Tbl(v12.V8B(),
+ v13.V16B(),
+ v14.V16B(),
+ v15.V16B(),
+ v16.V16B(),
+ v17.V8B()),
+ "tbl v12.8b, {v13.16b, v14.16b, v15.16b, v16.16b}, v17.8b");
+ COMPARE_MACRO(Tbl(v18.V16B(), v19.V16B(), v20.V16B()),
+ "tbl v18.16b, {v19.16b}, v20.16b");
+ COMPARE_MACRO(Tbl(v21.V16B(), v22.V16B(), v23.V16B(), v24.V16B()),
+ "tbl v21.16b, {v22.16b, v23.16b}, v24.16b");
+ COMPARE_MACRO(Tbl(v25.V16B(), v26.V16B(), v27.V16B(), v28.V16B(), v29.V16B()),
+ "tbl v25.16b, {v26.16b, v27.16b, v28.16b}, v29.16b");
+ COMPARE_MACRO(Tbl(v30.V16B(),
+ v31.V16B(),
+ v0.V16B(),
+ v1.V16B(),
+ v2.V16B(),
+ v3.V16B()),
+ "tbl v30.16b, {v31.16b, v0.16b, v1.16b, v2.16b}, v3.16b");
- COMPARE(Tbx(v0.V8B(), v1.V16B(), v2.V8B()), "tbx v0.8b, {v1.16b}, v2.8b");
- COMPARE(Tbx(v3.V8B(), v4.V16B(), v5.V16B(), v6.V8B()),
- "tbx v3.8b, {v4.16b, v5.16b}, v6.8b");
- COMPARE(Tbx(v7.V8B(), v8.V16B(), v9.V16B(), v10.V16B(), v11.V8B()),
- "tbx v7.8b, {v8.16b, v9.16b, v10.16b}, v11.8b");
- COMPARE(Tbx(v12.V8B(), v13.V16B(), v14.V16B(), v15.V16B(), v16.V16B(), v17.V8B()),
- "tbx v12.8b, {v13.16b, v14.16b, v15.16b, v16.16b}, v17.8b");
- COMPARE(Tbx(v18.V16B(), v19.V16B(), v20.V16B()), "tbx v18.16b, {v19.16b}, v20.16b");
- COMPARE(Tbx(v21.V16B(), v22.V16B(), v23.V16B(), v24.V16B()),
- "tbx v21.16b, {v22.16b, v23.16b}, v24.16b");
- COMPARE(Tbx(v25.V16B(), v26.V16B(), v27.V16B(), v28.V16B(), v29.V16B()),
- "tbx v25.16b, {v26.16b, v27.16b, v28.16b}, v29.16b");
- COMPARE(Tbx(v30.V16B(), v31.V16B(), v0.V16B(), v1.V16B(), v2.V16B(), v3.V16B()),
- "tbx v30.16b, {v31.16b, v0.16b, v1.16b, v2.16b}, v3.16b");
+ COMPARE_MACRO(Tbx(v0.V8B(), v1.V16B(), v2.V8B()),
+ "tbx v0.8b, {v1.16b}, v2.8b");
+ COMPARE_MACRO(Tbx(v3.V8B(), v4.V16B(), v5.V16B(), v6.V8B()),
+ "tbx v3.8b, {v4.16b, v5.16b}, v6.8b");
+ COMPARE_MACRO(Tbx(v7.V8B(), v8.V16B(), v9.V16B(), v10.V16B(), v11.V8B()),
+ "tbx v7.8b, {v8.16b, v9.16b, v10.16b}, v11.8b");
+ COMPARE_MACRO(Tbx(v12.V8B(),
+ v13.V16B(),
+ v14.V16B(),
+ v15.V16B(),
+ v16.V16B(),
+ v17.V8B()),
+ "tbx v12.8b, {v13.16b, v14.16b, v15.16b, v16.16b}, v17.8b");
+ COMPARE_MACRO(Tbx(v18.V16B(), v19.V16B(), v20.V16B()),
+ "tbx v18.16b, {v19.16b}, v20.16b");
+ COMPARE_MACRO(Tbx(v21.V16B(), v22.V16B(), v23.V16B(), v24.V16B()),
+ "tbx v21.16b, {v22.16b, v23.16b}, v24.16b");
+ COMPARE_MACRO(Tbx(v25.V16B(), v26.V16B(), v27.V16B(), v28.V16B(), v29.V16B()),
+ "tbx v25.16b, {v26.16b, v27.16b, v28.16b}, v29.16b");
+ COMPARE_MACRO(Tbx(v30.V16B(),
+ v31.V16B(),
+ v0.V16B(),
+ v1.V16B(),
+ v2.V16B(),
+ v3.V16B()),
+ "tbx v30.16b, {v31.16b, v0.16b, v1.16b, v2.16b}, v3.16b");
CLEANUP();
}
TEST(neon_extract) {
- SETUP_MACRO();
+ SETUP();
- COMPARE(Ext(v4.V8B(), v5.V8B(), v6.V8B(), 0), "ext v4.8b, v5.8b, v6.8b, #0");
- COMPARE(Ext(v1.V8B(), v2.V8B(), v3.V8B(), 7), "ext v1.8b, v2.8b, v3.8b, #7");
- COMPARE(Ext(v1.V16B(), v2.V16B(), v3.V16B(), 0),
- "ext v1.16b, v2.16b, v3.16b, #0");
- COMPARE(Ext(v1.V16B(), v2.V16B(), v3.V16B(), 15),
- "ext v1.16b, v2.16b, v3.16b, #15");
+ COMPARE_MACRO(Ext(v4.V8B(), v5.V8B(), v6.V8B(), 0),
+ "ext v4.8b, v5.8b, v6.8b, #0");
+ COMPARE_MACRO(Ext(v1.V8B(), v2.V8B(), v3.V8B(), 7),
+ "ext v1.8b, v2.8b, v3.8b, #7");
+ COMPARE_MACRO(Ext(v1.V16B(), v2.V16B(), v3.V16B(), 0),
+ "ext v1.16b, v2.16b, v3.16b, #0");
+ COMPARE_MACRO(Ext(v1.V16B(), v2.V16B(), v3.V16B(), 15),
+ "ext v1.16b, v2.16b, v3.16b, #15");
CLEANUP();
}
TEST(neon_modimm) {
- SETUP_MACRO();
+ SETUP();
- COMPARE(Orr(v4.V4H(), 0xaa, 0), "orr v4.4h, #0xaa, lsl #0");
- COMPARE(Orr(v1.V8H(), 0xcc, 8), "orr v1.8h, #0xcc, lsl #8");
- COMPARE(Orr(v4.V2S(), 0xaa, 0), "orr v4.2s, #0xaa, lsl #0");
- COMPARE(Orr(v1.V2S(), 0xcc, 8), "orr v1.2s, #0xcc, lsl #8");
- COMPARE(Orr(v4.V4S(), 0xaa, 16), "orr v4.4s, #0xaa, lsl #16");
- COMPARE(Orr(v1.V4S(), 0xcc, 24), "orr v1.4s, #0xcc, lsl #24");
+ COMPARE_MACRO(Orr(v4.V4H(), 0xaa, 0), "orr v4.4h, #0xaa, lsl #0");
+ COMPARE_MACRO(Orr(v1.V8H(), 0xcc, 8), "orr v1.8h, #0xcc, lsl #8");
+ COMPARE_MACRO(Orr(v4.V2S(), 0xaa, 0), "orr v4.2s, #0xaa, lsl #0");
+ COMPARE_MACRO(Orr(v1.V2S(), 0xcc, 8), "orr v1.2s, #0xcc, lsl #8");
+ COMPARE_MACRO(Orr(v4.V4S(), 0xaa, 16), "orr v4.4s, #0xaa, lsl #16");
+ COMPARE_MACRO(Orr(v1.V4S(), 0xcc, 24), "orr v1.4s, #0xcc, lsl #24");
- COMPARE(Bic(v4.V4H(), 0xaa, 0), "bic v4.4h, #0xaa, lsl #0");
- COMPARE(Bic(v1.V8H(), 0xcc, 8), "bic v1.8h, #0xcc, lsl #8");
- COMPARE(Bic(v4.V2S(), 0xaa, 0), "bic v4.2s, #0xaa, lsl #0");
- COMPARE(Bic(v1.V2S(), 0xcc, 8), "bic v1.2s, #0xcc, lsl #8");
- COMPARE(Bic(v4.V4S(), 0xaa, 16), "bic v4.4s, #0xaa, lsl #16");
- COMPARE(Bic(v1.V4S(), 0xcc, 24), "bic v1.4s, #0xcc, lsl #24");
+ COMPARE_MACRO(Bic(v4.V4H(), 0xaa, 0), "bic v4.4h, #0xaa, lsl #0");
+ COMPARE_MACRO(Bic(v1.V8H(), 0xcc, 8), "bic v1.8h, #0xcc, lsl #8");
+ COMPARE_MACRO(Bic(v4.V2S(), 0xaa, 0), "bic v4.2s, #0xaa, lsl #0");
+ COMPARE_MACRO(Bic(v1.V2S(), 0xcc, 8), "bic v1.2s, #0xcc, lsl #8");
+ COMPARE_MACRO(Bic(v4.V4S(), 0xaa, 16), "bic v4.4s, #0xaa, lsl #16");
+ COMPARE_MACRO(Bic(v1.V4S(), 0xcc, 24), "bic v1.4s, #0xcc, lsl #24");
- COMPARE(Mvni(v4.V4H(), 0xaa, LSL, 0), "mvni v4.4h, #0xaa, lsl #0");
- COMPARE(Mvni(v1.V8H(), 0xcc, LSL, 8), "mvni v1.8h, #0xcc, lsl #8");
- COMPARE(Mvni(v4.V2S(), 0xaa, LSL, 0), "mvni v4.2s, #0xaa, lsl #0");
- COMPARE(Mvni(v1.V2S(), 0xcc, LSL, 8), "mvni v1.2s, #0xcc, lsl #8");
- COMPARE(Mvni(v4.V4S(), 0xaa, LSL, 16), "mvni v4.4s, #0xaa, lsl #16");
- COMPARE(Mvni(v1.V4S(), 0xcc, LSL, 24), "mvni v1.4s, #0xcc, lsl #24");
+ COMPARE_MACRO(Mvni(v4.V4H(), 0xaa, LSL, 0), "mvni v4.4h, #0xaa, lsl #0");
+ COMPARE_MACRO(Mvni(v1.V8H(), 0xcc, LSL, 8), "mvni v1.8h, #0xcc, lsl #8");
+ COMPARE_MACRO(Mvni(v4.V2S(), 0xaa, LSL, 0), "mvni v4.2s, #0xaa, lsl #0");
+ COMPARE_MACRO(Mvni(v1.V2S(), 0xcc, LSL, 8), "mvni v1.2s, #0xcc, lsl #8");
+ COMPARE_MACRO(Mvni(v4.V4S(), 0xaa, LSL, 16), "mvni v4.4s, #0xaa, lsl #16");
+ COMPARE_MACRO(Mvni(v1.V4S(), 0xcc, LSL, 24), "mvni v1.4s, #0xcc, lsl #24");
- COMPARE(Mvni(v4.V2S(), 0xaa, MSL, 8), "mvni v4.2s, #0xaa, msl #8");
- COMPARE(Mvni(v1.V2S(), 0xcc, MSL, 16), "mvni v1.2s, #0xcc, msl #16");
- COMPARE(Mvni(v4.V4S(), 0xaa, MSL, 8), "mvni v4.4s, #0xaa, msl #8");
- COMPARE(Mvni(v1.V4S(), 0xcc, MSL, 16), "mvni v1.4s, #0xcc, msl #16");
+ COMPARE_MACRO(Mvni(v4.V2S(), 0xaa, MSL, 8), "mvni v4.2s, #0xaa, msl #8");
+ COMPARE_MACRO(Mvni(v1.V2S(), 0xcc, MSL, 16), "mvni v1.2s, #0xcc, msl #16");
+ COMPARE_MACRO(Mvni(v4.V4S(), 0xaa, MSL, 8), "mvni v4.4s, #0xaa, msl #8");
+ COMPARE_MACRO(Mvni(v1.V4S(), 0xcc, MSL, 16), "mvni v1.4s, #0xcc, msl #16");
- COMPARE(Movi(v4.V8B(), 0xaa), "movi v4.8b, #0xaa");
- COMPARE(Movi(v1.V16B(), 0xcc), "movi v1.16b, #0xcc");
+ COMPARE_MACRO(Movi(v4.V8B(), 0xaa), "movi v4.8b, #0xaa");
+ COMPARE_MACRO(Movi(v1.V16B(), 0xcc), "movi v1.16b, #0xcc");
- COMPARE(Movi(v4.V4H(), 0xaa, LSL, 0), "movi v4.4h, #0xaa, lsl #0");
- COMPARE(Movi(v1.V8H(), 0xcc, LSL, 8), "movi v1.8h, #0xcc, lsl #8");
+ COMPARE_MACRO(Movi(v4.V4H(), 0xaa, LSL, 0), "movi v4.4h, #0xaa, lsl #0");
+ COMPARE_MACRO(Movi(v1.V8H(), 0xcc, LSL, 8), "movi v1.8h, #0xcc, lsl #8");
- COMPARE(Movi(v4.V2S(), 0xaa, LSL, 0), "movi v4.2s, #0xaa, lsl #0");
- COMPARE(Movi(v1.V2S(), 0xcc, LSL, 8), "movi v1.2s, #0xcc, lsl #8");
- COMPARE(Movi(v4.V4S(), 0xaa, LSL, 16), "movi v4.4s, #0xaa, lsl #16");
- COMPARE(Movi(v1.V4S(), 0xcc, LSL, 24), "movi v1.4s, #0xcc, lsl #24");
+ COMPARE_MACRO(Movi(v4.V2S(), 0xaa, LSL, 0), "movi v4.2s, #0xaa, lsl #0");
+ COMPARE_MACRO(Movi(v1.V2S(), 0xcc, LSL, 8), "movi v1.2s, #0xcc, lsl #8");
+ COMPARE_MACRO(Movi(v4.V4S(), 0xaa, LSL, 16), "movi v4.4s, #0xaa, lsl #16");
+ COMPARE_MACRO(Movi(v1.V4S(), 0xcc, LSL, 24), "movi v1.4s, #0xcc, lsl #24");
- COMPARE(Movi(v4.V2S(), 0xaa, MSL, 8), "movi v4.2s, #0xaa, msl #8");
- COMPARE(Movi(v1.V2S(), 0xcc, MSL, 16), "movi v1.2s, #0xcc, msl #16");
- COMPARE(Movi(v4.V4S(), 0xaa, MSL, 8), "movi v4.4s, #0xaa, msl #8");
- COMPARE(Movi(v1.V4S(), 0xcc, MSL, 16), "movi v1.4s, #0xcc, msl #16");
+ COMPARE_MACRO(Movi(v4.V2S(), 0xaa, MSL, 8), "movi v4.2s, #0xaa, msl #8");
+ COMPARE_MACRO(Movi(v1.V2S(), 0xcc, MSL, 16), "movi v1.2s, #0xcc, msl #16");
+ COMPARE_MACRO(Movi(v4.V4S(), 0xaa, MSL, 8), "movi v4.4s, #0xaa, msl #8");
+ COMPARE_MACRO(Movi(v1.V4S(), 0xcc, MSL, 16), "movi v1.4s, #0xcc, msl #16");
- COMPARE(Movi(d2, 0xffff0000ffffff), "movi d2, #0xffff0000ffffff");
- COMPARE(Movi(v1.V2D(), 0xffff0000ffffff), "movi v1.2d, #0xffff0000ffffff");
+ COMPARE_MACRO(Movi(d2, 0xffff0000ffffff), "movi d2, #0xffff0000ffffff");
+ COMPARE_MACRO(Movi(v1.V2D(), 0xffff0000ffffff),
+ "movi v1.2d, #0xffff0000ffffff");
- COMPARE(Fmov(v0.V2S(), 1.0f), "fmov v0.2s, #0x70 (1.0000)");
- COMPARE(Fmov(v31.V2S(), -13.0f), "fmov v31.2s, #0xaa (-13.0000)");
- COMPARE(Fmov(v0.V4S(), 1.0f), "fmov v0.4s, #0x70 (1.0000)");
- COMPARE(Fmov(v31.V4S(), -13.0f), "fmov v31.4s, #0xaa (-13.0000)");
- COMPARE(Fmov(v1.V2D(), 1.0), "fmov v1.2d, #0x70 (1.0000)");
- COMPARE(Fmov(v29.V2D(), -13.0), "fmov v29.2d, #0xaa (-13.0000)");
+ COMPARE_MACRO(Fmov(v0.V2S(), 1.0f), "fmov v0.2s, #0x70 (1.0000)");
+ COMPARE_MACRO(Fmov(v31.V2S(), -13.0f), "fmov v31.2s, #0xaa (-13.0000)");
+ COMPARE_MACRO(Fmov(v0.V4S(), 1.0f), "fmov v0.4s, #0x70 (1.0000)");
+ COMPARE_MACRO(Fmov(v31.V4S(), -13.0f), "fmov v31.4s, #0xaa (-13.0000)");
+ COMPARE_MACRO(Fmov(v1.V2D(), 1.0), "fmov v1.2d, #0x70 (1.0000)");
+ COMPARE_MACRO(Fmov(v29.V2D(), -13.0), "fmov v29.2d, #0xaa (-13.0000)");
// An unallocated form of fmov.
COMPARE(dci(0x2f07ffff), "unallocated (NEONModifiedImmediate)");
@@ -5177,795 +5591,1097 @@
TEST(neon_2regmisc) {
- SETUP_MACRO();
+ SETUP();
- COMPARE(Shll(v1.V8H(), v8.V8B(), 8), "shll v1.8h, v8.8b, #8");
- COMPARE(Shll(v3.V4S(), v1.V4H(), 16), "shll v3.4s, v1.4h, #16");
- COMPARE(Shll(v5.V2D(), v3.V2S(), 32), "shll v5.2d, v3.2s, #32");
- COMPARE(Shll2(v2.V8H(), v9.V16B(), 8), "shll2 v2.8h, v9.16b, #8");
- COMPARE(Shll2(v4.V4S(), v2.V8H(), 16), "shll2 v4.4s, v2.8h, #16");
- COMPARE(Shll2(v6.V2D(), v4.V4S(), 32), "shll2 v6.2d, v4.4s, #32");
+ COMPARE_MACRO(Shll(v1.V8H(), v8.V8B(), 8), "shll v1.8h, v8.8b, #8");
+ COMPARE_MACRO(Shll(v3.V4S(), v1.V4H(), 16), "shll v3.4s, v1.4h, #16");
+ COMPARE_MACRO(Shll(v5.V2D(), v3.V2S(), 32), "shll v5.2d, v3.2s, #32");
+ COMPARE_MACRO(Shll2(v2.V8H(), v9.V16B(), 8), "shll2 v2.8h, v9.16b, #8");
+ COMPARE_MACRO(Shll2(v4.V4S(), v2.V8H(), 16), "shll2 v4.4s, v2.8h, #16");
+ COMPARE_MACRO(Shll2(v6.V2D(), v4.V4S(), 32), "shll2 v6.2d, v4.4s, #32");
// An unallocated form of shll.
COMPARE(dci(0x2ee13bff), "unallocated (NEON2RegMisc)");
// An unallocated form of shll2.
COMPARE(dci(0x6ee13bff), "unallocated (NEON2RegMisc)");
- #define DISASM_INST(M, S) \
- COMPARE(Cmeq(v0.M, v1.M, 0), "cmeq v0." S ", v1." S ", #0");
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Cmeq(v0.M, v1.M, 0), "cmeq v0." S ", v1." S ", #0");
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Cmge(v0.M, v1.M, 0), "cmge v0." S ", v1." S ", #0");
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Cmge(v0.M, v1.M, 0), "cmge v0." S ", v1." S ", #0");
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Cmgt(v0.M, v1.M, 0), "cmgt v0." S ", v1." S ", #0");
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Cmgt(v0.M, v1.M, 0), "cmgt v0." S ", v1." S ", #0");
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Cmle(v0.M, v1.M, 0), "cmle v0." S ", v1." S ", #0");
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Cmle(v0.M, v1.M, 0), "cmle v0." S ", v1." S ", #0");
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(M, S) \
- COMPARE(Cmlt(v0.M, v1.M, 0), "cmlt v0." S ", v1." S ", #0");
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Cmlt(v0.M, v1.M, 0), "cmlt v0." S ", v1." S ", #0");
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- COMPARE(Cmeq(v0.D(), v1.D(), 0), "cmeq d0, d1, #0");
- COMPARE(Cmge(v3.D(), v4.D(), 0), "cmge d3, d4, #0");
- COMPARE(Cmgt(v6.D(), v7.D(), 0), "cmgt d6, d7, #0");
- COMPARE(Cmle(v0.D(), v1.D(), 0), "cmle d0, d1, #0");
- COMPARE(Cmlt(v3.D(), v4.D(), 0), "cmlt d3, d4, #0");
+ COMPARE_MACRO(Cmeq(v0.D(), v1.D(), 0), "cmeq d0, d1, #0");
+ COMPARE_MACRO(Cmge(v3.D(), v4.D(), 0), "cmge d3, d4, #0");
+ COMPARE_MACRO(Cmgt(v6.D(), v7.D(), 0), "cmgt d6, d7, #0");
+ COMPARE_MACRO(Cmle(v0.D(), v1.D(), 0), "cmle d0, d1, #0");
+ COMPARE_MACRO(Cmlt(v3.D(), v4.D(), 0), "cmlt d3, d4, #0");
- #define DISASM_INST(M, S) \
- COMPARE(Fcmeq(v0.M, v1.M, 0), "fcmeq v0." S ", v1." S ", #0.0");
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fcmeq(v0.M, v1.M, 0), "fcmeq v0." S ", v1." S ", #0.0");
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- COMPARE(Fcmeq(v0.S(), v1.S(), 0), "fcmeq s0, s1, #0.0");
- COMPARE(Fcmeq(v0.D(), v1.D(), 0), "fcmeq d0, d1, #0.0");
+ COMPARE_MACRO(Fcmeq(v0.S(), v1.S(), 0),
+ "fcmeq s0, s1, "
+ "#0.0");
+ COMPARE_MACRO(Fcmeq(v0.D(), v1.D(), 0),
+ "fcmeq d0, d1, "
+ "#0.0");
- #define DISASM_INST(M, S) \
- COMPARE(Fcmge(v0.M, v1.M, 0), "fcmge v0." S ", v1." S ", #0.0");
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fcmge(v0.M, v1.M, 0), "fcmge v0." S ", v1." S ", #0.0");
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- COMPARE(Fcmge(v0.S(), v1.S(), 0), "fcmge s0, s1, #0.0");
- COMPARE(Fcmge(v0.D(), v1.D(), 0), "fcmge d0, d1, #0.0");
+ COMPARE_MACRO(Fcmge(v0.S(), v1.S(), 0),
+ "fcmge s0, s1, "
+ "#0.0");
+ COMPARE_MACRO(Fcmge(v0.D(), v1.D(), 0),
+ "fcmge d0, d1, "
+ "#0.0");
- #define DISASM_INST(M, S) \
- COMPARE(Fcmgt(v0.M, v1.M, 0), "fcmgt v0." S ", v1." S ", #0.0");
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fcmgt(v0.M, v1.M, 0), "fcmgt v0." S ", v1." S ", #0.0");
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- COMPARE(Fcmgt(v0.S(), v1.S(), 0), "fcmgt s0, s1, #0.0");
- COMPARE(Fcmgt(v0.D(), v1.D(), 0), "fcmgt d0, d1, #0.0");
+ COMPARE_MACRO(Fcmgt(v0.S(), v1.S(), 0),
+ "fcmgt s0, s1, "
+ "#0.0");
+ COMPARE_MACRO(Fcmgt(v0.D(), v1.D(), 0),
+ "fcmgt d0, d1, "
+ "#0.0");
- #define DISASM_INST(M, S) \
- COMPARE(Fcmle(v0.M, v1.M, 0), "fcmle v0." S ", v1." S ", #0.0");
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fcmle(v0.M, v1.M, 0), "fcmle v0." S ", v1." S ", #0.0");
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- COMPARE(Fcmle(v0.S(), v1.S(), 0), "fcmle s0, s1, #0.0");
- COMPARE(Fcmle(v0.D(), v1.D(), 0), "fcmle d0, d1, #0.0");
+ COMPARE_MACRO(Fcmle(v0.S(), v1.S(), 0),
+ "fcmle s0, s1, "
+ "#0.0");
+ COMPARE_MACRO(Fcmle(v0.D(), v1.D(), 0),
+ "fcmle d0, d1, "
+ "#0.0");
- #define DISASM_INST(M, S) \
- COMPARE(Fcmlt(v0.M, v1.M, 0), "fcmlt v0." S ", v1." S ", #0.0");
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Fcmlt(v0.M, v1.M, 0), "fcmlt v0." S ", v1." S ", #0.0");
NEON_FORMAT_LIST_FP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- COMPARE(Fcmlt(v0.S(), v1.S(), 0), "fcmlt s0, s1, #0.0");
- COMPARE(Fcmlt(v0.D(), v1.D(), 0), "fcmlt d0, d1, #0.0");
+ COMPARE_MACRO(Fcmlt(v0.S(), v1.S(), 0),
+ "fcmlt s0, s1, "
+ "#0.0");
+ COMPARE_MACRO(Fcmlt(v0.D(), v1.D(), 0),
+ "fcmlt d0, d1, "
+ "#0.0");
- #define DISASM_INST(M, S) \
- COMPARE(Neg(v0.M, v1.M), "neg v0." S ", v1." S);
+#define DISASM_INST(M, S) COMPARE_MACRO(Neg(v0.M, v1.M), "neg v0." S ", v1." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- COMPARE(Neg(v0.D(), v1.D()), "neg d0, d1");
+ COMPARE_MACRO(Neg(v0.D(), v1.D()), "neg d0, d1");
- #define DISASM_INST(M, S) \
- COMPARE(Sqneg(v0.M, v1.M), "sqneg v0." S ", v1." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Sqneg(v0.M, v1.M), "sqneg v0." S ", v1." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- COMPARE(Sqneg(b0, b1), "sqneg b0, b1");
- COMPARE(Sqneg(h1, h2), "sqneg h1, h2");
- COMPARE(Sqneg(s2, s3), "sqneg s2, s3");
- COMPARE(Sqneg(d3, d4), "sqneg d3, d4");
+ COMPARE_MACRO(Sqneg(b0, b1), "sqneg b0, b1");
+ COMPARE_MACRO(Sqneg(h1, h2), "sqneg h1, h2");
+ COMPARE_MACRO(Sqneg(s2, s3), "sqneg s2, s3");
+ COMPARE_MACRO(Sqneg(d3, d4), "sqneg d3, d4");
- #define DISASM_INST(M, S) \
- COMPARE(Abs(v0.M, v1.M), "abs v0." S ", v1." S);
+#define DISASM_INST(M, S) COMPARE_MACRO(Abs(v0.M, v1.M), "abs v0." S ", v1." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- COMPARE(Abs(v0.D(), v1.D()), "abs d0, d1");
+ COMPARE_MACRO(Abs(v0.D(), v1.D()), "abs d0, d1");
- #define DISASM_INST(M, S) \
- COMPARE(Sqabs(v0.M, v1.M), "sqabs v0." S ", v1." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Sqabs(v0.M, v1.M), "sqabs v0." S ", v1." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- COMPARE(Sqabs(b0, b1), "sqabs b0, b1");
- COMPARE(Sqabs(h1, h2), "sqabs h1, h2");
- COMPARE(Sqabs(s2, s3), "sqabs s2, s3");
- COMPARE(Sqabs(d3, d4), "sqabs d3, d4");
+ COMPARE_MACRO(Sqabs(b0, b1), "sqabs b0, b1");
+ COMPARE_MACRO(Sqabs(h1, h2), "sqabs h1, h2");
+ COMPARE_MACRO(Sqabs(s2, s3), "sqabs s2, s3");
+ COMPARE_MACRO(Sqabs(d3, d4), "sqabs d3, d4");
- #define DISASM_INST(M, S) \
- COMPARE(Suqadd(v0.M, v1.M), "suqadd v0." S ", v1." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Suqadd(v0.M, v1.M), "suqadd v0." S ", v1." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- COMPARE(Suqadd(b0, b1), "suqadd b0, b1");
- COMPARE(Suqadd(h1, h2), "suqadd h1, h2");
- COMPARE(Suqadd(s2, s3), "suqadd s2, s3");
- COMPARE(Suqadd(d3, d4), "suqadd d3, d4");
+ COMPARE_MACRO(Suqadd(b0, b1), "suqadd b0, b1");
+ COMPARE_MACRO(Suqadd(h1, h2), "suqadd h1, h2");
+ COMPARE_MACRO(Suqadd(s2, s3), "suqadd s2, s3");
+ COMPARE_MACRO(Suqadd(d3, d4), "suqadd d3, d4");
- #define DISASM_INST(M, S) \
- COMPARE(Usqadd(v0.M, v1.M), "usqadd v0." S ", v1." S);
+#define DISASM_INST(M, S) \
+ COMPARE_MACRO(Usqadd(v0.M, v1.M), "usqadd v0." S ", v1." S);
NEON_FORMAT_LIST(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- COMPARE(Usqadd(b0, b1), "usqadd b0, b1");
- COMPARE(Usqadd(h1, h2), "usqadd h1, h2");
- COMPARE(Usqadd(s2, s3), "usqadd s2, s3");
- COMPARE(Usqadd(d3, d4), "usqadd d3, d4");
+ COMPARE_MACRO(Usqadd(b0, b1), "usqadd b0, b1");
+ COMPARE_MACRO(Usqadd(h1, h2), "usqadd h1, h2");
+ COMPARE_MACRO(Usqadd(s2, s3), "usqadd s2, s3");
+ COMPARE_MACRO(Usqadd(d3, d4), "usqadd d3, d4");
- COMPARE(Xtn(v0.V8B(), v1.V8H()), "xtn v0.8b, v1.8h");
- COMPARE(Xtn(v1.V4H(), v2.V4S()), "xtn v1.4h, v2.4s");
- COMPARE(Xtn(v2.V2S(), v3.V2D()), "xtn v2.2s, v3.2d");
- COMPARE(Xtn2(v0.V16B(), v1.V8H()), "xtn2 v0.16b, v1.8h");
- COMPARE(Xtn2(v1.V8H(), v2.V4S()), "xtn2 v1.8h, v2.4s");
- COMPARE(Xtn2(v2.V4S(), v3.V2D()), "xtn2 v2.4s, v3.2d");
+ COMPARE_MACRO(Xtn(v0.V8B(), v1.V8H()),
+ "xtn v0.8b, "
+ "v1.8h");
+ COMPARE_MACRO(Xtn(v1.V4H(), v2.V4S()),
+ "xtn v1.4h, "
+ "v2.4s");
+ COMPARE_MACRO(Xtn(v2.V2S(), v3.V2D()),
+ "xtn v2.2s, "
+ "v3.2d");
+ COMPARE_MACRO(Xtn2(v0.V16B(), v1.V8H()),
+ "xtn2 v0.16b, "
+ "v1.8h");
+ COMPARE_MACRO(Xtn2(v1.V8H(), v2.V4S()),
+ "xtn2 v1.8h, "
+ "v2.4s");
+ COMPARE_MACRO(Xtn2(v2.V4S(), v3.V2D()),
+ "xtn2 v2.4s, "
+ "v3.2d");
- COMPARE(Sqxtn(v0.V8B(), v1.V8H()), "sqxtn v0.8b, v1.8h");
- COMPARE(Sqxtn(v1.V4H(), v2.V4S()), "sqxtn v1.4h, v2.4s");
- COMPARE(Sqxtn(v2.V2S(), v3.V2D()), "sqxtn v2.2s, v3.2d");
- COMPARE(Sqxtn2(v0.V16B(), v1.V8H()), "sqxtn2 v0.16b, v1.8h");
- COMPARE(Sqxtn2(v1.V8H(), v2.V4S()), "sqxtn2 v1.8h, v2.4s");
- COMPARE(Sqxtn2(v2.V4S(), v3.V2D()), "sqxtn2 v2.4s, v3.2d");
- COMPARE(Sqxtn(b19, h0), "sqxtn b19, h0");
- COMPARE(Sqxtn(h20, s0), "sqxtn h20, s0") ;
- COMPARE(Sqxtn(s21, d0), "sqxtn s21, d0");
+ COMPARE_MACRO(Sqxtn(v0.V8B(), v1.V8H()),
+ "sqxtn v0.8b, "
+ "v1.8h");
+ COMPARE_MACRO(Sqxtn(v1.V4H(), v2.V4S()),
+ "sqxtn v1.4h, "
+ "v2.4s");
+ COMPARE_MACRO(Sqxtn(v2.V2S(), v3.V2D()),
+ "sqxtn v2.2s, "
+ "v3.2d");
+ COMPARE_MACRO(Sqxtn2(v0.V16B(), v1.V8H()),
+ "sqxtn2 v0.16b, "
+ "v1.8h");
+ COMPARE_MACRO(Sqxtn2(v1.V8H(), v2.V4S()),
+ "sqxtn2 v1.8h, "
+ "v2.4s");
+ COMPARE_MACRO(Sqxtn2(v2.V4S(), v3.V2D()),
+ "sqxtn2 v2.4s, "
+ "v3.2d");
+ COMPARE_MACRO(Sqxtn(b19, h0), "sqxtn b19, h0");
+ COMPARE_MACRO(Sqxtn(h20, s0), "sqxtn h20, s0");
+ COMPARE_MACRO(Sqxtn(s21, d0), "sqxtn s21, d0");
- COMPARE(Uqxtn(v0.V8B(), v1.V8H()), "uqxtn v0.8b, v1.8h");
- COMPARE(Uqxtn(v1.V4H(), v2.V4S()), "uqxtn v1.4h, v2.4s");
- COMPARE(Uqxtn(v2.V2S(), v3.V2D()), "uqxtn v2.2s, v3.2d");
- COMPARE(Uqxtn2(v0.V16B(), v1.V8H()), "uqxtn2 v0.16b, v1.8h");
- COMPARE(Uqxtn2(v1.V8H(), v2.V4S()), "uqxtn2 v1.8h, v2.4s");
- COMPARE(Uqxtn2(v2.V4S(), v3.V2D()), "uqxtn2 v2.4s, v3.2d");
- COMPARE(Uqxtn(b19, h0), "uqxtn b19, h0");
- COMPARE(Uqxtn(h20, s0), "uqxtn h20, s0") ;
- COMPARE(Uqxtn(s21, d0), "uqxtn s21, d0");
+ COMPARE_MACRO(Uqxtn(v0.V8B(), v1.V8H()),
+ "uqxtn v0.8b, "
+ "v1.8h");
+ COMPARE_MACRO(Uqxtn(v1.V4H(), v2.V4S()),
+ "uqxtn v1.4h, "
+ "v2.4s");
+ COMPARE_MACRO(Uqxtn(v2.V2S(), v3.V2D()),
+ "uqxtn v2.2s, "
+ "v3.2d");
+ COMPARE_MACRO(Uqxtn2(v0.V16B(), v1.V8H()),
+ "uqxtn2 v0.16b, "
+ "v1.8h");
+ COMPARE_MACRO(Uqxtn2(v1.V8H(), v2.V4S()),
+ "uqxtn2 v1.8h, "
+ "v2.4s");
+ COMPARE_MACRO(Uqxtn2(v2.V4S(), v3.V2D()),
+ "uqxtn2 v2.4s, "
+ "v3.2d");
+ COMPARE_MACRO(Uqxtn(b19, h0), "uqxtn b19, h0");
+ COMPARE_MACRO(Uqxtn(h20, s0), "uqxtn h20, s0");
+ COMPARE_MACRO(Uqxtn(s21, d0), "uqxtn s21, d0");
- COMPARE(Sqxtun(v0.V8B(), v1.V8H()), "sqxtun v0.8b, v1.8h");
- COMPARE(Sqxtun(v1.V4H(), v2.V4S()), "sqxtun v1.4h, v2.4s");
- COMPARE(Sqxtun(v2.V2S(), v3.V2D()), "sqxtun v2.2s, v3.2d");
- COMPARE(Sqxtun2(v0.V16B(), v1.V8H()), "sqxtun2 v0.16b, v1.8h");
- COMPARE(Sqxtun2(v1.V8H(), v2.V4S()), "sqxtun2 v1.8h, v2.4s");
- COMPARE(Sqxtun2(v2.V4S(), v3.V2D()), "sqxtun2 v2.4s, v3.2d");
- COMPARE(Sqxtun(b19, h0), "sqxtun b19, h0");
- COMPARE(Sqxtun(h20, s0), "sqxtun h20, s0") ;
- COMPARE(Sqxtun(s21, d0), "sqxtun s21, d0");
+ COMPARE_MACRO(Sqxtun(v0.V8B(), v1.V8H()),
+ "sqxtun v0.8b, "
+ "v1.8h");
+ COMPARE_MACRO(Sqxtun(v1.V4H(), v2.V4S()),
+ "sqxtun v1.4h, "
+ "v2.4s");
+ COMPARE_MACRO(Sqxtun(v2.V2S(), v3.V2D()),
+ "sqxtun v2.2s, "
+ "v3.2d");
+ COMPARE_MACRO(Sqxtun2(v0.V16B(), v1.V8H()),
+ "sqxtun2 v0.16b, "
+ "v1.8h");
+ COMPARE_MACRO(Sqxtun2(v1.V8H(), v2.V4S()),
+ "sqxtun2 v1.8h, "
+ "v2.4s");
+ COMPARE_MACRO(Sqxtun2(v2.V4S(), v3.V2D()),
+ "sqxtun2 v2.4s, "
+ "v3.2d");
+ COMPARE_MACRO(Sqxtun(b19, h0), "sqxtun b19, h0");
+ COMPARE_MACRO(Sqxtun(h20, s0), "sqxtun h20, s0");
+ COMPARE_MACRO(Sqxtun(s21, d0), "sqxtun s21, d0");
- COMPARE(Cls(v1.V8B(), v8.V8B()), "cls v1.8b, v8.8b");
- COMPARE(Cls(v2.V16B(), v9.V16B()), "cls v2.16b, v9.16b");
- COMPARE(Cls(v3.V4H(), v1.V4H()), "cls v3.4h, v1.4h");
- COMPARE(Cls(v4.V8H(), v2.V8H()), "cls v4.8h, v2.8h");
- COMPARE(Cls(v5.V2S(), v3.V2S()), "cls v5.2s, v3.2s");
- COMPARE(Cls(v6.V4S(), v4.V4S()), "cls v6.4s, v4.4s");
+ COMPARE_MACRO(Cls(v1.V8B(), v8.V8B()),
+ "cls v1.8b, "
+ "v8.8b");
+ COMPARE_MACRO(Cls(v2.V16B(), v9.V16B()),
+ "cls v2.16b, "
+ "v9.16b");
+ COMPARE_MACRO(Cls(v3.V4H(), v1.V4H()),
+ "cls v3.4h, "
+ "v1.4h");
+ COMPARE_MACRO(Cls(v4.V8H(), v2.V8H()),
+ "cls v4.8h, "
+ "v2.8h");
+ COMPARE_MACRO(Cls(v5.V2S(), v3.V2S()),
+ "cls v5.2s, "
+ "v3.2s");
+ COMPARE_MACRO(Cls(v6.V4S(), v4.V4S()),
+ "cls v6.4s, "
+ "v4.4s");
- COMPARE(Clz(v1.V8B(), v8.V8B()), "clz v1.8b, v8.8b");
- COMPARE(Clz(v2.V16B(), v9.V16B()), "clz v2.16b, v9.16b");
- COMPARE(Clz(v3.V4H(), v1.V4H()), "clz v3.4h, v1.4h");
- COMPARE(Clz(v4.V8H(), v2.V8H()), "clz v4.8h, v2.8h");
- COMPARE(Clz(v5.V2S(), v3.V2S()), "clz v5.2s, v3.2s");
- COMPARE(Clz(v6.V4S(), v4.V4S()), "clz v6.4s, v4.4s");
+ COMPARE_MACRO(Clz(v1.V8B(), v8.V8B()),
+ "clz v1.8b, "
+ "v8.8b");
+ COMPARE_MACRO(Clz(v2.V16B(), v9.V16B()),
+ "clz v2.16b, "
+ "v9.16b");
+ COMPARE_MACRO(Clz(v3.V4H(), v1.V4H()),
+ "clz v3.4h, "
+ "v1.4h");
+ COMPARE_MACRO(Clz(v4.V8H(), v2.V8H()),
+ "clz v4.8h, "
+ "v2.8h");
+ COMPARE_MACRO(Clz(v5.V2S(), v3.V2S()),
+ "clz v5.2s, "
+ "v3.2s");
+ COMPARE_MACRO(Clz(v6.V4S(), v4.V4S()),
+ "clz v6.4s, "
+ "v4.4s");
- COMPARE(Cnt(v1.V8B(), v8.V8B()), "cnt v1.8b, v8.8b");
- COMPARE(Cnt(v2.V16B(), v9.V16B()), "cnt v2.16b, v9.16b");
+ COMPARE_MACRO(Cnt(v1.V8B(), v8.V8B()),
+ "cnt v1.8b, "
+ "v8.8b");
+ COMPARE_MACRO(Cnt(v2.V16B(), v9.V16B()),
+ "cnt v2.16b, "
+ "v9.16b");
- COMPARE(Mvn(v4.V8B(), v5.V8B()), "mvn v4.8b, v5.8b");
- COMPARE(Mvn(v4.V16B(), v5.V16B()), "mvn v4.16b, v5.16b");
+ COMPARE_MACRO(Mvn(v4.V8B(), v5.V8B()),
+ "mvn v4.8b, "
+ "v5.8b");
+ COMPARE_MACRO(Mvn(v4.V16B(), v5.V16B()),
+ "mvn v4.16b, "
+ "v5.16b");
- COMPARE(Not(v4.V8B(), v5.V8B()), "mvn v4.8b, v5.8b");
- COMPARE(Not(v4.V16B(), v5.V16B()), "mvn v4.16b, v5.16b");
+ COMPARE_MACRO(Not(v4.V8B(), v5.V8B()),
+ "mvn v4.8b, "
+ "v5.8b");
+ COMPARE_MACRO(Not(v4.V16B(), v5.V16B()),
+ "mvn v4.16b, "
+ "v5.16b");
- COMPARE(Rev64(v1.V8B(), v8.V8B()), "rev64 v1.8b, v8.8b");
- COMPARE(Rev64(v2.V16B(), v9.V16B()), "rev64 v2.16b, v9.16b");
- COMPARE(Rev64(v3.V4H(), v1.V4H()), "rev64 v3.4h, v1.4h");
- COMPARE(Rev64(v4.V8H(), v2.V8H()), "rev64 v4.8h, v2.8h");
- COMPARE(Rev64(v5.V2S(), v3.V2S()), "rev64 v5.2s, v3.2s");
- COMPARE(Rev64(v6.V4S(), v4.V4S()), "rev64 v6.4s, v4.4s");
+ COMPARE_MACRO(Rev64(v1.V8B(), v8.V8B()),
+ "rev64 v1.8b, "
+ "v8.8b");
+ COMPARE_MACRO(Rev64(v2.V16B(), v9.V16B()),
+ "rev64 v2.16b, "
+ "v9.16b");
+ COMPARE_MACRO(Rev64(v3.V4H(), v1.V4H()),
+ "rev64 v3.4h, "
+ "v1.4h");
+ COMPARE_MACRO(Rev64(v4.V8H(), v2.V8H()),
+ "rev64 v4.8h, "
+ "v2.8h");
+ COMPARE_MACRO(Rev64(v5.V2S(), v3.V2S()),
+ "rev64 v5.2s, "
+ "v3.2s");
+ COMPARE_MACRO(Rev64(v6.V4S(), v4.V4S()),
+ "rev64 v6.4s, "
+ "v4.4s");
- COMPARE(Rev32(v1.V8B(), v8.V8B()), "rev32 v1.8b, v8.8b");
- COMPARE(Rev32(v2.V16B(), v9.V16B()), "rev32 v2.16b, v9.16b");
- COMPARE(Rev32(v3.V4H(), v1.V4H()), "rev32 v3.4h, v1.4h");
- COMPARE(Rev32(v4.V8H(), v2.V8H()), "rev32 v4.8h, v2.8h");
+ COMPARE_MACRO(Rev32(v1.V8B(), v8.V8B()),
+ "rev32 v1.8b, "
+ "v8.8b");
+ COMPARE_MACRO(Rev32(v2.V16B(), v9.V16B()),
+ "rev32 v2.16b, "
+ "v9.16b");
+ COMPARE_MACRO(Rev32(v3.V4H(), v1.V4H()),
+ "rev32 v3.4h, "
+ "v1.4h");
+ COMPARE_MACRO(Rev32(v4.V8H(), v2.V8H()),
+ "rev32 v4.8h, "
+ "v2.8h");
- COMPARE(Rev16(v1.V8B(), v8.V8B()), "rev16 v1.8b, v8.8b");
- COMPARE(Rev16(v2.V16B(), v9.V16B()), "rev16 v2.16b, v9.16b");
+ COMPARE_MACRO(Rev16(v1.V8B(), v8.V8B()),
+ "rev16 v1.8b, "
+ "v8.8b");
+ COMPARE_MACRO(Rev16(v2.V16B(), v9.V16B()),
+ "rev16 v2.16b, "
+ "v9.16b");
- COMPARE(Rbit(v1.V8B(), v8.V8B()), "rbit v1.8b, v8.8b");
- COMPARE(Rbit(v2.V16B(), v9.V16B()), "rbit v2.16b, v9.16b");
+ COMPARE_MACRO(Rbit(v1.V8B(), v8.V8B()),
+ "rbit v1.8b, "
+ "v8.8b");
+ COMPARE_MACRO(Rbit(v2.V16B(), v9.V16B()),
+ "rbit v2.16b, "
+ "v9.16b");
- COMPARE(Ursqrte(v2.V2S(), v9.V2S()), "ursqrte v2.2s, v9.2s");
- COMPARE(Ursqrte(v16.V4S(), v23.V4S()), "ursqrte v16.4s, v23.4s");
+ COMPARE_MACRO(Ursqrte(v2.V2S(), v9.V2S()),
+ "ursqrte v2.2s, "
+ "v9.2s");
+ COMPARE_MACRO(Ursqrte(v16.V4S(), v23.V4S()),
+ "ursqrte v16.4s, "
+ "v23.4s");
- COMPARE(Urecpe(v2.V2S(), v9.V2S()), "urecpe v2.2s, v9.2s");
- COMPARE(Urecpe(v16.V4S(), v23.V4S()), "urecpe v16.4s, v23.4s");
+ COMPARE_MACRO(Urecpe(v2.V2S(), v9.V2S()),
+ "urecpe v2.2s, "
+ "v9.2s");
+ COMPARE_MACRO(Urecpe(v16.V4S(), v23.V4S()),
+ "urecpe v16.4s, "
+ "v23.4s");
- COMPARE(Frsqrte(v2.V2S(), v9.V2S()), "frsqrte v2.2s, v9.2s");
- COMPARE(Frsqrte(v16.V4S(), v23.V4S()), "frsqrte v16.4s, v23.4s");
- COMPARE(Frsqrte(v2.V2D(), v9.V2D()), "frsqrte v2.2d, v9.2d");
- COMPARE(Frsqrte(v0.S(), v1.S()), "frsqrte s0, s1");
- COMPARE(Frsqrte(v0.D(), v1.D()), "frsqrte d0, d1");
+ COMPARE_MACRO(Frsqrte(v2.V2S(), v9.V2S()),
+ "frsqrte v2.2s, "
+ "v9.2s");
+ COMPARE_MACRO(Frsqrte(v16.V4S(), v23.V4S()),
+ "frsqrte v16.4s, "
+ "v23.4s");
+ COMPARE_MACRO(Frsqrte(v2.V2D(), v9.V2D()),
+ "frsqrte v2.2d, "
+ "v9.2d");
+ COMPARE_MACRO(Frsqrte(v0.S(), v1.S()), "frsqrte s0, s1");
+ COMPARE_MACRO(Frsqrte(v0.D(), v1.D()), "frsqrte d0, d1");
- COMPARE(Frecpe(v2.V2S(), v9.V2S()), "frecpe v2.2s, v9.2s");
- COMPARE(Frecpe(v16.V4S(), v23.V4S()), "frecpe v16.4s, v23.4s");
- COMPARE(Frecpe(v2.V2D(), v9.V2D()), "frecpe v2.2d, v9.2d");
- COMPARE(Frecpe(v0.S(), v1.S()), "frecpe s0, s1");
- COMPARE(Frecpe(v0.D(), v1.D()), "frecpe d0, d1");
+ COMPARE_MACRO(Frecpe(v2.V2S(), v9.V2S()),
+ "frecpe v2.2s, "
+ "v9.2s");
+ COMPARE_MACRO(Frecpe(v16.V4S(), v23.V4S()),
+ "frecpe v16.4s, "
+ "v23.4s");
+ COMPARE_MACRO(Frecpe(v2.V2D(), v9.V2D()),
+ "frecpe v2.2d, "
+ "v9.2d");
+ COMPARE_MACRO(Frecpe(v0.S(), v1.S()), "frecpe s0, s1");
+ COMPARE_MACRO(Frecpe(v0.D(), v1.D()), "frecpe d0, d1");
- COMPARE(Fabs(v2.V2S(), v9.V2S()), "fabs v2.2s, v9.2s");
- COMPARE(Fabs(v16.V4S(), v23.V4S()), "fabs v16.4s, v23.4s");
- COMPARE(Fabs(v31.V2D(), v30.V2D()), "fabs v31.2d, v30.2d");
+ COMPARE_MACRO(Fabs(v2.V2S(), v9.V2S()),
+ "fabs v2.2s, "
+ "v9.2s");
+ COMPARE_MACRO(Fabs(v16.V4S(), v23.V4S()),
+ "fabs v16.4s, "
+ "v23.4s");
+ COMPARE_MACRO(Fabs(v31.V2D(), v30.V2D()),
+ "fabs v31.2d, "
+ "v30.2d");
- COMPARE(Fneg(v2.V2S(), v9.V2S()), "fneg v2.2s, v9.2s");
- COMPARE(Fneg(v16.V4S(), v23.V4S()), "fneg v16.4s, v23.4s");
- COMPARE(Fneg(v31.V2D(), v30.V2D()), "fneg v31.2d, v30.2d");
+ COMPARE_MACRO(Fneg(v2.V2S(), v9.V2S()),
+ "fneg v2.2s, "
+ "v9.2s");
+ COMPARE_MACRO(Fneg(v16.V4S(), v23.V4S()),
+ "fneg v16.4s, "
+ "v23.4s");
+ COMPARE_MACRO(Fneg(v31.V2D(), v30.V2D()),
+ "fneg v31.2d, "
+ "v30.2d");
- COMPARE(Frintn(v2.V2S(), v9.V2S()), "frintn v2.2s, v9.2s");
- COMPARE(Frintn(v16.V4S(), v23.V4S()), "frintn v16.4s, v23.4s");
- COMPARE(Frintn(v31.V2D(), v30.V2D()), "frintn v31.2d, v30.2d");
+ COMPARE_MACRO(Frintn(v2.V2S(), v9.V2S()),
+ "frintn v2.2s, "
+ "v9.2s");
+ COMPARE_MACRO(Frintn(v16.V4S(), v23.V4S()),
+ "frintn v16.4s, "
+ "v23.4s");
+ COMPARE_MACRO(Frintn(v31.V2D(), v30.V2D()),
+ "frintn v31.2d, "
+ "v30.2d");
- COMPARE(Frinta(v2.V2S(), v9.V2S()), "frinta v2.2s, v9.2s");
- COMPARE(Frinta(v16.V4S(), v23.V4S()), "frinta v16.4s, v23.4s");
- COMPARE(Frinta(v31.V2D(), v30.V2D()), "frinta v31.2d, v30.2d");
+ COMPARE_MACRO(Frinta(v2.V2S(), v9.V2S()),
+ "frinta v2.2s, "
+ "v9.2s");
+ COMPARE_MACRO(Frinta(v16.V4S(), v23.V4S()),
+ "frinta v16.4s, "
+ "v23.4s");
+ COMPARE_MACRO(Frinta(v31.V2D(), v30.V2D()),
+ "frinta v31.2d, "
+ "v30.2d");
- COMPARE(Frintp(v2.V2S(), v9.V2S()), "frintp v2.2s, v9.2s");
- COMPARE(Frintp(v16.V4S(), v23.V4S()), "frintp v16.4s, v23.4s");
- COMPARE(Frintp(v31.V2D(), v30.V2D()), "frintp v31.2d, v30.2d");
+ COMPARE_MACRO(Frintp(v2.V2S(), v9.V2S()),
+ "frintp v2.2s, "
+ "v9.2s");
+ COMPARE_MACRO(Frintp(v16.V4S(), v23.V4S()),
+ "frintp v16.4s, "
+ "v23.4s");
+ COMPARE_MACRO(Frintp(v31.V2D(), v30.V2D()),
+ "frintp v31.2d, "
+ "v30.2d");
- COMPARE(Frintm(v2.V2S(), v9.V2S()), "frintm v2.2s, v9.2s");
- COMPARE(Frintm(v16.V4S(), v23.V4S()), "frintm v16.4s, v23.4s");
- COMPARE(Frintm(v31.V2D(), v30.V2D()), "frintm v31.2d, v30.2d");
+ COMPARE_MACRO(Frintm(v2.V2S(), v9.V2S()),
+ "frintm v2.2s, "
+ "v9.2s");
+ COMPARE_MACRO(Frintm(v16.V4S(), v23.V4S()),
+ "frintm v16.4s, "
+ "v23.4s");
+ COMPARE_MACRO(Frintm(v31.V2D(), v30.V2D()),
+ "frintm v31.2d, "
+ "v30.2d");
- COMPARE(Frintx(v2.V2S(), v9.V2S()), "frintx v2.2s, v9.2s");
- COMPARE(Frintx(v16.V4S(), v23.V4S()), "frintx v16.4s, v23.4s");
- COMPARE(Frintx(v31.V2D(), v30.V2D()), "frintx v31.2d, v30.2d");
+ COMPARE_MACRO(Frintx(v2.V2S(), v9.V2S()),
+ "frintx v2.2s, "
+ "v9.2s");
+ COMPARE_MACRO(Frintx(v16.V4S(), v23.V4S()),
+ "frintx v16.4s, "
+ "v23.4s");
+ COMPARE_MACRO(Frintx(v31.V2D(), v30.V2D()),
+ "frintx v31.2d, "
+ "v30.2d");
- COMPARE(Frintz(v2.V2S(), v9.V2S()), "frintz v2.2s, v9.2s");
- COMPARE(Frintz(v16.V4S(), v23.V4S()), "frintz v16.4s, v23.4s");
- COMPARE(Frintz(v31.V2D(), v30.V2D()), "frintz v31.2d, v30.2d");
+ COMPARE_MACRO(Frintz(v2.V2S(), v9.V2S()),
+ "frintz v2.2s, "
+ "v9.2s");
+ COMPARE_MACRO(Frintz(v16.V4S(), v23.V4S()),
+ "frintz v16.4s, "
+ "v23.4s");
+ COMPARE_MACRO(Frintz(v31.V2D(), v30.V2D()),
+ "frintz v31.2d, "
+ "v30.2d");
- COMPARE(Frinti(v2.V2S(), v9.V2S()), "frinti v2.2s, v9.2s");
- COMPARE(Frinti(v16.V4S(), v23.V4S()), "frinti v16.4s, v23.4s");
- COMPARE(Frinti(v31.V2D(), v30.V2D()), "frinti v31.2d, v30.2d");
+ COMPARE_MACRO(Frinti(v2.V2S(), v9.V2S()),
+ "frinti v2.2s, "
+ "v9.2s");
+ COMPARE_MACRO(Frinti(v16.V4S(), v23.V4S()),
+ "frinti v16.4s, "
+ "v23.4s");
+ COMPARE_MACRO(Frinti(v31.V2D(), v30.V2D()),
+ "frinti v31.2d, "
+ "v30.2d");
- COMPARE(Fsqrt(v3.V2S(), v10.V2S()), "fsqrt v3.2s, v10.2s");
- COMPARE(Fsqrt(v22.V4S(), v11.V4S()), "fsqrt v22.4s, v11.4s");
- COMPARE(Fsqrt(v31.V2D(), v0.V2D()), "fsqrt v31.2d, v0.2d");
+ COMPARE_MACRO(Fsqrt(v3.V2S(), v10.V2S()),
+ "fsqrt v3.2s, "
+ "v10.2s");
+ COMPARE_MACRO(Fsqrt(v22.V4S(), v11.V4S()),
+ "fsqrt v22.4s, "
+ "v11.4s");
+ COMPARE_MACRO(Fsqrt(v31.V2D(), v0.V2D()),
+ "fsqrt v31.2d, "
+ "v0.2d");
- COMPARE(Fcvtns(v4.V2S(), v11.V2S()), "fcvtns v4.2s, v11.2s");
- COMPARE(Fcvtns(v23.V4S(), v12.V4S()), "fcvtns v23.4s, v12.4s");
- COMPARE(Fcvtns(v30.V2D(), v1.V2D()), "fcvtns v30.2d, v1.2d");
- COMPARE(Fcvtnu(v4.V2S(), v11.V2S()), "fcvtnu v4.2s, v11.2s");
- COMPARE(Fcvtnu(v23.V4S(), v12.V4S()), "fcvtnu v23.4s, v12.4s");
- COMPARE(Fcvtnu(v30.V2D(), v1.V2D()), "fcvtnu v30.2d, v1.2d");
+ COMPARE_MACRO(Fcvtns(v4.V2S(), v11.V2S()),
+ "fcvtns v4.2s, "
+ "v11.2s");
+ COMPARE_MACRO(Fcvtns(v23.V4S(), v12.V4S()),
+ "fcvtns v23.4s, "
+ "v12.4s");
+ COMPARE_MACRO(Fcvtns(v30.V2D(), v1.V2D()),
+ "fcvtns v30.2d, "
+ "v1.2d");
+ COMPARE_MACRO(Fcvtnu(v4.V2S(), v11.V2S()),
+ "fcvtnu v4.2s, "
+ "v11.2s");
+ COMPARE_MACRO(Fcvtnu(v23.V4S(), v12.V4S()),
+ "fcvtnu v23.4s, "
+ "v12.4s");
+ COMPARE_MACRO(Fcvtnu(v30.V2D(), v1.V2D()),
+ "fcvtnu v30.2d, "
+ "v1.2d");
- COMPARE(Fcvtps(v4.V2S(), v11.V2S()), "fcvtps v4.2s, v11.2s");
- COMPARE(Fcvtps(v23.V4S(), v12.V4S()), "fcvtps v23.4s, v12.4s");
- COMPARE(Fcvtps(v30.V2D(), v1.V2D()), "fcvtps v30.2d, v1.2d");
- COMPARE(Fcvtpu(v4.V2S(), v11.V2S()), "fcvtpu v4.2s, v11.2s");
- COMPARE(Fcvtpu(v23.V4S(), v12.V4S()), "fcvtpu v23.4s, v12.4s");
- COMPARE(Fcvtpu(v30.V2D(), v1.V2D()), "fcvtpu v30.2d, v1.2d");
+ COMPARE_MACRO(Fcvtps(v4.V2S(), v11.V2S()),
+ "fcvtps v4.2s, "
+ "v11.2s");
+ COMPARE_MACRO(Fcvtps(v23.V4S(), v12.V4S()),
+ "fcvtps v23.4s, "
+ "v12.4s");
+ COMPARE_MACRO(Fcvtps(v30.V2D(), v1.V2D()),
+ "fcvtps v30.2d, "
+ "v1.2d");
+ COMPARE_MACRO(Fcvtpu(v4.V2S(), v11.V2S()),
+ "fcvtpu v4.2s, "
+ "v11.2s");
+ COMPARE_MACRO(Fcvtpu(v23.V4S(), v12.V4S()),
+ "fcvtpu v23.4s, "
+ "v12.4s");
+ COMPARE_MACRO(Fcvtpu(v30.V2D(), v1.V2D()),
+ "fcvtpu v30.2d, "
+ "v1.2d");
- COMPARE(Fcvtms(v4.V2S(), v11.V2S()), "fcvtms v4.2s, v11.2s");
- COMPARE(Fcvtms(v23.V4S(), v12.V4S()), "fcvtms v23.4s, v12.4s");
- COMPARE(Fcvtms(v30.V2D(), v1.V2D()), "fcvtms v30.2d, v1.2d");
- COMPARE(Fcvtmu(v4.V2S(), v11.V2S()), "fcvtmu v4.2s, v11.2s");
- COMPARE(Fcvtmu(v23.V4S(), v12.V4S()), "fcvtmu v23.4s, v12.4s");
- COMPARE(Fcvtmu(v30.V2D(), v1.V2D()), "fcvtmu v30.2d, v1.2d");
+ COMPARE_MACRO(Fcvtms(v4.V2S(), v11.V2S()),
+ "fcvtms v4.2s, "
+ "v11.2s");
+ COMPARE_MACRO(Fcvtms(v23.V4S(), v12.V4S()),
+ "fcvtms v23.4s, "
+ "v12.4s");
+ COMPARE_MACRO(Fcvtms(v30.V2D(), v1.V2D()),
+ "fcvtms v30.2d, "
+ "v1.2d");
+ COMPARE_MACRO(Fcvtmu(v4.V2S(), v11.V2S()),
+ "fcvtmu v4.2s, "
+ "v11.2s");
+ COMPARE_MACRO(Fcvtmu(v23.V4S(), v12.V4S()),
+ "fcvtmu v23.4s, "
+ "v12.4s");
+ COMPARE_MACRO(Fcvtmu(v30.V2D(), v1.V2D()),
+ "fcvtmu v30.2d, "
+ "v1.2d");
- COMPARE(Fcvtzs(v4.V2S(), v11.V2S()), "fcvtzs v4.2s, v11.2s");
- COMPARE(Fcvtzs(v23.V4S(), v12.V4S()), "fcvtzs v23.4s, v12.4s");
- COMPARE(Fcvtzs(v30.V2D(), v1.V2D()), "fcvtzs v30.2d, v1.2d");
- COMPARE(Fcvtzu(v4.V2S(), v11.V2S()), "fcvtzu v4.2s, v11.2s");
- COMPARE(Fcvtzu(v23.V4S(), v12.V4S()), "fcvtzu v23.4s, v12.4s");
- COMPARE(Fcvtzu(v30.V2D(), v1.V2D()), "fcvtzu v30.2d, v1.2d");
+ COMPARE_MACRO(Fcvtzs(v4.V2S(), v11.V2S()),
+ "fcvtzs v4.2s, "
+ "v11.2s");
+ COMPARE_MACRO(Fcvtzs(v23.V4S(), v12.V4S()),
+ "fcvtzs v23.4s, "
+ "v12.4s");
+ COMPARE_MACRO(Fcvtzs(v30.V2D(), v1.V2D()),
+ "fcvtzs v30.2d, "
+ "v1.2d");
+ COMPARE_MACRO(Fcvtzu(v4.V2S(), v11.V2S()),
+ "fcvtzu v4.2s, "
+ "v11.2s");
+ COMPARE_MACRO(Fcvtzu(v23.V4S(), v12.V4S()),
+ "fcvtzu v23.4s, "
+ "v12.4s");
+ COMPARE_MACRO(Fcvtzu(v30.V2D(), v1.V2D()),
+ "fcvtzu v30.2d, "
+ "v1.2d");
- COMPARE(Fcvtas(v4.V2S(), v11.V2S()), "fcvtas v4.2s, v11.2s");
- COMPARE(Fcvtas(v23.V4S(), v12.V4S()), "fcvtas v23.4s, v12.4s");
- COMPARE(Fcvtas(v30.V2D(), v1.V2D()), "fcvtas v30.2d, v1.2d");
- COMPARE(Fcvtau(v4.V2S(), v11.V2S()), "fcvtau v4.2s, v11.2s");
- COMPARE(Fcvtau(v23.V4S(), v12.V4S()), "fcvtau v23.4s, v12.4s");
- COMPARE(Fcvtau(v30.V2D(), v1.V2D()), "fcvtau v30.2d, v1.2d");
+ COMPARE_MACRO(Fcvtas(v4.V2S(), v11.V2S()),
+ "fcvtas v4.2s, "
+ "v11.2s");
+ COMPARE_MACRO(Fcvtas(v23.V4S(), v12.V4S()),
+ "fcvtas v23.4s, "
+ "v12.4s");
+ COMPARE_MACRO(Fcvtas(v30.V2D(), v1.V2D()),
+ "fcvtas v30.2d, "
+ "v1.2d");
+ COMPARE_MACRO(Fcvtau(v4.V2S(), v11.V2S()),
+ "fcvtau v4.2s, "
+ "v11.2s");
+ COMPARE_MACRO(Fcvtau(v23.V4S(), v12.V4S()),
+ "fcvtau v23.4s, "
+ "v12.4s");
+ COMPARE_MACRO(Fcvtau(v30.V2D(), v1.V2D()),
+ "fcvtau v30.2d, "
+ "v1.2d");
- COMPARE(Fcvtns(s0, s1), "fcvtns s0, s1");
- COMPARE(Fcvtns(d2, d3), "fcvtns d2, d3");
- COMPARE(Fcvtnu(s4, s5), "fcvtnu s4, s5");
- COMPARE(Fcvtnu(d6, d7), "fcvtnu d6, d7");
- COMPARE(Fcvtps(s8, s9), "fcvtps s8, s9");
- COMPARE(Fcvtps(d10, d11), "fcvtps d10, d11");
- COMPARE(Fcvtpu(s12, s13), "fcvtpu s12, s13");
- COMPARE(Fcvtpu(d14, d15), "fcvtpu d14, d15");
- COMPARE(Fcvtms(s16, s17), "fcvtms s16, s17");
- COMPARE(Fcvtms(d18, d19), "fcvtms d18, d19");
- COMPARE(Fcvtmu(s20, s21), "fcvtmu s20, s21");
- COMPARE(Fcvtmu(d22, d23), "fcvtmu d22, d23");
- COMPARE(Fcvtzs(s24, s25), "fcvtzs s24, s25");
- COMPARE(Fcvtzs(d26, d27), "fcvtzs d26, d27");
- COMPARE(Fcvtzu(s28, s29), "fcvtzu s28, s29");
- COMPARE(Fcvtzu(d30, d31), "fcvtzu d30, d31");
- COMPARE(Fcvtas(s0, s1), "fcvtas s0, s1");
- COMPARE(Fcvtas(d2, d3), "fcvtas d2, d3");
- COMPARE(Fcvtau(s4, s5), "fcvtau s4, s5");
- COMPARE(Fcvtau(d6, d7), "fcvtau d6, d7");
+ COMPARE_MACRO(Fcvtns(s0, s1), "fcvtns s0, s1");
+ COMPARE_MACRO(Fcvtns(d2, d3), "fcvtns d2, d3");
+ COMPARE_MACRO(Fcvtnu(s4, s5), "fcvtnu s4, s5");
+ COMPARE_MACRO(Fcvtnu(d6, d7), "fcvtnu d6, d7");
+ COMPARE_MACRO(Fcvtps(s8, s9), "fcvtps s8, s9");
+ COMPARE_MACRO(Fcvtps(d10, d11), "fcvtps d10, d11");
+ COMPARE_MACRO(Fcvtpu(s12, s13), "fcvtpu s12, s13");
+ COMPARE_MACRO(Fcvtpu(d14, d15), "fcvtpu d14, d15");
+ COMPARE_MACRO(Fcvtms(s16, s17), "fcvtms s16, s17");
+ COMPARE_MACRO(Fcvtms(d18, d19), "fcvtms d18, d19");
+ COMPARE_MACRO(Fcvtmu(s20, s21), "fcvtmu s20, s21");
+ COMPARE_MACRO(Fcvtmu(d22, d23), "fcvtmu d22, d23");
+ COMPARE_MACRO(Fcvtzs(s24, s25), "fcvtzs s24, s25");
+ COMPARE_MACRO(Fcvtzs(d26, d27), "fcvtzs d26, d27");
+ COMPARE_MACRO(Fcvtzu(s28, s29), "fcvtzu s28, s29");
+ COMPARE_MACRO(Fcvtzu(d30, d31), "fcvtzu d30, d31");
+ COMPARE_MACRO(Fcvtas(s0, s1), "fcvtas s0, s1");
+ COMPARE_MACRO(Fcvtas(d2, d3), "fcvtas d2, d3");
+ COMPARE_MACRO(Fcvtau(s4, s5), "fcvtau s4, s5");
+ COMPARE_MACRO(Fcvtau(d6, d7), "fcvtau d6, d7");
- COMPARE(Fcvtl(v3.V4S(), v5.V4H()), "fcvtl v3.4s, v5.4h");
- COMPARE(Fcvtl(v7.V2D(), v11.V2S()), "fcvtl v7.2d, v11.2s");
- COMPARE(Fcvtl2(v13.V4S(), v17.V8H()), "fcvtl2 v13.4s, v17.8h");
- COMPARE(Fcvtl2(v23.V2D(), v29.V4S()), "fcvtl2 v23.2d, v29.4s");
+ COMPARE_MACRO(Fcvtl(v3.V4S(), v5.V4H()),
+ "fcvtl v3.4s, "
+ "v5.4h");
+ COMPARE_MACRO(Fcvtl(v7.V2D(), v11.V2S()),
+ "fcvtl v7.2d, "
+ "v11.2s");
+ COMPARE_MACRO(Fcvtl2(v13.V4S(), v17.V8H()),
+ "fcvtl2 v13.4s, "
+ "v17.8h");
+ COMPARE_MACRO(Fcvtl2(v23.V2D(), v29.V4S()),
+ "fcvtl2 v23.2d, "
+ "v29.4s");
- COMPARE(Fcvtn(v3.V4H(), v5.V4S()), "fcvtn v3.4h, v5.4s");
- COMPARE(Fcvtn(v7.V2S(), v11.V2D()), "fcvtn v7.2s, v11.2d");
- COMPARE(Fcvtn2(v13.V8H(), v17.V4S()), "fcvtn2 v13.8h, v17.4s");
- COMPARE(Fcvtn2(v23.V4S(), v29.V2D()), "fcvtn2 v23.4s, v29.2d");
+ COMPARE_MACRO(Fcvtn(v3.V4H(), v5.V4S()),
+ "fcvtn v3.4h, "
+ "v5.4s");
+ COMPARE_MACRO(Fcvtn(v7.V2S(), v11.V2D()),
+ "fcvtn v7.2s, "
+ "v11.2d");
+ COMPARE_MACRO(Fcvtn2(v13.V8H(), v17.V4S()),
+ "fcvtn2 v13.8h, "
+ "v17.4s");
+ COMPARE_MACRO(Fcvtn2(v23.V4S(), v29.V2D()),
+ "fcvtn2 v23.4s, "
+ "v29.2d");
- COMPARE(Fcvtxn(v5.V2S(), v7.V2D()), "fcvtxn v5.2s, v7.2d");
- COMPARE(Fcvtxn2(v8.V4S(), v13.V2D()), "fcvtxn2 v8.4s, v13.2d");
- COMPARE(Fcvtxn(s17, d31), "fcvtxn s17, d31");
+ COMPARE_MACRO(Fcvtxn(v5.V2S(), v7.V2D()),
+ "fcvtxn v5.2s, "
+ "v7.2d");
+ COMPARE_MACRO(Fcvtxn2(v8.V4S(), v13.V2D()),
+ "fcvtxn2 v8.4s, "
+ "v13.2d");
+ COMPARE_MACRO(Fcvtxn(s17, d31), "fcvtxn s17, d31");
- COMPARE(Frecpx(s0, s1), "frecpx s0, s1");
- COMPARE(Frecpx(s31, s30), "frecpx s31, s30");
- COMPARE(Frecpx(d2, d3), "frecpx d2, d3");
- COMPARE(Frecpx(d31, d30), "frecpx d31, d30");
+ COMPARE_MACRO(Frecpx(s0, s1), "frecpx s0, s1");
+ COMPARE_MACRO(Frecpx(s31, s30), "frecpx s31, s30");
+ COMPARE_MACRO(Frecpx(d2, d3), "frecpx d2, d3");
+ COMPARE_MACRO(Frecpx(d31, d30), "frecpx d31, d30");
- COMPARE(Scvtf(v5.V2S(), v3.V2S()), "scvtf v5.2s, v3.2s");
- COMPARE(Scvtf(v6.V4S(), v4.V4S()), "scvtf v6.4s, v4.4s");
- COMPARE(Scvtf(v7.V2D(), v5.V2D()), "scvtf v7.2d, v5.2d");
- COMPARE(Scvtf(s8, s6), "scvtf s8, s6");
- COMPARE(Scvtf(d8, d6), "scvtf d8, d6");
+ COMPARE_MACRO(Scvtf(v5.V2S(), v3.V2S()),
+ "scvtf v5.2s, "
+ "v3.2s");
+ COMPARE_MACRO(Scvtf(v6.V4S(), v4.V4S()),
+ "scvtf v6.4s, "
+ "v4.4s");
+ COMPARE_MACRO(Scvtf(v7.V2D(), v5.V2D()),
+ "scvtf v7.2d, "
+ "v5.2d");
+ COMPARE_MACRO(Scvtf(s8, s6), "scvtf s8, s6");
+ COMPARE_MACRO(Scvtf(d8, d6), "scvtf d8, d6");
- COMPARE(Ucvtf(v5.V2S(), v3.V2S()), "ucvtf v5.2s, v3.2s");
- COMPARE(Ucvtf(v6.V4S(), v4.V4S()), "ucvtf v6.4s, v4.4s");
- COMPARE(Ucvtf(v7.V2D(), v5.V2D()), "ucvtf v7.2d, v5.2d");
- COMPARE(Ucvtf(s8, s6), "ucvtf s8, s6");
- COMPARE(Ucvtf(d8, d6), "ucvtf d8, d6");
+ COMPARE_MACRO(Ucvtf(v5.V2S(), v3.V2S()),
+ "ucvtf v5.2s, "
+ "v3.2s");
+ COMPARE_MACRO(Ucvtf(v6.V4S(), v4.V4S()),
+ "ucvtf v6.4s, "
+ "v4.4s");
+ COMPARE_MACRO(Ucvtf(v7.V2D(), v5.V2D()),
+ "ucvtf v7.2d, "
+ "v5.2d");
+ COMPARE_MACRO(Ucvtf(s8, s6), "ucvtf s8, s6");
+ COMPARE_MACRO(Ucvtf(d8, d6), "ucvtf d8, d6");
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Saddlp(v0.TA, v1.TB), "saddlp v0." TAS ", v1." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Saddlp(v0.TA, v1.TB), "saddlp v0." TAS ", v1." TBS);
NEON_FORMAT_LIST_LP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Uaddlp(v0.TA, v1.TB), "uaddlp v0." TAS ", v1." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Uaddlp(v0.TA, v1.TB), "uaddlp v0." TAS ", v1." TBS);
NEON_FORMAT_LIST_LP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Sadalp(v0.TA, v1.TB), "sadalp v0." TAS ", v1." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Sadalp(v0.TA, v1.TB), "sadalp v0." TAS ", v1." TBS);
NEON_FORMAT_LIST_LP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
- #define DISASM_INST(TA, TAS, TB, TBS) \
- COMPARE(Uadalp(v0.TA, v1.TB), "uadalp v0." TAS ", v1." TBS);
+#define DISASM_INST(TA, TAS, TB, TBS) \
+ COMPARE_MACRO(Uadalp(v0.TA, v1.TB), "uadalp v0." TAS ", v1." TBS);
NEON_FORMAT_LIST_LP(DISASM_INST)
- #undef DISASM_INST
+#undef DISASM_INST
CLEANUP();
}
TEST(neon_acrosslanes) {
- SETUP_MACRO();
+ SETUP();
- COMPARE(Smaxv(b4, v5.V8B()), "smaxv b4, v5.8b");
- COMPARE(Smaxv(b4, v5.V16B()), "smaxv b4, v5.16b");
- COMPARE(Smaxv(h4, v5.V4H()), "smaxv h4, v5.4h");
- COMPARE(Smaxv(h4, v5.V8H()), "smaxv h4, v5.8h");
- COMPARE(Smaxv(s4, v5.V4S()), "smaxv s4, v5.4s");
+ COMPARE_MACRO(Smaxv(b4, v5.V8B()), "smaxv b4, v5.8b");
+ COMPARE_MACRO(Smaxv(b4, v5.V16B()), "smaxv b4, v5.16b");
+ COMPARE_MACRO(Smaxv(h4, v5.V4H()), "smaxv h4, v5.4h");
+ COMPARE_MACRO(Smaxv(h4, v5.V8H()), "smaxv h4, v5.8h");
+ COMPARE_MACRO(Smaxv(s4, v5.V4S()), "smaxv s4, v5.4s");
- COMPARE(Sminv(b4, v5.V8B()), "sminv b4, v5.8b");
- COMPARE(Sminv(b4, v5.V16B()), "sminv b4, v5.16b");
- COMPARE(Sminv(h4, v5.V4H()), "sminv h4, v5.4h");
- COMPARE(Sminv(h4, v5.V8H()), "sminv h4, v5.8h");
- COMPARE(Sminv(s4, v5.V4S()), "sminv s4, v5.4s");
+ COMPARE_MACRO(Sminv(b4, v5.V8B()), "sminv b4, v5.8b");
+ COMPARE_MACRO(Sminv(b4, v5.V16B()), "sminv b4, v5.16b");
+ COMPARE_MACRO(Sminv(h4, v5.V4H()), "sminv h4, v5.4h");
+ COMPARE_MACRO(Sminv(h4, v5.V8H()), "sminv h4, v5.8h");
+ COMPARE_MACRO(Sminv(s4, v5.V4S()), "sminv s4, v5.4s");
- COMPARE(Umaxv(b4, v5.V8B()), "umaxv b4, v5.8b");
- COMPARE(Umaxv(b4, v5.V16B()), "umaxv b4, v5.16b");
- COMPARE(Umaxv(h4, v5.V4H()), "umaxv h4, v5.4h");
- COMPARE(Umaxv(h4, v5.V8H()), "umaxv h4, v5.8h");
- COMPARE(Umaxv(s4, v5.V4S()), "umaxv s4, v5.4s");
+ COMPARE_MACRO(Umaxv(b4, v5.V8B()), "umaxv b4, v5.8b");
+ COMPARE_MACRO(Umaxv(b4, v5.V16B()), "umaxv b4, v5.16b");
+ COMPARE_MACRO(Umaxv(h4, v5.V4H()), "umaxv h4, v5.4h");
+ COMPARE_MACRO(Umaxv(h4, v5.V8H()), "umaxv h4, v5.8h");
+ COMPARE_MACRO(Umaxv(s4, v5.V4S()), "umaxv s4, v5.4s");
- COMPARE(Uminv(b4, v5.V8B()), "uminv b4, v5.8b");
- COMPARE(Uminv(b4, v5.V16B()), "uminv b4, v5.16b");
- COMPARE(Uminv(h4, v5.V4H()), "uminv h4, v5.4h");
- COMPARE(Uminv(h4, v5.V8H()), "uminv h4, v5.8h");
- COMPARE(Uminv(s4, v5.V4S()), "uminv s4, v5.4s");
+ COMPARE_MACRO(Uminv(b4, v5.V8B()), "uminv b4, v5.8b");
+ COMPARE_MACRO(Uminv(b4, v5.V16B()), "uminv b4, v5.16b");
+ COMPARE_MACRO(Uminv(h4, v5.V4H()), "uminv h4, v5.4h");
+ COMPARE_MACRO(Uminv(h4, v5.V8H()), "uminv h4, v5.8h");
+ COMPARE_MACRO(Uminv(s4, v5.V4S()), "uminv s4, v5.4s");
- COMPARE(Addv(b4, v5.V8B()), "addv b4, v5.8b");
- COMPARE(Addv(b4, v5.V16B()), "addv b4, v5.16b");
- COMPARE(Addv(h4, v5.V4H()), "addv h4, v5.4h");
- COMPARE(Addv(h4, v5.V8H()), "addv h4, v5.8h");
- COMPARE(Addv(s4, v5.V4S()), "addv s4, v5.4s");
+ COMPARE_MACRO(Addv(b4, v5.V8B()), "addv b4, v5.8b");
+ COMPARE_MACRO(Addv(b4, v5.V16B()), "addv b4, v5.16b");
+ COMPARE_MACRO(Addv(h4, v5.V4H()), "addv h4, v5.4h");
+ COMPARE_MACRO(Addv(h4, v5.V8H()), "addv h4, v5.8h");
+ COMPARE_MACRO(Addv(s4, v5.V4S()), "addv s4, v5.4s");
- COMPARE(Saddlv(h4, v5.V8B()), "saddlv h4, v5.8b");
- COMPARE(Saddlv(h4, v5.V16B()), "saddlv h4, v5.16b");
- COMPARE(Saddlv(s4, v5.V4H()), "saddlv s4, v5.4h");
- COMPARE(Saddlv(s4, v5.V8H()), "saddlv s4, v5.8h");
- COMPARE(Saddlv(d4, v5.V4S()), "saddlv d4, v5.4s");
+ COMPARE_MACRO(Saddlv(h4, v5.V8B()), "saddlv h4, v5.8b");
+ COMPARE_MACRO(Saddlv(h4, v5.V16B()), "saddlv h4, v5.16b");
+ COMPARE_MACRO(Saddlv(s4, v5.V4H()), "saddlv s4, v5.4h");
+ COMPARE_MACRO(Saddlv(s4, v5.V8H()), "saddlv s4, v5.8h");
+ COMPARE_MACRO(Saddlv(d4, v5.V4S()), "saddlv d4, v5.4s");
- COMPARE(Uaddlv(h4, v5.V8B()), "uaddlv h4, v5.8b");
- COMPARE(Uaddlv(h4, v5.V16B()), "uaddlv h4, v5.16b");
- COMPARE(Uaddlv(s4, v5.V4H()), "uaddlv s4, v5.4h");
- COMPARE(Uaddlv(s4, v5.V8H()), "uaddlv s4, v5.8h");
- COMPARE(Uaddlv(d4, v5.V4S()), "uaddlv d4, v5.4s");
+ COMPARE_MACRO(Uaddlv(h4, v5.V8B()), "uaddlv h4, v5.8b");
+ COMPARE_MACRO(Uaddlv(h4, v5.V16B()), "uaddlv h4, v5.16b");
+ COMPARE_MACRO(Uaddlv(s4, v5.V4H()), "uaddlv s4, v5.4h");
+ COMPARE_MACRO(Uaddlv(s4, v5.V8H()), "uaddlv s4, v5.8h");
+ COMPARE_MACRO(Uaddlv(d4, v5.V4S()), "uaddlv d4, v5.4s");
- COMPARE(Fmaxv(s4, v5.V4S()), "fmaxv s4, v5.4s");
- COMPARE(Fminv(s4, v5.V4S()), "fminv s4, v5.4s");
- COMPARE(Fmaxnmv(s4, v5.V4S()), "fmaxnmv s4, v5.4s");
- COMPARE(Fminnmv(s4, v5.V4S()), "fminnmv s4, v5.4s");
+ COMPARE_MACRO(Fmaxv(s4, v5.V4S()), "fmaxv s4, v5.4s");
+ COMPARE_MACRO(Fminv(s4, v5.V4S()), "fminv s4, v5.4s");
+ COMPARE_MACRO(Fmaxnmv(s4, v5.V4S()), "fmaxnmv s4, v5.4s");
+ COMPARE_MACRO(Fminnmv(s4, v5.V4S()), "fminnmv s4, v5.4s");
CLEANUP();
}
TEST(neon_scalar_pairwise) {
- SETUP_MACRO();
+ SETUP();
- COMPARE(Addp(d0, v1.V2D()), "addp d0, v1.2d");
- COMPARE(Faddp(s0, v1.V2S()), "faddp s0, v1.2s");
- COMPARE(Faddp(d2, v3.V2D()), "faddp d2, v3.2d");
- COMPARE(Fmaxp(s4, v5.V2S()), "fmaxp s4, v5.2s");
- COMPARE(Fmaxp(d6, v7.V2D()), "fmaxp d6, v7.2d");
- COMPARE(Fmaxnmp(s8, v9.V2S()), "fmaxnmp s8, v9.2s");
- COMPARE(Fmaxnmp(d10, v11.V2D()), "fmaxnmp d10, v11.2d");
- COMPARE(Fminp(s12, v13.V2S()), "fminp s12, v13.2s");
- COMPARE(Fminp(d14, v15.V2D()), "fminp d14, v15.2d");
- COMPARE(Fminnmp(s16, v17.V2S()), "fminnmp s16, v17.2s");
- COMPARE(Fminnmp(d18, v19.V2D()), "fminnmp d18, v19.2d");
+ COMPARE_MACRO(Addp(d0, v1.V2D()), "addp d0, v1.2d");
+ COMPARE_MACRO(Faddp(s0, v1.V2S()), "faddp s0, v1.2s");
+ COMPARE_MACRO(Faddp(d2, v3.V2D()), "faddp d2, v3.2d");
+ COMPARE_MACRO(Fmaxp(s4, v5.V2S()), "fmaxp s4, v5.2s");
+ COMPARE_MACRO(Fmaxp(d6, v7.V2D()), "fmaxp d6, v7.2d");
+ COMPARE_MACRO(Fmaxnmp(s8, v9.V2S()), "fmaxnmp s8, v9.2s");
+ COMPARE_MACRO(Fmaxnmp(d10, v11.V2D()), "fmaxnmp d10, v11.2d");
+ COMPARE_MACRO(Fminp(s12, v13.V2S()), "fminp s12, v13.2s");
+ COMPARE_MACRO(Fminp(d14, v15.V2D()), "fminp d14, v15.2d");
+ COMPARE_MACRO(Fminnmp(s16, v17.V2S()), "fminnmp s16, v17.2s");
+ COMPARE_MACRO(Fminnmp(d18, v19.V2D()), "fminnmp d18, v19.2d");
CLEANUP();
}
TEST(neon_shift_immediate) {
- SETUP_MACRO();
+ SETUP();
- COMPARE(Sshr(v0.V8B(), v1.V8B(), 1), "sshr v0.8b, v1.8b, #1");
- COMPARE(Sshr(v2.V8B(), v3.V8B(), 8), "sshr v2.8b, v3.8b, #8");
- COMPARE(Sshr(v4.V16B(), v5.V16B(), 1), "sshr v4.16b, v5.16b, #1");
- COMPARE(Sshr(v6.V16B(), v7.V16B(), 8), "sshr v6.16b, v7.16b, #8");
- COMPARE(Sshr(v8.V4H(), v9.V4H(), 1), "sshr v8.4h, v9.4h, #1");
- COMPARE(Sshr(v10.V4H(), v11.V4H(), 16), "sshr v10.4h, v11.4h, #16");
- COMPARE(Sshr(v12.V8H(), v13.V8H(), 1), "sshr v12.8h, v13.8h, #1");
- COMPARE(Sshr(v14.V8H(), v15.V8H(), 16), "sshr v14.8h, v15.8h, #16");
- COMPARE(Sshr(v16.V2S(), v17.V2S(), 1), "sshr v16.2s, v17.2s, #1");
- COMPARE(Sshr(v18.V2S(), v19.V2S(), 32), "sshr v18.2s, v19.2s, #32");
- COMPARE(Sshr(v20.V4S(), v21.V4S(), 1), "sshr v20.4s, v21.4s, #1");
- COMPARE(Sshr(v22.V4S(), v23.V4S(), 32), "sshr v22.4s, v23.4s, #32");
- COMPARE(Sshr(v28.V2D(), v29.V2D(), 1), "sshr v28.2d, v29.2d, #1");
- COMPARE(Sshr(v30.V2D(), v31.V2D(), 64), "sshr v30.2d, v31.2d, #64");
- COMPARE(Sshr(d0, d1, 7), "sshr d0, d1, #7");
+ COMPARE_MACRO(Sshr(v0.V8B(), v1.V8B(), 1), "sshr v0.8b, v1.8b, #1");
+ COMPARE_MACRO(Sshr(v2.V8B(), v3.V8B(), 8), "sshr v2.8b, v3.8b, #8");
+ COMPARE_MACRO(Sshr(v4.V16B(), v5.V16B(), 1), "sshr v4.16b, v5.16b, #1");
+ COMPARE_MACRO(Sshr(v6.V16B(), v7.V16B(), 8), "sshr v6.16b, v7.16b, #8");
+ COMPARE_MACRO(Sshr(v8.V4H(), v9.V4H(), 1), "sshr v8.4h, v9.4h, #1");
+ COMPARE_MACRO(Sshr(v10.V4H(), v11.V4H(), 16), "sshr v10.4h, v11.4h, #16");
+ COMPARE_MACRO(Sshr(v12.V8H(), v13.V8H(), 1), "sshr v12.8h, v13.8h, #1");
+ COMPARE_MACRO(Sshr(v14.V8H(), v15.V8H(), 16), "sshr v14.8h, v15.8h, #16");
+ COMPARE_MACRO(Sshr(v16.V2S(), v17.V2S(), 1), "sshr v16.2s, v17.2s, #1");
+ COMPARE_MACRO(Sshr(v18.V2S(), v19.V2S(), 32), "sshr v18.2s, v19.2s, #32");
+ COMPARE_MACRO(Sshr(v20.V4S(), v21.V4S(), 1), "sshr v20.4s, v21.4s, #1");
+ COMPARE_MACRO(Sshr(v22.V4S(), v23.V4S(), 32), "sshr v22.4s, v23.4s, #32");
+ COMPARE_MACRO(Sshr(v28.V2D(), v29.V2D(), 1), "sshr v28.2d, v29.2d, #1");
+ COMPARE_MACRO(Sshr(v30.V2D(), v31.V2D(), 64), "sshr v30.2d, v31.2d, #64");
+ COMPARE_MACRO(Sshr(d0, d1, 7), "sshr d0, d1, #7");
- COMPARE(Ushr(v0.V8B(), v1.V8B(), 1), "ushr v0.8b, v1.8b, #1");
- COMPARE(Ushr(v2.V8B(), v3.V8B(), 8), "ushr v2.8b, v3.8b, #8");
- COMPARE(Ushr(v4.V16B(), v5.V16B(), 1), "ushr v4.16b, v5.16b, #1");
- COMPARE(Ushr(v6.V16B(), v7.V16B(), 8), "ushr v6.16b, v7.16b, #8");
- COMPARE(Ushr(v8.V4H(), v9.V4H(), 1), "ushr v8.4h, v9.4h, #1");
- COMPARE(Ushr(v10.V4H(), v11.V4H(), 16), "ushr v10.4h, v11.4h, #16");
- COMPARE(Ushr(v12.V8H(), v13.V8H(), 1), "ushr v12.8h, v13.8h, #1");
- COMPARE(Ushr(v14.V8H(), v15.V8H(), 16), "ushr v14.8h, v15.8h, #16");
- COMPARE(Ushr(v16.V2S(), v17.V2S(), 1), "ushr v16.2s, v17.2s, #1");
- COMPARE(Ushr(v18.V2S(), v19.V2S(), 32), "ushr v18.2s, v19.2s, #32");
- COMPARE(Ushr(v20.V4S(), v21.V4S(), 1), "ushr v20.4s, v21.4s, #1");
- COMPARE(Ushr(v22.V4S(), v23.V4S(), 32), "ushr v22.4s, v23.4s, #32");
- COMPARE(Ushr(v28.V2D(), v29.V2D(), 1), "ushr v28.2d, v29.2d, #1");
- COMPARE(Ushr(v30.V2D(), v31.V2D(), 64), "ushr v30.2d, v31.2d, #64");
- COMPARE(Ushr(d0, d1, 7), "ushr d0, d1, #7");
+ COMPARE_MACRO(Ushr(v0.V8B(), v1.V8B(), 1), "ushr v0.8b, v1.8b, #1");
+ COMPARE_MACRO(Ushr(v2.V8B(), v3.V8B(), 8), "ushr v2.8b, v3.8b, #8");
+ COMPARE_MACRO(Ushr(v4.V16B(), v5.V16B(), 1), "ushr v4.16b, v5.16b, #1");
+ COMPARE_MACRO(Ushr(v6.V16B(), v7.V16B(), 8), "ushr v6.16b, v7.16b, #8");
+ COMPARE_MACRO(Ushr(v8.V4H(), v9.V4H(), 1), "ushr v8.4h, v9.4h, #1");
+ COMPARE_MACRO(Ushr(v10.V4H(), v11.V4H(), 16), "ushr v10.4h, v11.4h, #16");
+ COMPARE_MACRO(Ushr(v12.V8H(), v13.V8H(), 1), "ushr v12.8h, v13.8h, #1");
+ COMPARE_MACRO(Ushr(v14.V8H(), v15.V8H(), 16), "ushr v14.8h, v15.8h, #16");
+ COMPARE_MACRO(Ushr(v16.V2S(), v17.V2S(), 1), "ushr v16.2s, v17.2s, #1");
+ COMPARE_MACRO(Ushr(v18.V2S(), v19.V2S(), 32), "ushr v18.2s, v19.2s, #32");
+ COMPARE_MACRO(Ushr(v20.V4S(), v21.V4S(), 1), "ushr v20.4s, v21.4s, #1");
+ COMPARE_MACRO(Ushr(v22.V4S(), v23.V4S(), 32), "ushr v22.4s, v23.4s, #32");
+ COMPARE_MACRO(Ushr(v28.V2D(), v29.V2D(), 1), "ushr v28.2d, v29.2d, #1");
+ COMPARE_MACRO(Ushr(v30.V2D(), v31.V2D(), 64), "ushr v30.2d, v31.2d, #64");
+ COMPARE_MACRO(Ushr(d0, d1, 7), "ushr d0, d1, #7");
- COMPARE(Srshr(v0.V8B(), v1.V8B(), 1), "srshr v0.8b, v1.8b, #1");
- COMPARE(Srshr(v2.V8B(), v3.V8B(), 8), "srshr v2.8b, v3.8b, #8");
- COMPARE(Srshr(v4.V16B(), v5.V16B(), 1), "srshr v4.16b, v5.16b, #1");
- COMPARE(Srshr(v6.V16B(), v7.V16B(), 8), "srshr v6.16b, v7.16b, #8");
- COMPARE(Srshr(v8.V4H(), v9.V4H(), 1), "srshr v8.4h, v9.4h, #1");
- COMPARE(Srshr(v10.V4H(), v11.V4H(), 16), "srshr v10.4h, v11.4h, #16");
- COMPARE(Srshr(v12.V8H(), v13.V8H(), 1), "srshr v12.8h, v13.8h, #1");
- COMPARE(Srshr(v14.V8H(), v15.V8H(), 16), "srshr v14.8h, v15.8h, #16");
- COMPARE(Srshr(v16.V2S(), v17.V2S(), 1), "srshr v16.2s, v17.2s, #1");
- COMPARE(Srshr(v18.V2S(), v19.V2S(), 32), "srshr v18.2s, v19.2s, #32");
- COMPARE(Srshr(v20.V4S(), v21.V4S(), 1), "srshr v20.4s, v21.4s, #1");
- COMPARE(Srshr(v22.V4S(), v23.V4S(), 32), "srshr v22.4s, v23.4s, #32");
- COMPARE(Srshr(v28.V2D(), v29.V2D(), 1), "srshr v28.2d, v29.2d, #1");
- COMPARE(Srshr(v30.V2D(), v31.V2D(), 64), "srshr v30.2d, v31.2d, #64");
- COMPARE(Srshr(d0, d1, 7), "srshr d0, d1, #7");
+ COMPARE_MACRO(Srshr(v0.V8B(), v1.V8B(), 1), "srshr v0.8b, v1.8b, #1");
+ COMPARE_MACRO(Srshr(v2.V8B(), v3.V8B(), 8), "srshr v2.8b, v3.8b, #8");
+ COMPARE_MACRO(Srshr(v4.V16B(), v5.V16B(), 1), "srshr v4.16b, v5.16b, #1");
+ COMPARE_MACRO(Srshr(v6.V16B(), v7.V16B(), 8), "srshr v6.16b, v7.16b, #8");
+ COMPARE_MACRO(Srshr(v8.V4H(), v9.V4H(), 1), "srshr v8.4h, v9.4h, #1");
+ COMPARE_MACRO(Srshr(v10.V4H(), v11.V4H(), 16), "srshr v10.4h, v11.4h, #16");
+ COMPARE_MACRO(Srshr(v12.V8H(), v13.V8H(), 1), "srshr v12.8h, v13.8h, #1");
+ COMPARE_MACRO(Srshr(v14.V8H(), v15.V8H(), 16), "srshr v14.8h, v15.8h, #16");
+ COMPARE_MACRO(Srshr(v16.V2S(), v17.V2S(), 1), "srshr v16.2s, v17.2s, #1");
+ COMPARE_MACRO(Srshr(v18.V2S(), v19.V2S(), 32), "srshr v18.2s, v19.2s, #32");
+ COMPARE_MACRO(Srshr(v20.V4S(), v21.V4S(), 1), "srshr v20.4s, v21.4s, #1");
+ COMPARE_MACRO(Srshr(v22.V4S(), v23.V4S(), 32), "srshr v22.4s, v23.4s, #32");
+ COMPARE_MACRO(Srshr(v28.V2D(), v29.V2D(), 1), "srshr v28.2d, v29.2d, #1");
+ COMPARE_MACRO(Srshr(v30.V2D(), v31.V2D(), 64), "srshr v30.2d, v31.2d, #64");
+ COMPARE_MACRO(Srshr(d0, d1, 7), "srshr d0, d1, #7");
- COMPARE(Urshr(v0.V8B(), v1.V8B(), 1), "urshr v0.8b, v1.8b, #1");
- COMPARE(Urshr(v2.V8B(), v3.V8B(), 8), "urshr v2.8b, v3.8b, #8");
- COMPARE(Urshr(v4.V16B(), v5.V16B(), 1), "urshr v4.16b, v5.16b, #1");
- COMPARE(Urshr(v6.V16B(), v7.V16B(), 8), "urshr v6.16b, v7.16b, #8");
- COMPARE(Urshr(v8.V4H(), v9.V4H(), 1), "urshr v8.4h, v9.4h, #1");
- COMPARE(Urshr(v10.V4H(), v11.V4H(), 16), "urshr v10.4h, v11.4h, #16");
- COMPARE(Urshr(v12.V8H(), v13.V8H(), 1), "urshr v12.8h, v13.8h, #1");
- COMPARE(Urshr(v14.V8H(), v15.V8H(), 16), "urshr v14.8h, v15.8h, #16");
- COMPARE(Urshr(v16.V2S(), v17.V2S(), 1), "urshr v16.2s, v17.2s, #1");
- COMPARE(Urshr(v18.V2S(), v19.V2S(), 32), "urshr v18.2s, v19.2s, #32");
- COMPARE(Urshr(v20.V4S(), v21.V4S(), 1), "urshr v20.4s, v21.4s, #1");
- COMPARE(Urshr(v22.V4S(), v23.V4S(), 32), "urshr v22.4s, v23.4s, #32");
- COMPARE(Urshr(v28.V2D(), v29.V2D(), 1), "urshr v28.2d, v29.2d, #1");
- COMPARE(Urshr(v30.V2D(), v31.V2D(), 64), "urshr v30.2d, v31.2d, #64");
- COMPARE(Urshr(d0, d1, 7), "urshr d0, d1, #7");
+ COMPARE_MACRO(Urshr(v0.V8B(), v1.V8B(), 1), "urshr v0.8b, v1.8b, #1");
+ COMPARE_MACRO(Urshr(v2.V8B(), v3.V8B(), 8), "urshr v2.8b, v3.8b, #8");
+ COMPARE_MACRO(Urshr(v4.V16B(), v5.V16B(), 1), "urshr v4.16b, v5.16b, #1");
+ COMPARE_MACRO(Urshr(v6.V16B(), v7.V16B(), 8), "urshr v6.16b, v7.16b, #8");
+ COMPARE_MACRO(Urshr(v8.V4H(), v9.V4H(), 1), "urshr v8.4h, v9.4h, #1");
+ COMPARE_MACRO(Urshr(v10.V4H(), v11.V4H(), 16), "urshr v10.4h, v11.4h, #16");
+ COMPARE_MACRO(Urshr(v12.V8H(), v13.V8H(), 1), "urshr v12.8h, v13.8h, #1");
+ COMPARE_MACRO(Urshr(v14.V8H(), v15.V8H(), 16), "urshr v14.8h, v15.8h, #16");
+ COMPARE_MACRO(Urshr(v16.V2S(), v17.V2S(), 1), "urshr v16.2s, v17.2s, #1");
+ COMPARE_MACRO(Urshr(v18.V2S(), v19.V2S(), 32), "urshr v18.2s, v19.2s, #32");
+ COMPARE_MACRO(Urshr(v20.V4S(), v21.V4S(), 1), "urshr v20.4s, v21.4s, #1");
+ COMPARE_MACRO(Urshr(v22.V4S(), v23.V4S(), 32), "urshr v22.4s, v23.4s, #32");
+ COMPARE_MACRO(Urshr(v28.V2D(), v29.V2D(), 1), "urshr v28.2d, v29.2d, #1");
+ COMPARE_MACRO(Urshr(v30.V2D(), v31.V2D(), 64), "urshr v30.2d, v31.2d, #64");
+ COMPARE_MACRO(Urshr(d0, d1, 7), "urshr d0, d1, #7");
- COMPARE(Srsra(v0.V8B(), v1.V8B(), 1), "srsra v0.8b, v1.8b, #1");
- COMPARE(Srsra(v2.V8B(), v3.V8B(), 8), "srsra v2.8b, v3.8b, #8");
- COMPARE(Srsra(v4.V16B(), v5.V16B(), 1), "srsra v4.16b, v5.16b, #1");
- COMPARE(Srsra(v6.V16B(), v7.V16B(), 8), "srsra v6.16b, v7.16b, #8");
- COMPARE(Srsra(v8.V4H(), v9.V4H(), 1), "srsra v8.4h, v9.4h, #1");
- COMPARE(Srsra(v10.V4H(), v11.V4H(), 16), "srsra v10.4h, v11.4h, #16");
- COMPARE(Srsra(v12.V8H(), v13.V8H(), 1), "srsra v12.8h, v13.8h, #1");
- COMPARE(Srsra(v14.V8H(), v15.V8H(), 16), "srsra v14.8h, v15.8h, #16");
- COMPARE(Srsra(v16.V2S(), v17.V2S(), 1), "srsra v16.2s, v17.2s, #1");
- COMPARE(Srsra(v18.V2S(), v19.V2S(), 32), "srsra v18.2s, v19.2s, #32");
- COMPARE(Srsra(v20.V4S(), v21.V4S(), 1), "srsra v20.4s, v21.4s, #1");
- COMPARE(Srsra(v22.V4S(), v23.V4S(), 32), "srsra v22.4s, v23.4s, #32");
- COMPARE(Srsra(v28.V2D(), v29.V2D(), 1), "srsra v28.2d, v29.2d, #1");
- COMPARE(Srsra(v30.V2D(), v31.V2D(), 64), "srsra v30.2d, v31.2d, #64");
- COMPARE(Srsra(d0, d1, 7), "srsra d0, d1, #7");
+ COMPARE_MACRO(Srsra(v0.V8B(), v1.V8B(), 1), "srsra v0.8b, v1.8b, #1");
+ COMPARE_MACRO(Srsra(v2.V8B(), v3.V8B(), 8), "srsra v2.8b, v3.8b, #8");
+ COMPARE_MACRO(Srsra(v4.V16B(), v5.V16B(), 1), "srsra v4.16b, v5.16b, #1");
+ COMPARE_MACRO(Srsra(v6.V16B(), v7.V16B(), 8), "srsra v6.16b, v7.16b, #8");
+ COMPARE_MACRO(Srsra(v8.V4H(), v9.V4H(), 1), "srsra v8.4h, v9.4h, #1");
+ COMPARE_MACRO(Srsra(v10.V4H(), v11.V4H(), 16), "srsra v10.4h, v11.4h, #16");
+ COMPARE_MACRO(Srsra(v12.V8H(), v13.V8H(), 1), "srsra v12.8h, v13.8h, #1");
+ COMPARE_MACRO(Srsra(v14.V8H(), v15.V8H(), 16), "srsra v14.8h, v15.8h, #16");
+ COMPARE_MACRO(Srsra(v16.V2S(), v17.V2S(), 1), "srsra v16.2s, v17.2s, #1");
+ COMPARE_MACRO(Srsra(v18.V2S(), v19.V2S(), 32), "srsra v18.2s, v19.2s, #32");
+ COMPARE_MACRO(Srsra(v20.V4S(), v21.V4S(), 1), "srsra v20.4s, v21.4s, #1");
+ COMPARE_MACRO(Srsra(v22.V4S(), v23.V4S(), 32), "srsra v22.4s, v23.4s, #32");
+ COMPARE_MACRO(Srsra(v28.V2D(), v29.V2D(), 1), "srsra v28.2d, v29.2d, #1");
+ COMPARE_MACRO(Srsra(v30.V2D(), v31.V2D(), 64), "srsra v30.2d, v31.2d, #64");
+ COMPARE_MACRO(Srsra(d0, d1, 7), "srsra d0, d1, #7");
- COMPARE(Ssra(v0.V8B(), v1.V8B(), 1), "ssra v0.8b, v1.8b, #1");
- COMPARE(Ssra(v2.V8B(), v3.V8B(), 8), "ssra v2.8b, v3.8b, #8");
- COMPARE(Ssra(v4.V16B(), v5.V16B(), 1), "ssra v4.16b, v5.16b, #1");
- COMPARE(Ssra(v6.V16B(), v7.V16B(), 8), "ssra v6.16b, v7.16b, #8");
- COMPARE(Ssra(v8.V4H(), v9.V4H(), 1), "ssra v8.4h, v9.4h, #1");
- COMPARE(Ssra(v10.V4H(), v11.V4H(), 16), "ssra v10.4h, v11.4h, #16");
- COMPARE(Ssra(v12.V8H(), v13.V8H(), 1), "ssra v12.8h, v13.8h, #1");
- COMPARE(Ssra(v14.V8H(), v15.V8H(), 16), "ssra v14.8h, v15.8h, #16");
- COMPARE(Ssra(v16.V2S(), v17.V2S(), 1), "ssra v16.2s, v17.2s, #1");
- COMPARE(Ssra(v18.V2S(), v19.V2S(), 32), "ssra v18.2s, v19.2s, #32");
- COMPARE(Ssra(v20.V4S(), v21.V4S(), 1), "ssra v20.4s, v21.4s, #1");
- COMPARE(Ssra(v22.V4S(), v23.V4S(), 32), "ssra v22.4s, v23.4s, #32");
- COMPARE(Ssra(v28.V2D(), v29.V2D(), 1), "ssra v28.2d, v29.2d, #1");
- COMPARE(Ssra(v30.V2D(), v31.V2D(), 64), "ssra v30.2d, v31.2d, #64");
- COMPARE(Ssra(d0, d1, 7), "ssra d0, d1, #7");
+ COMPARE_MACRO(Ssra(v0.V8B(), v1.V8B(), 1), "ssra v0.8b, v1.8b, #1");
+ COMPARE_MACRO(Ssra(v2.V8B(), v3.V8B(), 8), "ssra v2.8b, v3.8b, #8");
+ COMPARE_MACRO(Ssra(v4.V16B(), v5.V16B(), 1), "ssra v4.16b, v5.16b, #1");
+ COMPARE_MACRO(Ssra(v6.V16B(), v7.V16B(), 8), "ssra v6.16b, v7.16b, #8");
+ COMPARE_MACRO(Ssra(v8.V4H(), v9.V4H(), 1), "ssra v8.4h, v9.4h, #1");
+ COMPARE_MACRO(Ssra(v10.V4H(), v11.V4H(), 16), "ssra v10.4h, v11.4h, #16");
+ COMPARE_MACRO(Ssra(v12.V8H(), v13.V8H(), 1), "ssra v12.8h, v13.8h, #1");
+ COMPARE_MACRO(Ssra(v14.V8H(), v15.V8H(), 16), "ssra v14.8h, v15.8h, #16");
+ COMPARE_MACRO(Ssra(v16.V2S(), v17.V2S(), 1), "ssra v16.2s, v17.2s, #1");
+ COMPARE_MACRO(Ssra(v18.V2S(), v19.V2S(), 32), "ssra v18.2s, v19.2s, #32");
+ COMPARE_MACRO(Ssra(v20.V4S(), v21.V4S(), 1), "ssra v20.4s, v21.4s, #1");
+ COMPARE_MACRO(Ssra(v22.V4S(), v23.V4S(), 32), "ssra v22.4s, v23.4s, #32");
+ COMPARE_MACRO(Ssra(v28.V2D(), v29.V2D(), 1), "ssra v28.2d, v29.2d, #1");
+ COMPARE_MACRO(Ssra(v30.V2D(), v31.V2D(), 64), "ssra v30.2d, v31.2d, #64");
+ COMPARE_MACRO(Ssra(d0, d1, 7), "ssra d0, d1, #7");
- COMPARE(Ursra(v0.V8B(), v1.V8B(), 1), "ursra v0.8b, v1.8b, #1");
- COMPARE(Ursra(v2.V8B(), v3.V8B(), 8), "ursra v2.8b, v3.8b, #8");
- COMPARE(Ursra(v4.V16B(), v5.V16B(), 1), "ursra v4.16b, v5.16b, #1");
- COMPARE(Ursra(v6.V16B(), v7.V16B(), 8), "ursra v6.16b, v7.16b, #8");
- COMPARE(Ursra(v8.V4H(), v9.V4H(), 1), "ursra v8.4h, v9.4h, #1");
- COMPARE(Ursra(v10.V4H(), v11.V4H(), 16), "ursra v10.4h, v11.4h, #16");
- COMPARE(Ursra(v12.V8H(), v13.V8H(), 1), "ursra v12.8h, v13.8h, #1");
- COMPARE(Ursra(v14.V8H(), v15.V8H(), 16), "ursra v14.8h, v15.8h, #16");
- COMPARE(Ursra(v16.V2S(), v17.V2S(), 1), "ursra v16.2s, v17.2s, #1");
- COMPARE(Ursra(v18.V2S(), v19.V2S(), 32), "ursra v18.2s, v19.2s, #32");
- COMPARE(Ursra(v20.V4S(), v21.V4S(), 1), "ursra v20.4s, v21.4s, #1");
- COMPARE(Ursra(v22.V4S(), v23.V4S(), 32), "ursra v22.4s, v23.4s, #32");
- COMPARE(Ursra(v28.V2D(), v29.V2D(), 1), "ursra v28.2d, v29.2d, #1");
- COMPARE(Ursra(v30.V2D(), v31.V2D(), 64), "ursra v30.2d, v31.2d, #64");
- COMPARE(Ursra(d0, d1, 7), "ursra d0, d1, #7");
+ COMPARE_MACRO(Ursra(v0.V8B(), v1.V8B(), 1), "ursra v0.8b, v1.8b, #1");
+ COMPARE_MACRO(Ursra(v2.V8B(), v3.V8B(), 8), "ursra v2.8b, v3.8b, #8");
+ COMPARE_MACRO(Ursra(v4.V16B(), v5.V16B(), 1), "ursra v4.16b, v5.16b, #1");
+ COMPARE_MACRO(Ursra(v6.V16B(), v7.V16B(), 8), "ursra v6.16b, v7.16b, #8");
+ COMPARE_MACRO(Ursra(v8.V4H(), v9.V4H(), 1), "ursra v8.4h, v9.4h, #1");
+ COMPARE_MACRO(Ursra(v10.V4H(), v11.V4H(), 16), "ursra v10.4h, v11.4h, #16");
+ COMPARE_MACRO(Ursra(v12.V8H(), v13.V8H(), 1), "ursra v12.8h, v13.8h, #1");
+ COMPARE_MACRO(Ursra(v14.V8H(), v15.V8H(), 16), "ursra v14.8h, v15.8h, #16");
+ COMPARE_MACRO(Ursra(v16.V2S(), v17.V2S(), 1), "ursra v16.2s, v17.2s, #1");
+ COMPARE_MACRO(Ursra(v18.V2S(), v19.V2S(), 32), "ursra v18.2s, v19.2s, #32");
+ COMPARE_MACRO(Ursra(v20.V4S(), v21.V4S(), 1), "ursra v20.4s, v21.4s, #1");
+ COMPARE_MACRO(Ursra(v22.V4S(), v23.V4S(), 32), "ursra v22.4s, v23.4s, #32");
+ COMPARE_MACRO(Ursra(v28.V2D(), v29.V2D(), 1), "ursra v28.2d, v29.2d, #1");
+ COMPARE_MACRO(Ursra(v30.V2D(), v31.V2D(), 64), "ursra v30.2d, v31.2d, #64");
+ COMPARE_MACRO(Ursra(d0, d1, 7), "ursra d0, d1, #7");
- COMPARE(Usra(v0.V8B(), v1.V8B(), 1), "usra v0.8b, v1.8b, #1");
- COMPARE(Usra(v2.V8B(), v3.V8B(), 8), "usra v2.8b, v3.8b, #8");
- COMPARE(Usra(v4.V16B(), v5.V16B(), 1), "usra v4.16b, v5.16b, #1");
- COMPARE(Usra(v6.V16B(), v7.V16B(), 8), "usra v6.16b, v7.16b, #8");
- COMPARE(Usra(v8.V4H(), v9.V4H(), 1), "usra v8.4h, v9.4h, #1");
- COMPARE(Usra(v10.V4H(), v11.V4H(), 16), "usra v10.4h, v11.4h, #16");
- COMPARE(Usra(v12.V8H(), v13.V8H(), 1), "usra v12.8h, v13.8h, #1");
- COMPARE(Usra(v14.V8H(), v15.V8H(), 16), "usra v14.8h, v15.8h, #16");
- COMPARE(Usra(v16.V2S(), v17.V2S(), 1), "usra v16.2s, v17.2s, #1");
- COMPARE(Usra(v18.V2S(), v19.V2S(), 32), "usra v18.2s, v19.2s, #32");
- COMPARE(Usra(v20.V4S(), v21.V4S(), 1), "usra v20.4s, v21.4s, #1");
- COMPARE(Usra(v22.V4S(), v23.V4S(), 32), "usra v22.4s, v23.4s, #32");
- COMPARE(Usra(v28.V2D(), v29.V2D(), 1), "usra v28.2d, v29.2d, #1");
- COMPARE(Usra(v30.V2D(), v31.V2D(), 64), "usra v30.2d, v31.2d, #64");
- COMPARE(Usra(d0, d1, 7), "usra d0, d1, #7");
+ COMPARE_MACRO(Usra(v0.V8B(), v1.V8B(), 1), "usra v0.8b, v1.8b, #1");
+ COMPARE_MACRO(Usra(v2.V8B(), v3.V8B(), 8), "usra v2.8b, v3.8b, #8");
+ COMPARE_MACRO(Usra(v4.V16B(), v5.V16B(), 1), "usra v4.16b, v5.16b, #1");
+ COMPARE_MACRO(Usra(v6.V16B(), v7.V16B(), 8), "usra v6.16b, v7.16b, #8");
+ COMPARE_MACRO(Usra(v8.V4H(), v9.V4H(), 1), "usra v8.4h, v9.4h, #1");
+ COMPARE_MACRO(Usra(v10.V4H(), v11.V4H(), 16), "usra v10.4h, v11.4h, #16");
+ COMPARE_MACRO(Usra(v12.V8H(), v13.V8H(), 1), "usra v12.8h, v13.8h, #1");
+ COMPARE_MACRO(Usra(v14.V8H(), v15.V8H(), 16), "usra v14.8h, v15.8h, #16");
+ COMPARE_MACRO(Usra(v16.V2S(), v17.V2S(), 1), "usra v16.2s, v17.2s, #1");
+ COMPARE_MACRO(Usra(v18.V2S(), v19.V2S(), 32), "usra v18.2s, v19.2s, #32");
+ COMPARE_MACRO(Usra(v20.V4S(), v21.V4S(), 1), "usra v20.4s, v21.4s, #1");
+ COMPARE_MACRO(Usra(v22.V4S(), v23.V4S(), 32), "usra v22.4s, v23.4s, #32");
+ COMPARE_MACRO(Usra(v28.V2D(), v29.V2D(), 1), "usra v28.2d, v29.2d, #1");
+ COMPARE_MACRO(Usra(v30.V2D(), v31.V2D(), 64), "usra v30.2d, v31.2d, #64");
+ COMPARE_MACRO(Usra(d0, d1, 7), "usra d0, d1, #7");
- COMPARE(Sli(v1.V8B(), v8.V8B(), 1), "sli v1.8b, v8.8b, #1");
- COMPARE(Sli(v2.V16B(), v9.V16B(), 2), "sli v2.16b, v9.16b, #2");
- COMPARE(Sli(v3.V4H(), v1.V4H(), 3), "sli v3.4h, v1.4h, #3");
- COMPARE(Sli(v4.V8H(), v2.V8H(), 4), "sli v4.8h, v2.8h, #4");
- COMPARE(Sli(v5.V2S(), v3.V2S(), 5), "sli v5.2s, v3.2s, #5");
- COMPARE(Sli(v6.V4S(), v4.V4S(), 6), "sli v6.4s, v4.4s, #6");
- COMPARE(Sli(v7.V2D(), v5.V2D(), 7), "sli v7.2d, v5.2d, #7");
- COMPARE(Sli(d8, d6, 8), "sli d8, d6, #8");
+ COMPARE_MACRO(Sli(v1.V8B(), v8.V8B(), 1), "sli v1.8b, v8.8b, #1");
+ COMPARE_MACRO(Sli(v2.V16B(), v9.V16B(), 2), "sli v2.16b, v9.16b, #2");
+ COMPARE_MACRO(Sli(v3.V4H(), v1.V4H(), 3), "sli v3.4h, v1.4h, #3");
+ COMPARE_MACRO(Sli(v4.V8H(), v2.V8H(), 4), "sli v4.8h, v2.8h, #4");
+ COMPARE_MACRO(Sli(v5.V2S(), v3.V2S(), 5), "sli v5.2s, v3.2s, #5");
+ COMPARE_MACRO(Sli(v6.V4S(), v4.V4S(), 6), "sli v6.4s, v4.4s, #6");
+ COMPARE_MACRO(Sli(v7.V2D(), v5.V2D(), 7), "sli v7.2d, v5.2d, #7");
+ COMPARE_MACRO(Sli(d8, d6, 8), "sli d8, d6, #8");
- COMPARE(Shl(v1.V8B(), v8.V8B(), 1), "shl v1.8b, v8.8b, #1");
- COMPARE(Shl(v2.V16B(), v9.V16B(), 2), "shl v2.16b, v9.16b, #2");
- COMPARE(Shl(v3.V4H(), v1.V4H(), 3), "shl v3.4h, v1.4h, #3");
- COMPARE(Shl(v4.V8H(), v2.V8H(), 4), "shl v4.8h, v2.8h, #4");
- COMPARE(Shl(v5.V2S(), v3.V2S(), 5), "shl v5.2s, v3.2s, #5");
- COMPARE(Shl(v6.V4S(), v4.V4S(), 6), "shl v6.4s, v4.4s, #6");
- COMPARE(Shl(v7.V2D(), v5.V2D(), 7), "shl v7.2d, v5.2d, #7");
- COMPARE(Shl(d8, d6, 8), "shl d8, d6, #8");
+ COMPARE_MACRO(Shl(v1.V8B(), v8.V8B(), 1), "shl v1.8b, v8.8b, #1");
+ COMPARE_MACRO(Shl(v2.V16B(), v9.V16B(), 2), "shl v2.16b, v9.16b, #2");
+ COMPARE_MACRO(Shl(v3.V4H(), v1.V4H(), 3), "shl v3.4h, v1.4h, #3");
+ COMPARE_MACRO(Shl(v4.V8H(), v2.V8H(), 4), "shl v4.8h, v2.8h, #4");
+ COMPARE_MACRO(Shl(v5.V2S(), v3.V2S(), 5), "shl v5.2s, v3.2s, #5");
+ COMPARE_MACRO(Shl(v6.V4S(), v4.V4S(), 6), "shl v6.4s, v4.4s, #6");
+ COMPARE_MACRO(Shl(v7.V2D(), v5.V2D(), 7), "shl v7.2d, v5.2d, #7");
+ COMPARE_MACRO(Shl(d8, d6, 8), "shl d8, d6, #8");
- COMPARE(Sqshl(v1.V8B(), v8.V8B(), 1), "sqshl v1.8b, v8.8b, #1");
- COMPARE(Sqshl(v2.V16B(), v9.V16B(), 2), "sqshl v2.16b, v9.16b, #2");
- COMPARE(Sqshl(v3.V4H(), v1.V4H(), 3), "sqshl v3.4h, v1.4h, #3");
- COMPARE(Sqshl(v4.V8H(), v2.V8H(), 4), "sqshl v4.8h, v2.8h, #4");
- COMPARE(Sqshl(v5.V2S(), v3.V2S(), 5), "sqshl v5.2s, v3.2s, #5");
- COMPARE(Sqshl(v6.V4S(), v4.V4S(), 6), "sqshl v6.4s, v4.4s, #6");
- COMPARE(Sqshl(v7.V2D(), v5.V2D(), 7), "sqshl v7.2d, v5.2d, #7");
- COMPARE(Sqshl(b8, b7, 1), "sqshl b8, b7, #1");
- COMPARE(Sqshl(h9, h8, 2), "sqshl h9, h8, #2");
- COMPARE(Sqshl(s10, s9, 3), "sqshl s10, s9, #3");
- COMPARE(Sqshl(d11, d10, 4), "sqshl d11, d10, #4");
+ COMPARE_MACRO(Sqshl(v1.V8B(), v8.V8B(), 1), "sqshl v1.8b, v8.8b, #1");
+ COMPARE_MACRO(Sqshl(v2.V16B(), v9.V16B(), 2), "sqshl v2.16b, v9.16b, #2");
+ COMPARE_MACRO(Sqshl(v3.V4H(), v1.V4H(), 3), "sqshl v3.4h, v1.4h, #3");
+ COMPARE_MACRO(Sqshl(v4.V8H(), v2.V8H(), 4), "sqshl v4.8h, v2.8h, #4");
+ COMPARE_MACRO(Sqshl(v5.V2S(), v3.V2S(), 5), "sqshl v5.2s, v3.2s, #5");
+ COMPARE_MACRO(Sqshl(v6.V4S(), v4.V4S(), 6), "sqshl v6.4s, v4.4s, #6");
+ COMPARE_MACRO(Sqshl(v7.V2D(), v5.V2D(), 7), "sqshl v7.2d, v5.2d, #7");
+ COMPARE_MACRO(Sqshl(b8, b7, 1), "sqshl b8, b7, #1");
+ COMPARE_MACRO(Sqshl(h9, h8, 2), "sqshl h9, h8, #2");
+ COMPARE_MACRO(Sqshl(s10, s9, 3), "sqshl s10, s9, #3");
+ COMPARE_MACRO(Sqshl(d11, d10, 4), "sqshl d11, d10, #4");
- COMPARE(Sqshlu(v1.V8B(), v8.V8B(), 1), "sqshlu v1.8b, v8.8b, #1");
- COMPARE(Sqshlu(v2.V16B(), v9.V16B(), 2), "sqshlu v2.16b, v9.16b, #2");
- COMPARE(Sqshlu(v3.V4H(), v1.V4H(), 3), "sqshlu v3.4h, v1.4h, #3");
- COMPARE(Sqshlu(v4.V8H(), v2.V8H(), 4), "sqshlu v4.8h, v2.8h, #4");
- COMPARE(Sqshlu(v5.V2S(), v3.V2S(), 5), "sqshlu v5.2s, v3.2s, #5");
- COMPARE(Sqshlu(v6.V4S(), v4.V4S(), 6), "sqshlu v6.4s, v4.4s, #6");
- COMPARE(Sqshlu(v7.V2D(), v5.V2D(), 7), "sqshlu v7.2d, v5.2d, #7");
- COMPARE(Sqshlu(b8, b7, 1), "sqshlu b8, b7, #1");
- COMPARE(Sqshlu(h9, h8, 2), "sqshlu h9, h8, #2");
- COMPARE(Sqshlu(s10, s9, 3), "sqshlu s10, s9, #3");
- COMPARE(Sqshlu(d11, d10, 4), "sqshlu d11, d10, #4");
+ COMPARE_MACRO(Sqshlu(v1.V8B(), v8.V8B(), 1), "sqshlu v1.8b, v8.8b, #1");
+ COMPARE_MACRO(Sqshlu(v2.V16B(), v9.V16B(), 2), "sqshlu v2.16b, v9.16b, #2");
+ COMPARE_MACRO(Sqshlu(v3.V4H(), v1.V4H(), 3), "sqshlu v3.4h, v1.4h, #3");
+ COMPARE_MACRO(Sqshlu(v4.V8H(), v2.V8H(), 4), "sqshlu v4.8h, v2.8h, #4");
+ COMPARE_MACRO(Sqshlu(v5.V2S(), v3.V2S(), 5), "sqshlu v5.2s, v3.2s, #5");
+ COMPARE_MACRO(Sqshlu(v6.V4S(), v4.V4S(), 6), "sqshlu v6.4s, v4.4s, #6");
+ COMPARE_MACRO(Sqshlu(v7.V2D(), v5.V2D(), 7), "sqshlu v7.2d, v5.2d, #7");
+ COMPARE_MACRO(Sqshlu(b8, b7, 1), "sqshlu b8, b7, #1");
+ COMPARE_MACRO(Sqshlu(h9, h8, 2), "sqshlu h9, h8, #2");
+ COMPARE_MACRO(Sqshlu(s10, s9, 3), "sqshlu s10, s9, #3");
+ COMPARE_MACRO(Sqshlu(d11, d10, 4), "sqshlu d11, d10, #4");
- COMPARE(Uqshl(v1.V8B(), v8.V8B(), 1), "uqshl v1.8b, v8.8b, #1");
- COMPARE(Uqshl(v2.V16B(), v9.V16B(), 2), "uqshl v2.16b, v9.16b, #2");
- COMPARE(Uqshl(v3.V4H(), v1.V4H(), 3), "uqshl v3.4h, v1.4h, #3");
- COMPARE(Uqshl(v4.V8H(), v2.V8H(), 4), "uqshl v4.8h, v2.8h, #4");
- COMPARE(Uqshl(v5.V2S(), v3.V2S(), 5), "uqshl v5.2s, v3.2s, #5");
- COMPARE(Uqshl(v6.V4S(), v4.V4S(), 6), "uqshl v6.4s, v4.4s, #6");
- COMPARE(Uqshl(v7.V2D(), v5.V2D(), 7), "uqshl v7.2d, v5.2d, #7");
- COMPARE(Uqshl(b8, b7, 1), "uqshl b8, b7, #1");
- COMPARE(Uqshl(h9, h8, 2), "uqshl h9, h8, #2");
- COMPARE(Uqshl(s10, s9, 3), "uqshl s10, s9, #3");
- COMPARE(Uqshl(d11, d10, 4), "uqshl d11, d10, #4");
+ COMPARE_MACRO(Uqshl(v1.V8B(), v8.V8B(), 1), "uqshl v1.8b, v8.8b, #1");
+ COMPARE_MACRO(Uqshl(v2.V16B(), v9.V16B(), 2), "uqshl v2.16b, v9.16b, #2");
+ COMPARE_MACRO(Uqshl(v3.V4H(), v1.V4H(), 3), "uqshl v3.4h, v1.4h, #3");
+ COMPARE_MACRO(Uqshl(v4.V8H(), v2.V8H(), 4), "uqshl v4.8h, v2.8h, #4");
+ COMPARE_MACRO(Uqshl(v5.V2S(), v3.V2S(), 5), "uqshl v5.2s, v3.2s, #5");
+ COMPARE_MACRO(Uqshl(v6.V4S(), v4.V4S(), 6), "uqshl v6.4s, v4.4s, #6");
+ COMPARE_MACRO(Uqshl(v7.V2D(), v5.V2D(), 7), "uqshl v7.2d, v5.2d, #7");
+ COMPARE_MACRO(Uqshl(b8, b7, 1), "uqshl b8, b7, #1");
+ COMPARE_MACRO(Uqshl(h9, h8, 2), "uqshl h9, h8, #2");
+ COMPARE_MACRO(Uqshl(s10, s9, 3), "uqshl s10, s9, #3");
+ COMPARE_MACRO(Uqshl(d11, d10, 4), "uqshl d11, d10, #4");
- COMPARE(Sshll(v1.V8H(), v8.V8B(), 1), "sshll v1.8h, v8.8b, #1");
- COMPARE(Sshll(v3.V4S(), v1.V4H(), 3), "sshll v3.4s, v1.4h, #3");
- COMPARE(Sshll(v5.V2D(), v3.V2S(), 5), "sshll v5.2d, v3.2s, #5");
- COMPARE(Sshll2(v2.V8H(), v9.V16B(), 2), "sshll2 v2.8h, v9.16b, #2");
- COMPARE(Sshll2(v4.V4S(), v2.V8H(), 4), "sshll2 v4.4s, v2.8h, #4");
- COMPARE(Sshll2(v6.V2D(), v4.V4S(), 6), "sshll2 v6.2d, v4.4s, #6");
+ COMPARE_MACRO(Sshll(v1.V8H(), v8.V8B(), 1), "sshll v1.8h, v8.8b, #1");
+ COMPARE_MACRO(Sshll(v3.V4S(), v1.V4H(), 3), "sshll v3.4s, v1.4h, #3");
+ COMPARE_MACRO(Sshll(v5.V2D(), v3.V2S(), 5), "sshll v5.2d, v3.2s, #5");
+ COMPARE_MACRO(Sshll2(v2.V8H(), v9.V16B(), 2), "sshll2 v2.8h, v9.16b, #2");
+ COMPARE_MACRO(Sshll2(v4.V4S(), v2.V8H(), 4), "sshll2 v4.4s, v2.8h, #4");
+ COMPARE_MACRO(Sshll2(v6.V2D(), v4.V4S(), 6), "sshll2 v6.2d, v4.4s, #6");
- COMPARE(Sshll(v1.V8H(), v8.V8B(), 0), "sxtl v1.8h, v8.8b");
- COMPARE(Sshll(v3.V4S(), v1.V4H(), 0), "sxtl v3.4s, v1.4h");
- COMPARE(Sshll(v5.V2D(), v3.V2S(), 0), "sxtl v5.2d, v3.2s");
- COMPARE(Sshll2(v2.V8H(), v9.V16B(), 0), "sxtl2 v2.8h, v9.16b");
- COMPARE(Sshll2(v4.V4S(), v2.V8H(), 0), "sxtl2 v4.4s, v2.8h");
- COMPARE(Sshll2(v6.V2D(), v4.V4S(), 0), "sxtl2 v6.2d, v4.4s");
+ COMPARE_MACRO(Sshll(v1.V8H(), v8.V8B(), 0), "sxtl v1.8h, v8.8b");
+ COMPARE_MACRO(Sshll(v3.V4S(), v1.V4H(), 0), "sxtl v3.4s, v1.4h");
+ COMPARE_MACRO(Sshll(v5.V2D(), v3.V2S(), 0), "sxtl v5.2d, v3.2s");
+ COMPARE_MACRO(Sshll2(v2.V8H(), v9.V16B(), 0), "sxtl2 v2.8h, v9.16b");
+ COMPARE_MACRO(Sshll2(v4.V4S(), v2.V8H(), 0), "sxtl2 v4.4s, v2.8h");
+ COMPARE_MACRO(Sshll2(v6.V2D(), v4.V4S(), 0), "sxtl2 v6.2d, v4.4s");
- COMPARE(Sxtl(v1.V8H(), v8.V8B()), "sxtl v1.8h, v8.8b");
- COMPARE(Sxtl(v3.V4S(), v1.V4H()), "sxtl v3.4s, v1.4h");
- COMPARE(Sxtl(v5.V2D(), v3.V2S()), "sxtl v5.2d, v3.2s");
- COMPARE(Sxtl2(v2.V8H(), v9.V16B()), "sxtl2 v2.8h, v9.16b");
- COMPARE(Sxtl2(v4.V4S(), v2.V8H()), "sxtl2 v4.4s, v2.8h");
- COMPARE(Sxtl2(v6.V2D(), v4.V4S()), "sxtl2 v6.2d, v4.4s");
+ COMPARE_MACRO(Sxtl(v1.V8H(), v8.V8B()), "sxtl v1.8h, v8.8b");
+ COMPARE_MACRO(Sxtl(v3.V4S(), v1.V4H()), "sxtl v3.4s, v1.4h");
+ COMPARE_MACRO(Sxtl(v5.V2D(), v3.V2S()), "sxtl v5.2d, v3.2s");
+ COMPARE_MACRO(Sxtl2(v2.V8H(), v9.V16B()), "sxtl2 v2.8h, v9.16b");
+ COMPARE_MACRO(Sxtl2(v4.V4S(), v2.V8H()), "sxtl2 v4.4s, v2.8h");
+ COMPARE_MACRO(Sxtl2(v6.V2D(), v4.V4S()), "sxtl2 v6.2d, v4.4s");
- COMPARE(Ushll(v1.V8H(), v8.V8B(), 1), "ushll v1.8h, v8.8b, #1");
- COMPARE(Ushll(v3.V4S(), v1.V4H(), 3), "ushll v3.4s, v1.4h, #3");
- COMPARE(Ushll(v5.V2D(), v3.V2S(), 5), "ushll v5.2d, v3.2s, #5");
- COMPARE(Ushll2(v2.V8H(), v9.V16B(), 2), "ushll2 v2.8h, v9.16b, #2");
- COMPARE(Ushll2(v4.V4S(), v2.V8H(), 4), "ushll2 v4.4s, v2.8h, #4");
- COMPARE(Ushll2(v6.V2D(), v4.V4S(), 6), "ushll2 v6.2d, v4.4s, #6");
+ COMPARE_MACRO(Ushll(v1.V8H(), v8.V8B(), 1), "ushll v1.8h, v8.8b, #1");
+ COMPARE_MACRO(Ushll(v3.V4S(), v1.V4H(), 3), "ushll v3.4s, v1.4h, #3");
+ COMPARE_MACRO(Ushll(v5.V2D(), v3.V2S(), 5), "ushll v5.2d, v3.2s, #5");
+ COMPARE_MACRO(Ushll2(v2.V8H(), v9.V16B(), 2), "ushll2 v2.8h, v9.16b, #2");
+ COMPARE_MACRO(Ushll2(v4.V4S(), v2.V8H(), 4), "ushll2 v4.4s, v2.8h, #4");
+ COMPARE_MACRO(Ushll2(v6.V2D(), v4.V4S(), 6), "ushll2 v6.2d, v4.4s, #6");
- COMPARE(Ushll(v1.V8H(), v8.V8B(), 0), "uxtl v1.8h, v8.8b");
- COMPARE(Ushll(v3.V4S(), v1.V4H(), 0), "uxtl v3.4s, v1.4h");
- COMPARE(Ushll(v5.V2D(), v3.V2S(), 0), "uxtl v5.2d, v3.2s");
- COMPARE(Ushll2(v2.V8H(), v9.V16B(), 0), "uxtl2 v2.8h, v9.16b");
- COMPARE(Ushll2(v4.V4S(), v2.V8H(), 0), "uxtl2 v4.4s, v2.8h");
- COMPARE(Ushll2(v6.V2D(), v4.V4S(), 0), "uxtl2 v6.2d, v4.4s");
+ COMPARE_MACRO(Ushll(v1.V8H(), v8.V8B(), 0), "uxtl v1.8h, v8.8b");
+ COMPARE_MACRO(Ushll(v3.V4S(), v1.V4H(), 0), "uxtl v3.4s, v1.4h");
+ COMPARE_MACRO(Ushll(v5.V2D(), v3.V2S(), 0), "uxtl v5.2d, v3.2s");
+ COMPARE_MACRO(Ushll2(v2.V8H(), v9.V16B(), 0), "uxtl2 v2.8h, v9.16b");
+ COMPARE_MACRO(Ushll2(v4.V4S(), v2.V8H(), 0), "uxtl2 v4.4s, v2.8h");
+ COMPARE_MACRO(Ushll2(v6.V2D(), v4.V4S(), 0), "uxtl2 v6.2d, v4.4s");
- COMPARE(Uxtl(v1.V8H(), v8.V8B()), "uxtl v1.8h, v8.8b");
- COMPARE(Uxtl(v3.V4S(), v1.V4H()), "uxtl v3.4s, v1.4h");
- COMPARE(Uxtl(v5.V2D(), v3.V2S()), "uxtl v5.2d, v3.2s");
- COMPARE(Uxtl2(v2.V8H(), v9.V16B()), "uxtl2 v2.8h, v9.16b");
- COMPARE(Uxtl2(v4.V4S(), v2.V8H()), "uxtl2 v4.4s, v2.8h");
- COMPARE(Uxtl2(v6.V2D(), v4.V4S()), "uxtl2 v6.2d, v4.4s");
+ COMPARE_MACRO(Uxtl(v1.V8H(), v8.V8B()), "uxtl v1.8h, v8.8b");
+ COMPARE_MACRO(Uxtl(v3.V4S(), v1.V4H()), "uxtl v3.4s, v1.4h");
+ COMPARE_MACRO(Uxtl(v5.V2D(), v3.V2S()), "uxtl v5.2d, v3.2s");
+ COMPARE_MACRO(Uxtl2(v2.V8H(), v9.V16B()), "uxtl2 v2.8h, v9.16b");
+ COMPARE_MACRO(Uxtl2(v4.V4S(), v2.V8H()), "uxtl2 v4.4s, v2.8h");
+ COMPARE_MACRO(Uxtl2(v6.V2D(), v4.V4S()), "uxtl2 v6.2d, v4.4s");
- COMPARE(Sri(v1.V8B(), v8.V8B(), 1), "sri v1.8b, v8.8b, #1");
- COMPARE(Sri(v2.V16B(), v9.V16B(), 2), "sri v2.16b, v9.16b, #2");
- COMPARE(Sri(v3.V4H(), v1.V4H(), 3), "sri v3.4h, v1.4h, #3");
- COMPARE(Sri(v4.V8H(), v2.V8H(), 4), "sri v4.8h, v2.8h, #4");
- COMPARE(Sri(v5.V2S(), v3.V2S(), 5), "sri v5.2s, v3.2s, #5");
- COMPARE(Sri(v6.V4S(), v4.V4S(), 6), "sri v6.4s, v4.4s, #6");
- COMPARE(Sri(v7.V2D(), v5.V2D(), 7), "sri v7.2d, v5.2d, #7");
- COMPARE(Sri(d8, d6, 8), "sri d8, d6, #8");
+ COMPARE_MACRO(Sri(v1.V8B(), v8.V8B(), 1), "sri v1.8b, v8.8b, #1");
+ COMPARE_MACRO(Sri(v2.V16B(), v9.V16B(), 2), "sri v2.16b, v9.16b, #2");
+ COMPARE_MACRO(Sri(v3.V4H(), v1.V4H(), 3), "sri v3.4h, v1.4h, #3");
+ COMPARE_MACRO(Sri(v4.V8H(), v2.V8H(), 4), "sri v4.8h, v2.8h, #4");
+ COMPARE_MACRO(Sri(v5.V2S(), v3.V2S(), 5), "sri v5.2s, v3.2s, #5");
+ COMPARE_MACRO(Sri(v6.V4S(), v4.V4S(), 6), "sri v6.4s, v4.4s, #6");
+ COMPARE_MACRO(Sri(v7.V2D(), v5.V2D(), 7), "sri v7.2d, v5.2d, #7");
+ COMPARE_MACRO(Sri(d8, d6, 8), "sri d8, d6, #8");
- COMPARE(Shrn(v0.V8B(), v1.V8H(), 1), "shrn v0.8b, v1.8h, #1");
- COMPARE(Shrn(v1.V4H(), v2.V4S(), 2), "shrn v1.4h, v2.4s, #2");
- COMPARE(Shrn(v2.V2S(), v3.V2D(), 3), "shrn v2.2s, v3.2d, #3");
- COMPARE(Shrn2(v0.V16B(), v1.V8H(), 4), "shrn2 v0.16b, v1.8h, #4");
- COMPARE(Shrn2(v1.V8H(), v2.V4S(), 5), "shrn2 v1.8h, v2.4s, #5");
- COMPARE(Shrn2(v2.V4S(), v3.V2D(), 6), "shrn2 v2.4s, v3.2d, #6");
+ COMPARE_MACRO(Shrn(v0.V8B(), v1.V8H(), 1), "shrn v0.8b, v1.8h, #1");
+ COMPARE_MACRO(Shrn(v1.V4H(), v2.V4S(), 2), "shrn v1.4h, v2.4s, #2");
+ COMPARE_MACRO(Shrn(v2.V2S(), v3.V2D(), 3), "shrn v2.2s, v3.2d, #3");
+ COMPARE_MACRO(Shrn2(v0.V16B(), v1.V8H(), 4), "shrn2 v0.16b, v1.8h, #4");
+ COMPARE_MACRO(Shrn2(v1.V8H(), v2.V4S(), 5), "shrn2 v1.8h, v2.4s, #5");
+ COMPARE_MACRO(Shrn2(v2.V4S(), v3.V2D(), 6), "shrn2 v2.4s, v3.2d, #6");
- COMPARE(Rshrn(v0.V8B(), v1.V8H(), 1), "rshrn v0.8b, v1.8h, #1");
- COMPARE(Rshrn(v1.V4H(), v2.V4S(), 2), "rshrn v1.4h, v2.4s, #2");
- COMPARE(Rshrn(v2.V2S(), v3.V2D(), 3), "rshrn v2.2s, v3.2d, #3");
- COMPARE(Rshrn2(v0.V16B(), v1.V8H(), 4), "rshrn2 v0.16b, v1.8h, #4");
- COMPARE(Rshrn2(v1.V8H(), v2.V4S(), 5), "rshrn2 v1.8h, v2.4s, #5");
- COMPARE(Rshrn2(v2.V4S(), v3.V2D(), 6), "rshrn2 v2.4s, v3.2d, #6");
+ COMPARE_MACRO(Rshrn(v0.V8B(), v1.V8H(), 1), "rshrn v0.8b, v1.8h, #1");
+ COMPARE_MACRO(Rshrn(v1.V4H(), v2.V4S(), 2), "rshrn v1.4h, v2.4s, #2");
+ COMPARE_MACRO(Rshrn(v2.V2S(), v3.V2D(), 3), "rshrn v2.2s, v3.2d, #3");
+ COMPARE_MACRO(Rshrn2(v0.V16B(), v1.V8H(), 4), "rshrn2 v0.16b, v1.8h, #4");
+ COMPARE_MACRO(Rshrn2(v1.V8H(), v2.V4S(), 5), "rshrn2 v1.8h, v2.4s, #5");
+ COMPARE_MACRO(Rshrn2(v2.V4S(), v3.V2D(), 6), "rshrn2 v2.4s, v3.2d, #6");
- COMPARE(Uqshrn(v0.V8B(), v1.V8H(), 1), "uqshrn v0.8b, v1.8h, #1");
- COMPARE(Uqshrn(v1.V4H(), v2.V4S(), 2), "uqshrn v1.4h, v2.4s, #2");
- COMPARE(Uqshrn(v2.V2S(), v3.V2D(), 3), "uqshrn v2.2s, v3.2d, #3");
- COMPARE(Uqshrn2(v0.V16B(), v1.V8H(), 4), "uqshrn2 v0.16b, v1.8h, #4");
- COMPARE(Uqshrn2(v1.V8H(), v2.V4S(), 5), "uqshrn2 v1.8h, v2.4s, #5");
- COMPARE(Uqshrn2(v2.V4S(), v3.V2D(), 6), "uqshrn2 v2.4s, v3.2d, #6");
- COMPARE(Uqshrn(b0, h1, 1), "uqshrn b0, h1, #1");
- COMPARE(Uqshrn(h1, s2, 2), "uqshrn h1, s2, #2");
- COMPARE(Uqshrn(s2, d3, 3), "uqshrn s2, d3, #3");
+ COMPARE_MACRO(Uqshrn(v0.V8B(), v1.V8H(), 1), "uqshrn v0.8b, v1.8h, #1");
+ COMPARE_MACRO(Uqshrn(v1.V4H(), v2.V4S(), 2), "uqshrn v1.4h, v2.4s, #2");
+ COMPARE_MACRO(Uqshrn(v2.V2S(), v3.V2D(), 3), "uqshrn v2.2s, v3.2d, #3");
+ COMPARE_MACRO(Uqshrn2(v0.V16B(), v1.V8H(), 4), "uqshrn2 v0.16b, v1.8h, #4");
+ COMPARE_MACRO(Uqshrn2(v1.V8H(), v2.V4S(), 5), "uqshrn2 v1.8h, v2.4s, #5");
+ COMPARE_MACRO(Uqshrn2(v2.V4S(), v3.V2D(), 6), "uqshrn2 v2.4s, v3.2d, #6");
+ COMPARE_MACRO(Uqshrn(b0, h1, 1), "uqshrn b0, h1, #1");
+ COMPARE_MACRO(Uqshrn(h1, s2, 2), "uqshrn h1, s2, #2");
+ COMPARE_MACRO(Uqshrn(s2, d3, 3), "uqshrn s2, d3, #3");
- COMPARE(Uqrshrn(v0.V8B(), v1.V8H(), 1), "uqrshrn v0.8b, v1.8h, #1");
- COMPARE(Uqrshrn(v1.V4H(), v2.V4S(), 2), "uqrshrn v1.4h, v2.4s, #2");
- COMPARE(Uqrshrn(v2.V2S(), v3.V2D(), 3), "uqrshrn v2.2s, v3.2d, #3");
- COMPARE(Uqrshrn2(v0.V16B(), v1.V8H(), 4), "uqrshrn2 v0.16b, v1.8h, #4");
- COMPARE(Uqrshrn2(v1.V8H(), v2.V4S(), 5), "uqrshrn2 v1.8h, v2.4s, #5");
- COMPARE(Uqrshrn2(v2.V4S(), v3.V2D(), 6), "uqrshrn2 v2.4s, v3.2d, #6");
- COMPARE(Uqrshrn(b0, h1, 1), "uqrshrn b0, h1, #1");
- COMPARE(Uqrshrn(h1, s2, 2), "uqrshrn h1, s2, #2");
- COMPARE(Uqrshrn(s2, d3, 3), "uqrshrn s2, d3, #3");
+ COMPARE_MACRO(Uqrshrn(v0.V8B(), v1.V8H(), 1), "uqrshrn v0.8b, v1.8h, #1");
+ COMPARE_MACRO(Uqrshrn(v1.V4H(), v2.V4S(), 2), "uqrshrn v1.4h, v2.4s, #2");
+ COMPARE_MACRO(Uqrshrn(v2.V2S(), v3.V2D(), 3), "uqrshrn v2.2s, v3.2d, #3");
+ COMPARE_MACRO(Uqrshrn2(v0.V16B(), v1.V8H(), 4), "uqrshrn2 v0.16b, v1.8h, #4");
+ COMPARE_MACRO(Uqrshrn2(v1.V8H(), v2.V4S(), 5), "uqrshrn2 v1.8h, v2.4s, #5");
+ COMPARE_MACRO(Uqrshrn2(v2.V4S(), v3.V2D(), 6), "uqrshrn2 v2.4s, v3.2d, #6");
+ COMPARE_MACRO(Uqrshrn(b0, h1, 1), "uqrshrn b0, h1, #1");
+ COMPARE_MACRO(Uqrshrn(h1, s2, 2), "uqrshrn h1, s2, #2");
+ COMPARE_MACRO(Uqrshrn(s2, d3, 3), "uqrshrn s2, d3, #3");
- COMPARE(Sqshrn(v0.V8B(), v1.V8H(), 1), "sqshrn v0.8b, v1.8h, #1");
- COMPARE(Sqshrn(v1.V4H(), v2.V4S(), 2), "sqshrn v1.4h, v2.4s, #2");
- COMPARE(Sqshrn(v2.V2S(), v3.V2D(), 3), "sqshrn v2.2s, v3.2d, #3");
- COMPARE(Sqshrn2(v0.V16B(), v1.V8H(), 4), "sqshrn2 v0.16b, v1.8h, #4");
- COMPARE(Sqshrn2(v1.V8H(), v2.V4S(), 5), "sqshrn2 v1.8h, v2.4s, #5");
- COMPARE(Sqshrn2(v2.V4S(), v3.V2D(), 6), "sqshrn2 v2.4s, v3.2d, #6");
- COMPARE(Sqshrn(b0, h1, 1), "sqshrn b0, h1, #1");
- COMPARE(Sqshrn(h1, s2, 2), "sqshrn h1, s2, #2");
- COMPARE(Sqshrn(s2, d3, 3), "sqshrn s2, d3, #3");
+ COMPARE_MACRO(Sqshrn(v0.V8B(), v1.V8H(), 1), "sqshrn v0.8b, v1.8h, #1");
+ COMPARE_MACRO(Sqshrn(v1.V4H(), v2.V4S(), 2), "sqshrn v1.4h, v2.4s, #2");
+ COMPARE_MACRO(Sqshrn(v2.V2S(), v3.V2D(), 3), "sqshrn v2.2s, v3.2d, #3");
+ COMPARE_MACRO(Sqshrn2(v0.V16B(), v1.V8H(), 4), "sqshrn2 v0.16b, v1.8h, #4");
+ COMPARE_MACRO(Sqshrn2(v1.V8H(), v2.V4S(), 5), "sqshrn2 v1.8h, v2.4s, #5");
+ COMPARE_MACRO(Sqshrn2(v2.V4S(), v3.V2D(), 6), "sqshrn2 v2.4s, v3.2d, #6");
+ COMPARE_MACRO(Sqshrn(b0, h1, 1), "sqshrn b0, h1, #1");
+ COMPARE_MACRO(Sqshrn(h1, s2, 2), "sqshrn h1, s2, #2");
+ COMPARE_MACRO(Sqshrn(s2, d3, 3), "sqshrn s2, d3, #3");
- COMPARE(Sqrshrn(v0.V8B(), v1.V8H(), 1), "sqrshrn v0.8b, v1.8h, #1");
- COMPARE(Sqrshrn(v1.V4H(), v2.V4S(), 2), "sqrshrn v1.4h, v2.4s, #2");
- COMPARE(Sqrshrn(v2.V2S(), v3.V2D(), 3), "sqrshrn v2.2s, v3.2d, #3");
- COMPARE(Sqrshrn2(v0.V16B(), v1.V8H(), 4), "sqrshrn2 v0.16b, v1.8h, #4");
- COMPARE(Sqrshrn2(v1.V8H(), v2.V4S(), 5), "sqrshrn2 v1.8h, v2.4s, #5");
- COMPARE(Sqrshrn2(v2.V4S(), v3.V2D(), 6), "sqrshrn2 v2.4s, v3.2d, #6");
- COMPARE(Sqrshrn(b0, h1, 1), "sqrshrn b0, h1, #1");
- COMPARE(Sqrshrn(h1, s2, 2), "sqrshrn h1, s2, #2");
- COMPARE(Sqrshrn(s2, d3, 3), "sqrshrn s2, d3, #3");
+ COMPARE_MACRO(Sqrshrn(v0.V8B(), v1.V8H(), 1), "sqrshrn v0.8b, v1.8h, #1");
+ COMPARE_MACRO(Sqrshrn(v1.V4H(), v2.V4S(), 2), "sqrshrn v1.4h, v2.4s, #2");
+ COMPARE_MACRO(Sqrshrn(v2.V2S(), v3.V2D(), 3), "sqrshrn v2.2s, v3.2d, #3");
+ COMPARE_MACRO(Sqrshrn2(v0.V16B(), v1.V8H(), 4), "sqrshrn2 v0.16b, v1.8h, #4");
+ COMPARE_MACRO(Sqrshrn2(v1.V8H(), v2.V4S(), 5), "sqrshrn2 v1.8h, v2.4s, #5");
+ COMPARE_MACRO(Sqrshrn2(v2.V4S(), v3.V2D(), 6), "sqrshrn2 v2.4s, v3.2d, #6");
+ COMPARE_MACRO(Sqrshrn(b0, h1, 1), "sqrshrn b0, h1, #1");
+ COMPARE_MACRO(Sqrshrn(h1, s2, 2), "sqrshrn h1, s2, #2");
+ COMPARE_MACRO(Sqrshrn(s2, d3, 3), "sqrshrn s2, d3, #3");
- COMPARE(Sqshrun(v0.V8B(), v1.V8H(), 1), "sqshrun v0.8b, v1.8h, #1");
- COMPARE(Sqshrun(v1.V4H(), v2.V4S(), 2), "sqshrun v1.4h, v2.4s, #2");
- COMPARE(Sqshrun(v2.V2S(), v3.V2D(), 3), "sqshrun v2.2s, v3.2d, #3");
- COMPARE(Sqshrun2(v0.V16B(), v1.V8H(), 4), "sqshrun2 v0.16b, v1.8h, #4");
- COMPARE(Sqshrun2(v1.V8H(), v2.V4S(), 5), "sqshrun2 v1.8h, v2.4s, #5");
- COMPARE(Sqshrun2(v2.V4S(), v3.V2D(), 6), "sqshrun2 v2.4s, v3.2d, #6");
- COMPARE(Sqshrun(b0, h1, 1), "sqshrun b0, h1, #1");
- COMPARE(Sqshrun(h1, s2, 2), "sqshrun h1, s2, #2");
- COMPARE(Sqshrun(s2, d3, 3), "sqshrun s2, d3, #3");
+ COMPARE_MACRO(Sqshrun(v0.V8B(), v1.V8H(), 1), "sqshrun v0.8b, v1.8h, #1");
+ COMPARE_MACRO(Sqshrun(v1.V4H(), v2.V4S(), 2), "sqshrun v1.4h, v2.4s, #2");
+ COMPARE_MACRO(Sqshrun(v2.V2S(), v3.V2D(), 3), "sqshrun v2.2s, v3.2d, #3");
+ COMPARE_MACRO(Sqshrun2(v0.V16B(), v1.V8H(), 4), "sqshrun2 v0.16b, v1.8h, #4");
+ COMPARE_MACRO(Sqshrun2(v1.V8H(), v2.V4S(), 5), "sqshrun2 v1.8h, v2.4s, #5");
+ COMPARE_MACRO(Sqshrun2(v2.V4S(), v3.V2D(), 6), "sqshrun2 v2.4s, v3.2d, #6");
+ COMPARE_MACRO(Sqshrun(b0, h1, 1), "sqshrun b0, h1, #1");
+ COMPARE_MACRO(Sqshrun(h1, s2, 2), "sqshrun h1, s2, #2");
+ COMPARE_MACRO(Sqshrun(s2, d3, 3), "sqshrun s2, d3, #3");
- COMPARE(Sqrshrun(v0.V8B(), v1.V8H(), 1), "sqrshrun v0.8b, v1.8h, #1");
- COMPARE(Sqrshrun(v1.V4H(), v2.V4S(), 2), "sqrshrun v1.4h, v2.4s, #2");
- COMPARE(Sqrshrun(v2.V2S(), v3.V2D(), 3), "sqrshrun v2.2s, v3.2d, #3");
- COMPARE(Sqrshrun2(v0.V16B(), v1.V8H(), 4), "sqrshrun2 v0.16b, v1.8h, #4");
- COMPARE(Sqrshrun2(v1.V8H(), v2.V4S(), 5), "sqrshrun2 v1.8h, v2.4s, #5");
- COMPARE(Sqrshrun2(v2.V4S(), v3.V2D(), 6), "sqrshrun2 v2.4s, v3.2d, #6");
- COMPARE(Sqrshrun(b0, h1, 1), "sqrshrun b0, h1, #1");
- COMPARE(Sqrshrun(h1, s2, 2), "sqrshrun h1, s2, #2");
- COMPARE(Sqrshrun(s2, d3, 3), "sqrshrun s2, d3, #3");
+ COMPARE_MACRO(Sqrshrun(v0.V8B(), v1.V8H(), 1), "sqrshrun v0.8b, v1.8h, #1");
+ COMPARE_MACRO(Sqrshrun(v1.V4H(), v2.V4S(), 2), "sqrshrun v1.4h, v2.4s, #2");
+ COMPARE_MACRO(Sqrshrun(v2.V2S(), v3.V2D(), 3), "sqrshrun v2.2s, v3.2d, #3");
+ COMPARE_MACRO(Sqrshrun2(v0.V16B(), v1.V8H(), 4),
+ "sqrshrun2 v0.16b, v1.8h, #4");
+ COMPARE_MACRO(Sqrshrun2(v1.V8H(), v2.V4S(), 5), "sqrshrun2 v1.8h, v2.4s, #5");
+ COMPARE_MACRO(Sqrshrun2(v2.V4S(), v3.V2D(), 6), "sqrshrun2 v2.4s, v3.2d, #6");
+ COMPARE_MACRO(Sqrshrun(b0, h1, 1), "sqrshrun b0, h1, #1");
+ COMPARE_MACRO(Sqrshrun(h1, s2, 2), "sqrshrun h1, s2, #2");
+ COMPARE_MACRO(Sqrshrun(s2, d3, 3), "sqrshrun s2, d3, #3");
- COMPARE(Scvtf(v5.V2S(), v3.V2S(), 11), "scvtf v5.2s, v3.2s, #11");
- COMPARE(Scvtf(v6.V4S(), v4.V4S(), 12), "scvtf v6.4s, v4.4s, #12");
- COMPARE(Scvtf(v7.V2D(), v5.V2D(), 33), "scvtf v7.2d, v5.2d, #33");
- COMPARE(Scvtf(s8, s6, 13), "scvtf s8, s6, #13");
- COMPARE(Scvtf(d8, d6, 34), "scvtf d8, d6, #34");
+ COMPARE_MACRO(Scvtf(v5.V2S(), v3.V2S(), 11), "scvtf v5.2s, v3.2s, #11");
+ COMPARE_MACRO(Scvtf(v6.V4S(), v4.V4S(), 12), "scvtf v6.4s, v4.4s, #12");
+ COMPARE_MACRO(Scvtf(v7.V2D(), v5.V2D(), 33), "scvtf v7.2d, v5.2d, #33");
+ COMPARE_MACRO(Scvtf(s8, s6, 13), "scvtf s8, s6, #13");
+ COMPARE_MACRO(Scvtf(d8, d6, 34), "scvtf d8, d6, #34");
- COMPARE(Ucvtf(v5.V2S(), v3.V2S(), 11), "ucvtf v5.2s, v3.2s, #11");
- COMPARE(Ucvtf(v6.V4S(), v4.V4S(), 12), "ucvtf v6.4s, v4.4s, #12");
- COMPARE(Ucvtf(v7.V2D(), v5.V2D(), 33), "ucvtf v7.2d, v5.2d, #33");
- COMPARE(Ucvtf(s8, s6, 13), "ucvtf s8, s6, #13");
- COMPARE(Ucvtf(d8, d6, 34), "ucvtf d8, d6, #34");
+ COMPARE_MACRO(Ucvtf(v5.V2S(), v3.V2S(), 11), "ucvtf v5.2s, v3.2s, #11");
+ COMPARE_MACRO(Ucvtf(v6.V4S(), v4.V4S(), 12), "ucvtf v6.4s, v4.4s, #12");
+ COMPARE_MACRO(Ucvtf(v7.V2D(), v5.V2D(), 33), "ucvtf v7.2d, v5.2d, #33");
+ COMPARE_MACRO(Ucvtf(s8, s6, 13), "ucvtf s8, s6, #13");
+ COMPARE_MACRO(Ucvtf(d8, d6, 34), "ucvtf d8, d6, #34");
- COMPARE(Fcvtzs(v5.V2S(), v3.V2S(), 11), "fcvtzs v5.2s, v3.2s, #11");
- COMPARE(Fcvtzs(v6.V4S(), v4.V4S(), 12), "fcvtzs v6.4s, v4.4s, #12");
- COMPARE(Fcvtzs(v7.V2D(), v5.V2D(), 33), "fcvtzs v7.2d, v5.2d, #33");
- COMPARE(Fcvtzs(s8, s6, 13), "fcvtzs s8, s6, #13");
- COMPARE(Fcvtzs(d8, d6, 34), "fcvtzs d8, d6, #34");
+ COMPARE_MACRO(Fcvtzs(v5.V2S(), v3.V2S(), 11), "fcvtzs v5.2s, v3.2s, #11");
+ COMPARE_MACRO(Fcvtzs(v6.V4S(), v4.V4S(), 12), "fcvtzs v6.4s, v4.4s, #12");
+ COMPARE_MACRO(Fcvtzs(v7.V2D(), v5.V2D(), 33), "fcvtzs v7.2d, v5.2d, #33");
+ COMPARE_MACRO(Fcvtzs(s8, s6, 13), "fcvtzs s8, s6, #13");
+ COMPARE_MACRO(Fcvtzs(d8, d6, 34), "fcvtzs d8, d6, #34");
- COMPARE(Fcvtzu(v5.V2S(), v3.V2S(), 11), "fcvtzu v5.2s, v3.2s, #11");
- COMPARE(Fcvtzu(v6.V4S(), v4.V4S(), 12), "fcvtzu v6.4s, v4.4s, #12");
- COMPARE(Fcvtzu(v7.V2D(), v5.V2D(), 33), "fcvtzu v7.2d, v5.2d, #33");
- COMPARE(Fcvtzu(s8, s6, 13), "fcvtzu s8, s6, #13");
- COMPARE(Fcvtzu(d8, d6, 34), "fcvtzu d8, d6, #34");
-
+ COMPARE_MACRO(Fcvtzu(v5.V2S(), v3.V2S(), 11), "fcvtzu v5.2s, v3.2s, #11");
+ COMPARE_MACRO(Fcvtzu(v6.V4S(), v4.V4S(), 12), "fcvtzu v6.4s, v4.4s, #12");
+ COMPARE_MACRO(Fcvtzu(v7.V2D(), v5.V2D(), 33), "fcvtzu v7.2d, v5.2d, #33");
+ COMPARE_MACRO(Fcvtzu(s8, s6, 13), "fcvtzu s8, s6, #13");
+ COMPARE_MACRO(Fcvtzu(d8, d6, 34), "fcvtzu d8, d6, #34");
CLEANUP();
}
diff --git a/test/aarch64/test-fuzz-aarch64.cc b/test/aarch64/test-fuzz-aarch64.cc
index 691efbf..56c1c1d 100644
--- a/test/aarch64/test-fuzz-aarch64.cc
+++ b/test/aarch64/test-fuzz-aarch64.cc
@@ -31,7 +31,7 @@
#include "aarch64/decoder-aarch64.h"
#include "aarch64/disasm-aarch64.h"
-#define TEST(name) TEST_(AARCH64_FUZZ_##name)
+#define TEST(name) TEST_(AARCH64_FUZZ_##name)
namespace vixl {
@@ -117,5 +117,5 @@
}
#endif
-} // namespace aarch64
-} // namespace vixl
+} // namespace aarch64
+} // namespace vixl
diff --git a/test/aarch64/test-simulator-aarch64.cc b/test/aarch64/test-simulator-aarch64.cc
index 09705d6..0fa1e72 100644
--- a/test/aarch64/test-simulator-aarch64.cc
+++ b/test/aarch64/test-simulator-aarch64.cc
@@ -53,76 +53,75 @@
// test-simulator-traces-aarch64.h.
#define __ masm.
-#define TEST(name) TEST_(AARCH64_SIM_##name)
+#define TEST(name) TEST_(AARCH64_SIM_##name)
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
-#define SETUP() \
- MacroAssembler masm; \
- Decoder decoder; \
- Simulator* simulator = Test::run_debugger() ? new Debugger(&decoder) \
- : new Simulator(&decoder); \
- simulator->SetColouredTrace(Test::coloured_trace()); \
- simulator->SetInstructionStats(Test::instruction_stats()); \
+#define SETUP() \
+ MacroAssembler masm; \
+ Decoder decoder; \
+ Simulator* simulator = \
+ Test::run_debugger() ? new Debugger(&decoder) : new Simulator(&decoder); \
+ simulator->SetColouredTrace(Test::coloured_trace()); \
+ simulator->SetInstructionStats(Test::instruction_stats());
-#define START() \
- masm.Reset(); \
- simulator->ResetState(); \
- __ PushCalleeSavedRegisters(); \
- if (Test::trace_reg()) { \
- __ Trace(LOG_STATE, TRACE_ENABLE); \
- } \
- if (Test::trace_write()) { \
- __ Trace(LOG_WRITE, TRACE_ENABLE); \
- } \
- if (Test::trace_sim()) { \
- __ Trace(LOG_DISASM, TRACE_ENABLE); \
- } \
- if (Test::instruction_stats()) { \
- __ EnableInstrumentation(); \
+#define START() \
+ masm.Reset(); \
+ simulator->ResetState(); \
+ __ PushCalleeSavedRegisters(); \
+ if (Test::trace_reg()) { \
+ __ Trace(LOG_STATE, TRACE_ENABLE); \
+ } \
+ if (Test::trace_write()) { \
+ __ Trace(LOG_WRITE, TRACE_ENABLE); \
+ } \
+ if (Test::trace_sim()) { \
+ __ Trace(LOG_DISASM, TRACE_ENABLE); \
+ } \
+ if (Test::instruction_stats()) { \
+ __ EnableInstrumentation(); \
}
-#define END() \
- if (Test::instruction_stats()) { \
- __ DisableInstrumentation(); \
- } \
- __ Trace(LOG_ALL, TRACE_DISABLE); \
- __ PopCalleeSavedRegisters(); \
- __ Ret(); \
+#define END() \
+ if (Test::instruction_stats()) { \
+ __ DisableInstrumentation(); \
+ } \
+ __ Trace(LOG_ALL, TRACE_DISABLE); \
+ __ PopCalleeSavedRegisters(); \
+ __ Ret(); \
masm.FinalizeCode()
-#define RUN() \
+#define RUN() \
simulator->RunFrom(masm.GetBuffer()->GetStartAddress<Instruction*>())
-#define TEARDOWN() \
- delete simulator;
+#define TEARDOWN() delete simulator;
-#else // VIXL_INCLUDE_SIMULATOR_AARCH64
+#else // VIXL_INCLUDE_SIMULATOR_AARCH64
-#define SETUP() \
- MacroAssembler masm; \
+#define SETUP() \
+ MacroAssembler masm; \
CPU::SetUp()
-#define START() \
- masm.Reset(); \
+#define START() \
+ masm.Reset(); \
__ PushCalleeSavedRegisters()
-#define END() \
- __ PopCalleeSavedRegisters(); \
- __ Ret(); \
+#define END() \
+ __ PopCalleeSavedRegisters(); \
+ __ Ret(); \
masm.FinalizeCode()
-#define RUN() \
- { \
- masm.GetBuffer()->SetExecutable(); \
- ExecuteMemory(masm.GetBuffer()->GetStartAddress<byte*>(), \
- masm.GetSizeOfCodeGenerated()); \
- masm.GetBuffer()->SetWritable(); \
+#define RUN() \
+ { \
+ masm.GetBuffer()->SetExecutable(); \
+ ExecuteMemory(masm.GetBuffer()->GetStartAddress<byte*>(), \
+ masm.GetSizeOfCodeGenerated()); \
+ masm.GetBuffer()->SetWritable(); \
}
#define TEARDOWN()
-#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
+#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
// The maximum number of errors to report in detail for each test.
@@ -131,13 +130,9 @@
// Overloaded versions of RawbitsToDouble and RawbitsToFloat for use in the
// templated test functions.
-static float rawbits_to_fp(uint32_t bits) {
- return RawbitsToFloat(bits);
-}
+static float rawbits_to_fp(uint32_t bits) { return RawbitsToFloat(bits); }
-static double rawbits_to_fp(uint64_t bits) {
- return RawbitsToDouble(bits);
-}
+static double rawbits_to_fp(uint64_t bits) { return RawbitsToDouble(bits); }
// MacroAssembler member function pointers to pass to the test dispatchers.
@@ -164,22 +159,26 @@
int fbits);
// TODO: 'Test2OpNEONHelper_t' and 'Test2OpFPHelper_t' can be
// consolidated into one routine.
-typedef void (MacroAssembler::*Test1OpNEONHelper_t)(
- const VRegister& vd, const VRegister& vn);
-typedef void (MacroAssembler::*Test2OpNEONHelper_t)(
- const VRegister& vd, const VRegister& vn, const VRegister& vm);
-typedef void (MacroAssembler::*TestByElementNEONHelper_t)(
- const VRegister& vd, const VRegister& vn, const VRegister& vm, int vm_index);
+typedef void (MacroAssembler::*Test1OpNEONHelper_t)(const VRegister& vd,
+ const VRegister& vn);
+typedef void (MacroAssembler::*Test2OpNEONHelper_t)(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+typedef void (MacroAssembler::*TestByElementNEONHelper_t)(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
typedef void (MacroAssembler::*TestOpImmOpImmVdUpdateNEONHelper_t)(
- const VRegister& vd, int imm1, const VRegister& vn, int imm2);
+ const VRegister& vd, int imm1, const VRegister& vn, int imm2);
// This helps using the same typename for both the function pointer
// and the array of immediates passed to helper routines.
template <typename T>
class Test2OpImmediateNEONHelper_t {
public:
- typedef void (MacroAssembler::*mnemonic)(
- const VRegister& vd, const VRegister& vn, T imm);
+ typedef void (MacroAssembler::*mnemonic)(const VRegister& vd,
+ const VRegister& vn,
+ T imm);
};
@@ -195,9 +194,12 @@
// Standard test dispatchers.
-static void Test1Op_Helper(Test1OpFPHelper_t helper, uintptr_t inputs,
- unsigned inputs_length, uintptr_t results,
- unsigned d_size, unsigned n_size) {
+static void Test1Op_Helper(Test1OpFPHelper_t helper,
+ uintptr_t inputs,
+ unsigned inputs_length,
+ uintptr_t results,
+ unsigned d_size,
+ unsigned n_size) {
VIXL_ASSERT((d_size == kDRegSize) || (d_size == kSRegSize));
VIXL_ASSERT((n_size == kDRegSize) || (n_size == kSRegSize));
@@ -246,26 +248,34 @@
// rawbits representations of doubles or floats. This ensures that exact bit
// comparisons can be performed.
template <typename Tn, typename Td>
-static void Test1Op(const char * name, Test1OpFPHelper_t helper,
- const Tn inputs[], unsigned inputs_length,
- const Td expected[], unsigned expected_length) {
+static void Test1Op(const char* name,
+ Test1OpFPHelper_t helper,
+ const Tn inputs[],
+ unsigned inputs_length,
+ const Td expected[],
+ unsigned expected_length) {
VIXL_ASSERT(inputs_length > 0);
const unsigned results_length = inputs_length;
- Td * results = new Td[results_length];
+ Td* results = new Td[results_length];
const unsigned d_bits = sizeof(Td) * 8;
const unsigned n_bits = sizeof(Tn) * 8;
- Test1Op_Helper(helper, reinterpret_cast<uintptr_t>(inputs), inputs_length,
- reinterpret_cast<uintptr_t>(results), d_bits, n_bits);
+ Test1Op_Helper(helper,
+ reinterpret_cast<uintptr_t>(inputs),
+ inputs_length,
+ reinterpret_cast<uintptr_t>(results),
+ d_bits,
+ n_bits);
if (Test::generate_test_trace()) {
// Print the results.
printf("const uint%u_t kExpected_%s[] = {\n", d_bits, name);
for (unsigned d = 0; d < results_length; d++) {
printf(" 0x%0*" PRIx64 ",\n",
- d_bits / 4, static_cast<uint64_t>(results[d]));
+ d_bits / 4,
+ static_cast<uint64_t>(results[d]));
}
printf("};\n");
printf("const unsigned kExpectedCount_%s = %u;\n", name, results_length);
@@ -279,13 +289,18 @@
if (++error_count > kErrorReportLimit) continue;
printf("%s 0x%0*" PRIx64 " (%s %g):\n",
- name, n_bits / 4, static_cast<uint64_t>(inputs[n]),
- name, rawbits_to_fp(inputs[n]));
+ name,
+ n_bits / 4,
+ static_cast<uint64_t>(inputs[n]),
+ name,
+ rawbits_to_fp(inputs[n]));
printf(" Expected: 0x%0*" PRIx64 " (%g)\n",
- d_bits / 4, static_cast<uint64_t>(expected[d]),
+ d_bits / 4,
+ static_cast<uint64_t>(expected[d]),
rawbits_to_fp(expected[d]));
printf(" Found: 0x%0*" PRIx64 " (%g)\n",
- d_bits / 4, static_cast<uint64_t>(results[d]),
+ d_bits / 4,
+ static_cast<uint64_t>(results[d]),
rawbits_to_fp(results[d]));
printf("\n");
}
@@ -301,8 +316,10 @@
static void Test2Op_Helper(Test2OpFPHelper_t helper,
- uintptr_t inputs, unsigned inputs_length,
- uintptr_t results, unsigned reg_size) {
+ uintptr_t inputs,
+ unsigned inputs_length,
+ uintptr_t results,
+ unsigned reg_size) {
VIXL_ASSERT((reg_size == kDRegSize) || (reg_size == kSRegSize));
SETUP();
@@ -341,7 +358,7 @@
SingleEmissionCheckScope guard(&masm);
(masm.*helper)(fd, fn, fm);
}
- __ Str(fd, MemOperand(out, fd.GetSizeInBytes(), PostIndex));
+ __ Str(fd, MemOperand(out, fd.GetSizeInBytes(), PostIndex));
__ Add(index_m, index_m, 1);
__ Cmp(index_m, inputs_length);
@@ -361,25 +378,32 @@
// rawbits representations of doubles or floats. This ensures that exact bit
// comparisons can be performed.
template <typename T>
-static void Test2Op(const char * name, Test2OpFPHelper_t helper,
- const T inputs[], unsigned inputs_length,
- const T expected[], unsigned expected_length) {
+static void Test2Op(const char* name,
+ Test2OpFPHelper_t helper,
+ const T inputs[],
+ unsigned inputs_length,
+ const T expected[],
+ unsigned expected_length) {
VIXL_ASSERT(inputs_length > 0);
const unsigned results_length = inputs_length * inputs_length;
- T * results = new T[results_length];
+ T* results = new T[results_length];
const unsigned bits = sizeof(T) * 8;
- Test2Op_Helper(helper, reinterpret_cast<uintptr_t>(inputs), inputs_length,
- reinterpret_cast<uintptr_t>(results), bits);
+ Test2Op_Helper(helper,
+ reinterpret_cast<uintptr_t>(inputs),
+ inputs_length,
+ reinterpret_cast<uintptr_t>(results),
+ bits);
if (Test::generate_test_trace()) {
// Print the results.
printf("const uint%u_t kExpected_%s[] = {\n", bits, name);
for (unsigned d = 0; d < results_length; d++) {
printf(" 0x%0*" PRIx64 ",\n",
- bits / 4, static_cast<uint64_t>(results[d]));
+ bits / 4,
+ static_cast<uint64_t>(results[d]));
}
printf("};\n");
printf("const unsigned kExpectedCount_%s = %u;\n", name, results_length);
@@ -395,16 +419,20 @@
printf("%s 0x%0*" PRIx64 ", 0x%0*" PRIx64 " (%s %g %g):\n",
name,
- bits / 4, static_cast<uint64_t>(inputs[n]),
- bits / 4, static_cast<uint64_t>(inputs[m]),
+ bits / 4,
+ static_cast<uint64_t>(inputs[n]),
+ bits / 4,
+ static_cast<uint64_t>(inputs[m]),
name,
rawbits_to_fp(inputs[n]),
rawbits_to_fp(inputs[m]));
printf(" Expected: 0x%0*" PRIx64 " (%g)\n",
- bits / 4, static_cast<uint64_t>(expected[d]),
+ bits / 4,
+ static_cast<uint64_t>(expected[d]),
rawbits_to_fp(expected[d]));
printf(" Found: 0x%0*" PRIx64 " (%g)\n",
- bits / 4, static_cast<uint64_t>(results[d]),
+ bits / 4,
+ static_cast<uint64_t>(results[d]),
rawbits_to_fp(results[d]));
printf("\n");
}
@@ -421,8 +449,10 @@
static void Test3Op_Helper(Test3OpFPHelper_t helper,
- uintptr_t inputs, unsigned inputs_length,
- uintptr_t results, unsigned reg_size) {
+ uintptr_t inputs,
+ unsigned inputs_length,
+ uintptr_t results,
+ unsigned reg_size) {
VIXL_ASSERT((reg_size == kDRegSize) || (reg_size == kSRegSize));
SETUP();
@@ -491,25 +521,32 @@
// rawbits representations of doubles or floats. This ensures that exact bit
// comparisons can be performed.
template <typename T>
-static void Test3Op(const char * name, Test3OpFPHelper_t helper,
- const T inputs[], unsigned inputs_length,
- const T expected[], unsigned expected_length) {
+static void Test3Op(const char* name,
+ Test3OpFPHelper_t helper,
+ const T inputs[],
+ unsigned inputs_length,
+ const T expected[],
+ unsigned expected_length) {
VIXL_ASSERT(inputs_length > 0);
const unsigned results_length = inputs_length * inputs_length * inputs_length;
- T * results = new T[results_length];
+ T* results = new T[results_length];
const unsigned bits = sizeof(T) * 8;
- Test3Op_Helper(helper, reinterpret_cast<uintptr_t>(inputs), inputs_length,
- reinterpret_cast<uintptr_t>(results), bits);
+ Test3Op_Helper(helper,
+ reinterpret_cast<uintptr_t>(inputs),
+ inputs_length,
+ reinterpret_cast<uintptr_t>(results),
+ bits);
if (Test::generate_test_trace()) {
// Print the results.
printf("const uint%u_t kExpected_%s[] = {\n", bits, name);
for (unsigned d = 0; d < results_length; d++) {
printf(" 0x%0*" PRIx64 ",\n",
- bits / 4, static_cast<uint64_t>(results[d]));
+ bits / 4,
+ static_cast<uint64_t>(results[d]));
}
printf("};\n");
printf("const unsigned kExpectedCount_%s = %u;\n", name, results_length);
@@ -527,18 +564,23 @@
printf("%s 0x%0*" PRIx64 ", 0x%0*" PRIx64 ", 0x%0*" PRIx64
" (%s %g %g %g):\n",
name,
- bits / 4, static_cast<uint64_t>(inputs[n]),
- bits / 4, static_cast<uint64_t>(inputs[m]),
- bits / 4, static_cast<uint64_t>(inputs[a]),
+ bits / 4,
+ static_cast<uint64_t>(inputs[n]),
+ bits / 4,
+ static_cast<uint64_t>(inputs[m]),
+ bits / 4,
+ static_cast<uint64_t>(inputs[a]),
name,
rawbits_to_fp(inputs[n]),
rawbits_to_fp(inputs[m]),
rawbits_to_fp(inputs[a]));
printf(" Expected: 0x%0*" PRIx64 " (%g)\n",
- bits / 4, static_cast<uint64_t>(expected[d]),
+ bits / 4,
+ static_cast<uint64_t>(expected[d]),
rawbits_to_fp(expected[d]));
printf(" Found: 0x%0*" PRIx64 " (%g)\n",
- bits / 4, static_cast<uint64_t>(results[d]),
+ bits / 4,
+ static_cast<uint64_t>(results[d]),
rawbits_to_fp(results[d]));
printf("\n");
}
@@ -556,8 +598,10 @@
static void TestCmp_Helper(TestFPCmpHelper_t helper,
- uintptr_t inputs, unsigned inputs_length,
- uintptr_t results, unsigned reg_size) {
+ uintptr_t inputs,
+ unsigned inputs_length,
+ uintptr_t results,
+ unsigned reg_size) {
VIXL_ASSERT((reg_size == kDRegSize) || (reg_size == kSRegSize));
SETUP();
@@ -618,18 +662,24 @@
// rawbits representations of doubles or floats. This ensures that exact bit
// comparisons can be performed.
template <typename T>
-static void TestCmp(const char * name, TestFPCmpHelper_t helper,
- const T inputs[], unsigned inputs_length,
- const uint8_t expected[], unsigned expected_length) {
+static void TestCmp(const char* name,
+ TestFPCmpHelper_t helper,
+ const T inputs[],
+ unsigned inputs_length,
+ const uint8_t expected[],
+ unsigned expected_length) {
VIXL_ASSERT(inputs_length > 0);
const unsigned results_length = inputs_length * inputs_length;
- uint8_t * results = new uint8_t[results_length];
+ uint8_t* results = new uint8_t[results_length];
const unsigned bits = sizeof(T) * 8;
- TestCmp_Helper(helper, reinterpret_cast<uintptr_t>(inputs), inputs_length,
- reinterpret_cast<uintptr_t>(results), bits);
+ TestCmp_Helper(helper,
+ reinterpret_cast<uintptr_t>(inputs),
+ inputs_length,
+ reinterpret_cast<uintptr_t>(results),
+ bits);
if (Test::generate_test_trace()) {
// Print the results.
@@ -653,8 +703,10 @@
printf("%s 0x%0*" PRIx64 ", 0x%0*" PRIx64 " (%s %g %g):\n",
name,
- bits / 4, static_cast<uint64_t>(inputs[n]),
- bits / 4, static_cast<uint64_t>(inputs[m]),
+ bits / 4,
+ static_cast<uint64_t>(inputs[n]),
+ bits / 4,
+ static_cast<uint64_t>(inputs[m]),
name,
rawbits_to_fp(inputs[n]),
rawbits_to_fp(inputs[m]));
@@ -685,8 +737,10 @@
static void TestCmpZero_Helper(TestFPCmpZeroHelper_t helper,
- uintptr_t inputs, unsigned inputs_length,
- uintptr_t results, unsigned reg_size) {
+ uintptr_t inputs,
+ unsigned inputs_length,
+ uintptr_t results,
+ unsigned reg_size) {
VIXL_ASSERT((reg_size == kDRegSize) || (reg_size == kSRegSize));
SETUP();
@@ -737,18 +791,24 @@
// rawbits representations of doubles or floats. This ensures that exact bit
// comparisons can be performed.
template <typename T>
-static void TestCmpZero(const char * name, TestFPCmpZeroHelper_t helper,
- const T inputs[], unsigned inputs_length,
- const uint8_t expected[], unsigned expected_length) {
+static void TestCmpZero(const char* name,
+ TestFPCmpZeroHelper_t helper,
+ const T inputs[],
+ unsigned inputs_length,
+ const uint8_t expected[],
+ unsigned expected_length) {
VIXL_ASSERT(inputs_length > 0);
const unsigned results_length = inputs_length;
- uint8_t * results = new uint8_t[results_length];
+ uint8_t* results = new uint8_t[results_length];
const unsigned bits = sizeof(T) * 8;
- TestCmpZero_Helper(helper, reinterpret_cast<uintptr_t>(inputs), inputs_length,
- reinterpret_cast<uintptr_t>(results), bits);
+ TestCmpZero_Helper(helper,
+ reinterpret_cast<uintptr_t>(inputs),
+ inputs_length,
+ reinterpret_cast<uintptr_t>(results),
+ bits);
if (Test::generate_test_trace()) {
// Print the results.
@@ -771,8 +831,10 @@
printf("%s 0x%0*" PRIx64 ", 0x%0*u (%s %g #0.0):\n",
name,
- bits / 4, static_cast<uint64_t>(inputs[n]),
- bits / 4, 0,
+ bits / 4,
+ static_cast<uint64_t>(inputs[n]),
+ bits / 4,
+ 0,
name,
rawbits_to_fp(inputs[n]));
printf(" Expected: %c%c%c%c (0x%" PRIx8 ")\n",
@@ -801,9 +863,11 @@
static void TestFPToFixed_Helper(TestFPToFixedHelper_t helper,
- uintptr_t inputs, unsigned inputs_length,
+ uintptr_t inputs,
+ unsigned inputs_length,
uintptr_t results,
- unsigned d_size, unsigned n_size) {
+ unsigned d_size,
+ unsigned n_size) {
VIXL_ASSERT((d_size == kXRegSize) || (d_size == kWRegSize));
VIXL_ASSERT((n_size == kDRegSize) || (n_size == kSRegSize));
@@ -821,7 +885,7 @@
const int n_index_shift =
(n_size == kDRegSize) ? kDRegSizeInBytesLog2 : kSRegSizeInBytesLog2;
- Register rd = (d_size == kXRegSize) ? x10 : w10;
+ Register rd = (d_size == kXRegSize) ? Register(x10) : Register(w10);
FPRegister fn = (n_size == kDRegSize) ? d1 : s1;
__ Mov(out, results);
@@ -850,9 +914,12 @@
}
-static void TestFPToInt_Helper(TestFPToIntHelper_t helper, uintptr_t inputs,
- unsigned inputs_length, uintptr_t results,
- unsigned d_size, unsigned n_size) {
+static void TestFPToInt_Helper(TestFPToIntHelper_t helper,
+ uintptr_t inputs,
+ unsigned inputs_length,
+ uintptr_t results,
+ unsigned d_size,
+ unsigned n_size) {
VIXL_ASSERT((d_size == kXRegSize) || (d_size == kWRegSize));
VIXL_ASSERT((n_size == kDRegSize) || (n_size == kSRegSize));
@@ -870,7 +937,7 @@
const int n_index_shift =
(n_size == kDRegSize) ? kDRegSizeInBytesLog2 : kSRegSizeInBytesLog2;
- Register rd = (d_size == kXRegSize) ? x10 : w10;
+ Register rd = (d_size == kXRegSize) ? Register(x10) : Register(w10);
FPRegister fn = (n_size == kDRegSize) ? d1 : s1;
__ Mov(out, results);
@@ -903,19 +970,26 @@
// performed.
// - The expected[] array should be an array of signed integers.
template <typename Tn, typename Td>
-static void TestFPToS(const char * name, TestFPToIntHelper_t helper,
- const Tn inputs[], unsigned inputs_length,
- const Td expected[], unsigned expected_length) {
+static void TestFPToS(const char* name,
+ TestFPToIntHelper_t helper,
+ const Tn inputs[],
+ unsigned inputs_length,
+ const Td expected[],
+ unsigned expected_length) {
VIXL_ASSERT(inputs_length > 0);
const unsigned results_length = inputs_length;
- Td * results = new Td[results_length];
+ Td* results = new Td[results_length];
const unsigned d_bits = sizeof(Td) * 8;
const unsigned n_bits = sizeof(Tn) * 8;
- TestFPToInt_Helper(helper, reinterpret_cast<uintptr_t>(inputs), inputs_length,
- reinterpret_cast<uintptr_t>(results), d_bits, n_bits);
+ TestFPToInt_Helper(helper,
+ reinterpret_cast<uintptr_t>(inputs),
+ inputs_length,
+ reinterpret_cast<uintptr_t>(results),
+ d_bits,
+ n_bits);
if (Test::generate_test_trace()) {
// Print the results.
@@ -925,7 +999,7 @@
// Deriving int_d_min in this way (rather than just checking INT64_MIN and
// the like) avoids warnings about comparing values with differing ranges.
const int64_t int_d_max = (UINT64_C(1) << (d_bits - 1)) - 1;
- const int64_t int_d_min = -(int_d_max) - 1;
+ const int64_t int_d_min = -(int_d_max)-1;
for (unsigned d = 0; d < results_length; d++) {
if (results[d] == int_d_min) {
printf(" -INT%u_C(%" PRId64 ") - 1,\n", d_bits, int_d_max);
@@ -953,13 +1027,18 @@
if (++error_count > kErrorReportLimit) continue;
printf("%s 0x%0*" PRIx64 " (%s %g):\n",
- name, n_bits / 4, static_cast<uint64_t>(inputs[n]),
- name, rawbits_to_fp(inputs[n]));
+ name,
+ n_bits / 4,
+ static_cast<uint64_t>(inputs[n]),
+ name,
+ rawbits_to_fp(inputs[n]));
printf(" Expected: 0x%0*" PRIx64 " (%" PRId64 ")\n",
- d_bits / 4, static_cast<uint64_t>(expected[d]),
+ d_bits / 4,
+ static_cast<uint64_t>(expected[d]),
static_cast<int64_t>(expected[d]));
printf(" Found: 0x%0*" PRIx64 " (%" PRId64 ")\n",
- d_bits / 4, static_cast<uint64_t>(results[d]),
+ d_bits / 4,
+ static_cast<uint64_t>(results[d]),
static_cast<int64_t>(results[d]));
printf("\n");
}
@@ -980,20 +1059,26 @@
// performed.
// - The expected[] array should be an array of unsigned integers.
template <typename Tn, typename Td>
-static void TestFPToU(const char * name, TestFPToIntHelper_t helper,
- const Tn inputs[], unsigned inputs_length,
- const Td expected[], unsigned expected_length) {
+static void TestFPToU(const char* name,
+ TestFPToIntHelper_t helper,
+ const Tn inputs[],
+ unsigned inputs_length,
+ const Td expected[],
+ unsigned expected_length) {
VIXL_ASSERT(inputs_length > 0);
const unsigned results_length = inputs_length;
- Td * results = new Td[results_length];
+ Td* results = new Td[results_length];
const unsigned d_bits = sizeof(Td) * 8;
const unsigned n_bits = sizeof(Tn) * 8;
TestFPToInt_Helper(helper,
- reinterpret_cast<uintptr_t>(inputs), inputs_length,
- reinterpret_cast<uintptr_t>(results), d_bits, n_bits);
+ reinterpret_cast<uintptr_t>(inputs),
+ inputs_length,
+ reinterpret_cast<uintptr_t>(results),
+ d_bits,
+ n_bits);
if (Test::generate_test_trace()) {
// Print the results.
@@ -1013,13 +1098,18 @@
if (++error_count > kErrorReportLimit) continue;
printf("%s 0x%0*" PRIx64 " (%s %g):\n",
- name, n_bits / 4, static_cast<uint64_t>(inputs[n]),
- name, rawbits_to_fp(inputs[n]));
+ name,
+ n_bits / 4,
+ static_cast<uint64_t>(inputs[n]),
+ name,
+ rawbits_to_fp(inputs[n]));
printf(" Expected: 0x%0*" PRIx64 " (%" PRIu64 ")\n",
- d_bits / 4, static_cast<uint64_t>(expected[d]),
+ d_bits / 4,
+ static_cast<uint64_t>(expected[d]),
static_cast<uint64_t>(expected[d]));
printf(" Found: 0x%0*" PRIx64 " (%" PRIu64 ")\n",
- d_bits / 4, static_cast<uint64_t>(results[d]),
+ d_bits / 4,
+ static_cast<uint64_t>(results[d]),
static_cast<uint64_t>(results[d]));
printf("\n");
}
@@ -1040,20 +1130,26 @@
// performed.
// - The expected[] array should be an array of signed integers.
template <typename Tn, typename Td>
-static void TestFPToFixedS(const char * name, TestFPToFixedHelper_t helper,
- const Tn inputs[], unsigned inputs_length,
- const Td expected[], unsigned expected_length) {
+static void TestFPToFixedS(const char* name,
+ TestFPToFixedHelper_t helper,
+ const Tn inputs[],
+ unsigned inputs_length,
+ const Td expected[],
+ unsigned expected_length) {
VIXL_ASSERT(inputs_length > 0);
const unsigned d_bits = sizeof(Td) * 8;
const unsigned n_bits = sizeof(Tn) * 8;
const unsigned results_length = inputs_length * (d_bits + 1);
- Td * results = new Td[results_length];
+ Td* results = new Td[results_length];
TestFPToFixed_Helper(helper,
- reinterpret_cast<uintptr_t>(inputs), inputs_length,
- reinterpret_cast<uintptr_t>(results), d_bits, n_bits);
+ reinterpret_cast<uintptr_t>(inputs),
+ inputs_length,
+ reinterpret_cast<uintptr_t>(results),
+ d_bits,
+ n_bits);
if (Test::generate_test_trace()) {
// Print the results.
@@ -1063,7 +1159,7 @@
// Deriving int_d_min in this way (rather than just checking INT64_MIN and
// the like) avoids warnings about comparing values with differing ranges.
const int64_t int_d_max = (UINT64_C(1) << (d_bits - 1)) - 1;
- const int64_t int_d_min = -(int_d_max) - 1;
+ const int64_t int_d_min = -(int_d_max)-1;
for (unsigned d = 0; d < results_length; d++) {
if (results[d] == int_d_min) {
printf(" -INT%u_C(%" PRId64 ") - 1,\n", d_bits, int_d_max);
@@ -1092,13 +1188,20 @@
if (++error_count > kErrorReportLimit) continue;
printf("%s 0x%0*" PRIx64 " #%d (%s %g #%d):\n",
- name, n_bits / 4, static_cast<uint64_t>(inputs[n]), fbits,
- name, rawbits_to_fp(inputs[n]), fbits);
+ name,
+ n_bits / 4,
+ static_cast<uint64_t>(inputs[n]),
+ fbits,
+ name,
+ rawbits_to_fp(inputs[n]),
+ fbits);
printf(" Expected: 0x%0*" PRIx64 " (%" PRId64 ")\n",
- d_bits / 4, static_cast<uint64_t>(expected[d]),
+ d_bits / 4,
+ static_cast<uint64_t>(expected[d]),
static_cast<int64_t>(expected[d]));
printf(" Found: 0x%0*" PRIx64 " (%" PRId64 ")\n",
- d_bits / 4, static_cast<uint64_t>(results[d]),
+ d_bits / 4,
+ static_cast<uint64_t>(results[d]),
static_cast<int64_t>(results[d]));
printf("\n");
}
@@ -1120,20 +1223,26 @@
// performed.
// - The expected[] array should be an array of unsigned integers.
template <typename Tn, typename Td>
-static void TestFPToFixedU(const char * name, TestFPToFixedHelper_t helper,
- const Tn inputs[], unsigned inputs_length,
- const Td expected[], unsigned expected_length) {
+static void TestFPToFixedU(const char* name,
+ TestFPToFixedHelper_t helper,
+ const Tn inputs[],
+ unsigned inputs_length,
+ const Td expected[],
+ unsigned expected_length) {
VIXL_ASSERT(inputs_length > 0);
const unsigned d_bits = sizeof(Td) * 8;
const unsigned n_bits = sizeof(Tn) * 8;
const unsigned results_length = inputs_length * (d_bits + 1);
- Td * results = new Td[results_length];
+ Td* results = new Td[results_length];
TestFPToFixed_Helper(helper,
- reinterpret_cast<uintptr_t>(inputs), inputs_length,
- reinterpret_cast<uintptr_t>(results), d_bits, n_bits);
+ reinterpret_cast<uintptr_t>(inputs),
+ inputs_length,
+ reinterpret_cast<uintptr_t>(results),
+ d_bits,
+ n_bits);
if (Test::generate_test_trace()) {
// Print the results.
@@ -1154,13 +1263,20 @@
if (++error_count > kErrorReportLimit) continue;
printf("%s 0x%0*" PRIx64 " #%d (%s %g #%d):\n",
- name, n_bits / 4, static_cast<uint64_t>(inputs[n]), fbits,
- name, rawbits_to_fp(inputs[n]), fbits);
+ name,
+ n_bits / 4,
+ static_cast<uint64_t>(inputs[n]),
+ fbits,
+ name,
+ rawbits_to_fp(inputs[n]),
+ fbits);
printf(" Expected: 0x%0*" PRIx64 " (%" PRIu64 ")\n",
- d_bits / 4, static_cast<uint64_t>(expected[d]),
+ d_bits / 4,
+ static_cast<uint64_t>(expected[d]),
static_cast<uint64_t>(expected[d]));
printf(" Found: 0x%0*" PRIx64 " (%" PRIu64 ")\n",
- d_bits / 4, static_cast<uint64_t>(results[d]),
+ d_bits / 4,
+ static_cast<uint64_t>(results[d]),
static_cast<uint64_t>(results[d]));
printf("\n");
}
@@ -1180,7 +1296,8 @@
static void Test1OpNEON_Helper(Test1OpNEONHelper_t helper,
- uintptr_t inputs_n, unsigned inputs_n_length,
+ uintptr_t inputs_n,
+ unsigned inputs_n_length,
uintptr_t results,
VectorFormat vd_form,
VectorFormat vn_form) {
@@ -1233,8 +1350,8 @@
__ Mov(index_n, 0);
__ Bind(&loop_n);
- __ Ldr(vntmp_single, MemOperand(inputs_n_base, index_n, LSL,
- vn_lane_bytes_log2));
+ __ Ldr(vntmp_single,
+ MemOperand(inputs_n_base, index_n, LSL, vn_lane_bytes_log2));
__ Ext(vn, vn, vntmp, vn_lane_bytes);
// Set the destination to zero.
@@ -1263,9 +1380,12 @@
// arrays of rawbit representation of input values. This ensures that
// exact bit comparisons can be performed.
template <typename Td, typename Tn>
-static void Test1OpNEON(const char * name, Test1OpNEONHelper_t helper,
- const Tn inputs_n[], unsigned inputs_n_length,
- const Td expected[], unsigned expected_length,
+static void Test1OpNEON(const char* name,
+ Test1OpNEONHelper_t helper,
+ const Tn inputs_n[],
+ unsigned inputs_n_length,
+ const Td expected[],
+ unsigned expected_length,
VectorFormat vd_form,
VectorFormat vn_form) {
VIXL_ASSERT(inputs_n_length > 0);
@@ -1283,7 +1403,8 @@
reinterpret_cast<uintptr_t>(inputs_n),
inputs_n_length,
reinterpret_cast<uintptr_t>(results),
- vd_form, vn_form);
+ vd_form,
+ vn_form);
if (Test::generate_test_trace()) {
// Print the results.
@@ -1326,27 +1447,29 @@
if (error_in_vector && (++error_count <= kErrorReportLimit)) {
printf("%s\n", name);
printf(" Vn%.*s| Vd%.*s| Expected\n",
- lane_len_in_hex+1, padding,
- lane_len_in_hex+1, padding);
+ lane_len_in_hex + 1,
+ padding,
+ lane_len_in_hex + 1,
+ padding);
const unsigned first_index_n =
- inputs_n_length - (16 / vn_lane_bytes) + n + 1;
+ inputs_n_length - (16 / vn_lane_bytes) + n + 1;
- for (unsigned lane = 0;
- lane < std::max(vd_lane_count, vn_lane_count);
+ for (unsigned lane = 0; lane < std::max(vd_lane_count, vn_lane_count);
lane++) {
unsigned output_index = (n * vd_lane_count) + lane;
unsigned input_index_n = (first_index_n + lane) % inputs_n_length;
- printf("%c0x%0*" PRIx64 " | 0x%0*" PRIx64 " "
- "| 0x%0*" PRIx64 "\n",
- results[output_index] != expected[output_index] ? '*' : ' ',
- lane_len_in_hex,
- static_cast<uint64_t>(inputs_n[input_index_n]),
- lane_len_in_hex,
- static_cast<uint64_t>(results[output_index]),
- lane_len_in_hex,
- static_cast<uint64_t>(expected[output_index]));
+ printf("%c0x%0*" PRIx64 " | 0x%0*" PRIx64
+ " "
+ "| 0x%0*" PRIx64 "\n",
+ results[output_index] != expected[output_index] ? '*' : ' ',
+ lane_len_in_hex,
+ static_cast<uint64_t>(inputs_n[input_index_n]),
+ lane_len_in_hex,
+ static_cast<uint64_t>(results[output_index]),
+ lane_len_in_hex,
+ static_cast<uint64_t>(expected[output_index]));
}
}
}
@@ -1432,8 +1555,8 @@
__ Mov(index_n, 0);
__ Bind(&loop_n);
- __ Ldr(vntmp_single, MemOperand(inputs_n_base, index_n, LSL,
- vn_lane_bytes_log2));
+ __ Ldr(vntmp_single,
+ MemOperand(inputs_n_base, index_n, LSL, vn_lane_bytes_log2));
__ Ext(vn_ext, vn_ext, vntmp_ext, vn_lane_bytes);
if (destructive) {
@@ -1460,9 +1583,12 @@
// arrays of rawbit representation of input values. This ensures that
// exact bit comparisons can be performed.
template <typename Td, typename Tn>
-static void Test1OpAcrossNEON(const char * name, Test1OpNEONHelper_t helper,
- const Tn inputs_n[], unsigned inputs_n_length,
- const Td expected[], unsigned expected_length,
+static void Test1OpAcrossNEON(const char* name,
+ Test1OpNEONHelper_t helper,
+ const Tn inputs_n[],
+ unsigned inputs_n_length,
+ const Td expected[],
+ unsigned expected_length,
VectorFormat vd_form,
VectorFormat vn_form) {
VIXL_ASSERT(inputs_n_length > 0);
@@ -1479,7 +1605,8 @@
reinterpret_cast<uintptr_t>(inputs_n),
inputs_n_length,
reinterpret_cast<uintptr_t>(results),
- vd_form, vn_form);
+ vd_form,
+ vn_form);
if (Test::generate_test_trace()) {
// Print the results.
@@ -1534,8 +1661,10 @@
printf("%s\n", name);
printf(" Vn%.*s| Vd%.*s| Expected\n",
- lane_len_in_hex + 1, padding,
- lane_len_in_hex + 1, padding);
+ lane_len_in_hex + 1,
+ padding,
+ lane_len_in_hex + 1,
+ padding);
// TODO: In case of an error, all tests print out as many elements as
// there are lanes in the output or input vectors. This way
@@ -1546,9 +1675,11 @@
// This output for the 'Across' category has the required
// modifications.
for (unsigned lane = 0; lane < vn_lane_count; lane++) {
- unsigned results_index = (n * vd_lanes_per_q) + ((vn_lane_count - 1) - lane);
- unsigned input_index_n = (inputs_n_length - vn_lane_count +
- n + 1 + lane) % inputs_n_length;
+ unsigned results_index =
+ (n * vd_lanes_per_q) + ((vn_lane_count - 1) - lane);
+ unsigned input_index_n =
+ (inputs_n_length - vn_lane_count + n + 1 + lane) %
+ inputs_n_length;
Td expect = 0;
if ((vn_lane_count - 1) == lane) {
@@ -1584,8 +1715,10 @@
static void Test2OpNEON_Helper(Test2OpNEONHelper_t helper,
uintptr_t inputs_d,
- uintptr_t inputs_n, unsigned inputs_n_length,
- uintptr_t inputs_m, unsigned inputs_m_length,
+ uintptr_t inputs_n,
+ unsigned inputs_n_length,
+ uintptr_t inputs_m,
+ unsigned inputs_m_length,
uintptr_t results,
VectorFormat vd_form,
VectorFormat vn_form,
@@ -1659,15 +1792,15 @@
__ Mov(index_n, 0);
__ Bind(&loop_n);
- __ Ldr(vntmp_single, MemOperand(inputs_n_base, index_n, LSL,
- vn_lane_bytes_log2));
+ __ Ldr(vntmp_single,
+ MemOperand(inputs_n_base, index_n, LSL, vn_lane_bytes_log2));
__ Ext(vn, vn, vntmp, vn_lane_bytes);
__ Mov(index_m, 0);
__ Bind(&loop_m);
- __ Ldr(vmtmp_single, MemOperand(inputs_m_base, index_m, LSL,
- vm_lane_bytes_log2));
+ __ Ldr(vmtmp_single,
+ MemOperand(inputs_m_base, index_m, LSL, vm_lane_bytes_log2));
__ Ext(vm, vm, vmtmp, vm_lane_bytes);
__ Mov(vres, vd);
@@ -1695,11 +1828,15 @@
// arrays of rawbit representation of input values. This ensures that
// exact bit comparisons can be performed.
template <typename Td, typename Tn, typename Tm>
-static void Test2OpNEON(const char * name, Test2OpNEONHelper_t helper,
+static void Test2OpNEON(const char* name,
+ Test2OpNEONHelper_t helper,
const Td inputs_d[],
- const Tn inputs_n[], unsigned inputs_n_length,
- const Tm inputs_m[], unsigned inputs_m_length,
- const Td expected[], unsigned expected_length,
+ const Tn inputs_n[],
+ unsigned inputs_n_length,
+ const Tm inputs_m[],
+ unsigned inputs_m_length,
+ const Td expected[],
+ unsigned expected_length,
VectorFormat vd_form,
VectorFormat vn_form,
VectorFormat vm_form) {
@@ -1714,10 +1851,14 @@
Test2OpNEON_Helper(helper,
reinterpret_cast<uintptr_t>(inputs_d),
- reinterpret_cast<uintptr_t>(inputs_n), inputs_n_length,
- reinterpret_cast<uintptr_t>(inputs_m), inputs_m_length,
+ reinterpret_cast<uintptr_t>(inputs_n),
+ inputs_n_length,
+ reinterpret_cast<uintptr_t>(inputs_m),
+ inputs_m_length,
reinterpret_cast<uintptr_t>(results),
- vd_form, vn_form, vm_form);
+ vd_form,
+ vn_form,
+ vm_form);
if (Test::generate_test_trace()) {
// Print the results.
@@ -1751,7 +1892,7 @@
for (unsigned lane = 0; lane < vd_lane_count; lane++) {
unsigned output_index = (n * inputs_m_length * vd_lane_count) +
- (m * vd_lane_count) + lane;
+ (m * vd_lane_count) + lane;
if (results[output_index] != expected[output_index]) {
error_in_vector = true;
@@ -1762,20 +1903,27 @@
if (error_in_vector && (++error_count <= kErrorReportLimit)) {
printf("%s\n", name);
printf(" Vd%.*s| Vn%.*s| Vm%.*s| Vd%.*s| Expected\n",
- lane_len_in_hex+1, padding,
- lane_len_in_hex+1, padding,
- lane_len_in_hex+1, padding,
- lane_len_in_hex+1, padding);
+ lane_len_in_hex + 1,
+ padding,
+ lane_len_in_hex + 1,
+ padding,
+ lane_len_in_hex + 1,
+ padding,
+ lane_len_in_hex + 1,
+ padding);
for (unsigned lane = 0; lane < vd_lane_count; lane++) {
unsigned output_index = (n * inputs_m_length * vd_lane_count) +
- (m * vd_lane_count) + lane;
- unsigned input_index_n = (inputs_n_length - vd_lane_count +
- n + 1 + lane) % inputs_n_length;
- unsigned input_index_m = (inputs_m_length - vd_lane_count +
- m + 1 + lane) % inputs_m_length;
+ (m * vd_lane_count) + lane;
+ unsigned input_index_n =
+ (inputs_n_length - vd_lane_count + n + 1 + lane) %
+ inputs_n_length;
+ unsigned input_index_m =
+ (inputs_m_length - vd_lane_count + m + 1 + lane) %
+ inputs_m_length;
- printf("%c0x%0*" PRIx64 " | 0x%0*" PRIx64 " | 0x%0*" PRIx64 " "
+ printf("%c0x%0*" PRIx64 " | 0x%0*" PRIx64 " | 0x%0*" PRIx64
+ " "
"| 0x%0*" PRIx64 " | 0x%0*" PRIx64 "\n",
results[output_index] != expected[output_index] ? '*' : ' ',
lane_len_in_hex,
@@ -1885,15 +2033,15 @@
__ Mov(index_n, 0);
__ Bind(&loop_n);
- __ Ldr(vntmp_single, MemOperand(inputs_n_base, index_n, LSL,
- vn_lane_bytes_log2));
+ __ Ldr(vntmp_single,
+ MemOperand(inputs_n_base, index_n, LSL, vn_lane_bytes_log2));
__ Ext(vn, vn, vntmp, vn_lane_bytes);
__ Mov(index_m, 0);
__ Bind(&loop_m);
- __ Ldr(vmtmp_single, MemOperand(inputs_m_base, index_m, LSL,
- vm_lane_bytes_log2));
+ __ Ldr(vmtmp_single,
+ MemOperand(inputs_m_base, index_m, LSL, vm_lane_bytes_log2));
__ Ext(vm, vm, vmtmp, vm_lane_bytes);
__ Mov(vres, vd);
@@ -1921,18 +2069,21 @@
}
-
// Test NEON instructions. The inputs_*[] and expected[] arrays should be
// arrays of rawbit representation of input values. This ensures that
// exact bit comparisons can be performed.
template <typename Td, typename Tn, typename Tm>
-static void TestByElementNEON(const char *name,
+static void TestByElementNEON(const char* name,
TestByElementNEONHelper_t helper,
const Td inputs_d[],
- const Tn inputs_n[], unsigned inputs_n_length,
- const Tm inputs_m[], unsigned inputs_m_length,
- const int indices[], unsigned indices_length,
- const Td expected[], unsigned expected_length,
+ const Tn inputs_n[],
+ unsigned inputs_n_length,
+ const Tm inputs_m[],
+ unsigned inputs_m_length,
+ const int indices[],
+ unsigned indices_length,
+ const Td expected[],
+ unsigned expected_length,
VectorFormat vd_form,
VectorFormat vn_form,
VectorFormat vm_form) {
@@ -1942,19 +2093,24 @@
const unsigned vd_lane_count = MaxLaneCountFromFormat(vd_form);
- const unsigned results_length = inputs_n_length * inputs_m_length *
- indices_length;
+ const unsigned results_length =
+ inputs_n_length * inputs_m_length * indices_length;
Td* results = new Td[results_length * vd_lane_count];
const unsigned lane_bit = sizeof(Td) * 8;
const unsigned lane_len_in_hex = MaxHexCharCount<Td, Tm>();
TestByElementNEON_Helper(helper,
- reinterpret_cast<uintptr_t>(inputs_d),
- reinterpret_cast<uintptr_t>(inputs_n), inputs_n_length,
- reinterpret_cast<uintptr_t>(inputs_m), inputs_m_length,
- indices, indices_length,
- reinterpret_cast<uintptr_t>(results),
- vd_form, vn_form, vm_form);
+ reinterpret_cast<uintptr_t>(inputs_d),
+ reinterpret_cast<uintptr_t>(inputs_n),
+ inputs_n_length,
+ reinterpret_cast<uintptr_t>(inputs_m),
+ inputs_m_length,
+ indices,
+ indices_length,
+ reinterpret_cast<uintptr_t>(results),
+ vd_form,
+ vn_form,
+ vm_form);
if (Test::generate_test_trace()) {
// Print the results.
@@ -1990,8 +2146,8 @@
for (unsigned lane = 0; lane < vd_lane_count; lane++) {
unsigned output_index =
(n * inputs_m_length * indices_length * vd_lane_count) +
- (m * indices_length * vd_lane_count) +
- (index * vd_lane_count) + lane;
+ (m * indices_length * vd_lane_count) + (index * vd_lane_count) +
+ lane;
if (results[output_index] != expected[output_index]) {
error_in_vector = true;
@@ -2002,35 +2158,43 @@
if (error_in_vector && (++error_count <= kErrorReportLimit)) {
printf("%s\n", name);
printf(" Vd%.*s| Vn%.*s| Vm%.*s| Index | Vd%.*s| Expected\n",
- lane_len_in_hex+1, padding,
- lane_len_in_hex+1, padding,
- lane_len_in_hex+1, padding,
- lane_len_in_hex+1, padding);
+ lane_len_in_hex + 1,
+ padding,
+ lane_len_in_hex + 1,
+ padding,
+ lane_len_in_hex + 1,
+ padding,
+ lane_len_in_hex + 1,
+ padding);
for (unsigned lane = 0; lane < vd_lane_count; lane++) {
unsigned output_index =
(n * inputs_m_length * indices_length * vd_lane_count) +
(m * indices_length * vd_lane_count) +
(index * vd_lane_count) + lane;
- unsigned input_index_n = (inputs_n_length - vd_lane_count +
- n + 1 + lane) % inputs_n_length;
- unsigned input_index_m = (inputs_m_length - vd_lane_count +
- m + 1 + lane) % inputs_m_length;
+ unsigned input_index_n =
+ (inputs_n_length - vd_lane_count + n + 1 + lane) %
+ inputs_n_length;
+ unsigned input_index_m =
+ (inputs_m_length - vd_lane_count + m + 1 + lane) %
+ inputs_m_length;
- printf("%c0x%0*" PRIx64 " | 0x%0*" PRIx64 " | 0x%0*" PRIx64 " "
- "| [%3d] | 0x%0*" PRIx64 " | 0x%0*" PRIx64 "\n",
- results[output_index] != expected[output_index] ? '*' : ' ',
- lane_len_in_hex,
- static_cast<uint64_t>(inputs_d[lane]),
- lane_len_in_hex,
- static_cast<uint64_t>(inputs_n[input_index_n]),
- lane_len_in_hex,
- static_cast<uint64_t>(inputs_m[input_index_m]),
- indices[index],
- lane_len_in_hex,
- static_cast<uint64_t>(results[output_index]),
- lane_len_in_hex,
- static_cast<uint64_t>(expected[output_index]));
+ printf("%c0x%0*" PRIx64 " | 0x%0*" PRIx64 " | 0x%0*" PRIx64
+ " "
+ "| [%3d] | 0x%0*" PRIx64 " | 0x%0*" PRIx64 "\n",
+ results[output_index] != expected[output_index] ? '*'
+ : ' ',
+ lane_len_in_hex,
+ static_cast<uint64_t>(inputs_d[lane]),
+ lane_len_in_hex,
+ static_cast<uint64_t>(inputs_n[input_index_n]),
+ lane_len_in_hex,
+ static_cast<uint64_t>(inputs_m[input_index_m]),
+ indices[index],
+ lane_len_in_hex,
+ static_cast<uint64_t>(results[output_index]),
+ lane_len_in_hex,
+ static_cast<uint64_t>(expected[output_index]));
}
}
}
@@ -2059,8 +2223,7 @@
uintptr_t results,
VectorFormat vd_form,
VectorFormat vn_form) {
- VIXL_ASSERT(vd_form != kFormatUndefined &&
- vn_form != kFormatUndefined);
+ VIXL_ASSERT(vd_form != kFormatUndefined && vn_form != kFormatUndefined);
SETUP();
START();
@@ -2108,8 +2271,8 @@
__ Mov(index_n, 0);
__ Bind(&loop_n);
- __ Ldr(vntmp_single, MemOperand(inputs_n_base, index_n, LSL,
- vn_lane_bytes_log2));
+ __ Ldr(vntmp_single,
+ MemOperand(inputs_n_base, index_n, LSL, vn_lane_bytes_log2));
__ Ext(vn, vn, vntmp, vn_lane_bytes);
// Set the destination to zero for tests such as '[r]shrn2'.
@@ -2142,11 +2305,14 @@
// exact bit comparisons can be performed.
template <typename Td, typename Tn, typename Tm>
static void Test2OpImmNEON(
- const char * name,
+ const char* name,
typename Test2OpImmediateNEONHelper_t<Tm>::mnemonic helper,
- const Tn inputs_n[], unsigned inputs_n_length,
- const Tm inputs_m[], unsigned inputs_m_length,
- const Td expected[], unsigned expected_length,
+ const Tn inputs_n[],
+ unsigned inputs_n_length,
+ const Tm inputs_m[],
+ unsigned inputs_m_length,
+ const Td expected[],
+ unsigned expected_length,
VectorFormat vd_form,
VectorFormat vn_form) {
VIXL_ASSERT(inputs_n_length > 0 && inputs_m_length > 0);
@@ -2161,10 +2327,13 @@
const unsigned lane_len_in_hex = MaxHexCharCount<Td, Tn>();
Test2OpImmNEON_Helper(helper,
- reinterpret_cast<uintptr_t>(inputs_n), inputs_n_length,
- inputs_m, inputs_m_length,
+ reinterpret_cast<uintptr_t>(inputs_n),
+ inputs_n_length,
+ inputs_m,
+ inputs_m_length,
reinterpret_cast<uintptr_t>(results),
- vd_form, vn_form);
+ vd_form,
+ vn_form);
if (Test::generate_test_trace()) {
// Print the results.
@@ -2198,7 +2367,7 @@
for (unsigned lane = 0; lane < vd_lane_count; lane++) {
unsigned output_index = (n * inputs_m_length * vd_lane_count) +
- (m * vd_lane_count) + lane;
+ (m * vd_lane_count) + lane;
if (results[output_index] != expected[output_index]) {
error_in_vector = true;
@@ -2209,22 +2378,25 @@
if (error_in_vector && (++error_count <= kErrorReportLimit)) {
printf("%s\n", name);
printf(" Vn%.*s| Imm%.*s| Vd%.*s| Expected\n",
- lane_len_in_hex+1, padding,
- lane_len_in_hex, padding,
- lane_len_in_hex+1, padding);
+ lane_len_in_hex + 1,
+ padding,
+ lane_len_in_hex,
+ padding,
+ lane_len_in_hex + 1,
+ padding);
- const unsigned first_index_n =
- inputs_n_length - (16 / vn_lane_bytes) + n + 1;
+ const unsigned first_index_n =
+ inputs_n_length - (16 / vn_lane_bytes) + n + 1;
- for (unsigned lane = 0;
- lane < std::max(vd_lane_count, vn_lane_count);
- lane++) {
+ for (unsigned lane = 0; lane < std::max(vd_lane_count, vn_lane_count);
+ lane++) {
unsigned output_index = (n * inputs_m_length * vd_lane_count) +
- (m * vd_lane_count) + lane;
+ (m * vd_lane_count) + lane;
unsigned input_index_n = (first_index_n + lane) % inputs_n_length;
unsigned input_index_m = m;
- printf("%c0x%0*" PRIx64 " | 0x%0*" PRIx64 " "
+ printf("%c0x%0*" PRIx64 " | 0x%0*" PRIx64
+ " "
"| 0x%0*" PRIx64 " | 0x%0*" PRIx64 "\n",
results[output_index] != expected[output_index] ? '*' : ' ',
lane_len_in_hex,
@@ -2252,14 +2424,17 @@
// ==== Tests for instructions of the form <INST> VReg, #Imm, VReg, #Imm. ====
-static void TestOpImmOpImmNEON_Helper(
- TestOpImmOpImmVdUpdateNEONHelper_t helper,
- uintptr_t inputs_d,
- const int inputs_imm1[], unsigned inputs_imm1_length,
- uintptr_t inputs_n, unsigned inputs_n_length,
- const int inputs_imm2[], unsigned inputs_imm2_length,
- uintptr_t results,
- VectorFormat vd_form, VectorFormat vn_form) {
+static void TestOpImmOpImmNEON_Helper(TestOpImmOpImmVdUpdateNEONHelper_t helper,
+ uintptr_t inputs_d,
+ const int inputs_imm1[],
+ unsigned inputs_imm1_length,
+ uintptr_t inputs_n,
+ unsigned inputs_n_length,
+ const int inputs_imm2[],
+ unsigned inputs_imm2_length,
+ uintptr_t results,
+ VectorFormat vd_form,
+ VectorFormat vn_form) {
VIXL_ASSERT(vd_form != kFormatUndefined);
VIXL_ASSERT(vn_form != kFormatUndefined);
@@ -2318,13 +2493,14 @@
__ Mov(index_n, 0);
__ Bind(&loop_n);
- __ Ldr(vntmp_single, MemOperand(inputs_n_base, index_n, LSL,
- vn_lane_bytes_log2));
+ __ Ldr(vntmp_single,
+ MemOperand(inputs_n_base, index_n, LSL, vn_lane_bytes_log2));
__ Ext(vn_ext, vn_ext, vntmp_ext, vn_lane_bytes);
{
EmissionCheckScope guard(&masm,
- kInstructionSize * inputs_imm1_length * inputs_imm2_length * 3);
+ kInstructionSize * inputs_imm1_length *
+ inputs_imm2_length * 3);
for (unsigned i = 0; i < inputs_imm1_length; i++) {
for (unsigned j = 0; j < inputs_imm2_length; j++) {
__ Mov(vres, vd);
@@ -2348,7 +2524,7 @@
// arrays of rawbit representation of input values. This ensures that
// exact bit comparisons can be performed.
template <typename Td, typename Tn>
-static void TestOpImmOpImmNEON(const char * name,
+static void TestOpImmOpImmNEON(const char* name,
TestOpImmOpImmVdUpdateNEONHelper_t helper,
const Td inputs_d[],
const int inputs_imm1[],
@@ -2367,8 +2543,8 @@
const unsigned vd_lane_count = LaneCountFromFormat(vd_form);
- const unsigned results_length = inputs_n_length *
- inputs_imm1_length * inputs_imm2_length;
+ const unsigned results_length =
+ inputs_n_length * inputs_imm1_length * inputs_imm2_length;
Td* results = new Td[results_length * vd_lane_count];
const unsigned lane_bit = sizeof(Td) * 8;
@@ -2383,7 +2559,8 @@
inputs_imm2,
inputs_imm2_length,
reinterpret_cast<uintptr_t>(results),
- vd_form, vn_form);
+ vd_form,
+ vn_form);
if (Test::generate_test_trace()) {
// Print the results.
@@ -2420,8 +2597,7 @@
for (unsigned lane = 0; lane < vd_lane_count; lane++) {
unsigned output_index =
- (n * inputs_imm1_length *
- inputs_imm2_length * vd_lane_count) +
+ (n * inputs_imm1_length * inputs_imm2_length * vd_lane_count) +
(imm1 * inputs_imm2_length * vd_lane_count) +
(imm2 * vd_lane_count) + lane;
@@ -2434,39 +2610,46 @@
if (error_in_vector && (++error_count <= kErrorReportLimit)) {
printf("%s\n", name);
printf(" Vd%.*s| Imm%.*s| Vn%.*s| Imm%.*s| Vd%.*s| Expected\n",
- lane_len_in_hex+1, padding,
- lane_len_in_hex, padding,
- lane_len_in_hex+1, padding,
- lane_len_in_hex, padding,
- lane_len_in_hex+1, padding);
+ lane_len_in_hex + 1,
+ padding,
+ lane_len_in_hex,
+ padding,
+ lane_len_in_hex + 1,
+ padding,
+ lane_len_in_hex,
+ padding,
+ lane_len_in_hex + 1,
+ padding);
for (unsigned lane = 0; lane < vd_lane_count; lane++) {
unsigned output_index =
- (n * inputs_imm1_length *
- inputs_imm2_length * vd_lane_count) +
- (imm1 * inputs_imm2_length * vd_lane_count) +
- (imm2 * vd_lane_count) + lane;
- unsigned input_index_n = (inputs_n_length - vd_lane_count +
- n + 1 + lane) % inputs_n_length;
+ (n * inputs_imm1_length * inputs_imm2_length *
+ vd_lane_count) +
+ (imm1 * inputs_imm2_length * vd_lane_count) +
+ (imm2 * vd_lane_count) + lane;
+ unsigned input_index_n =
+ (inputs_n_length - vd_lane_count + n + 1 + lane) %
+ inputs_n_length;
unsigned input_index_imm1 = imm1;
unsigned input_index_imm2 = imm2;
- printf("%c0x%0*" PRIx64 " | 0x%0*" PRIx64 " | 0x%0*" PRIx64 " "
- "| 0x%0*" PRIx64 " | 0x%0*" PRIx64 " | 0x%0*" PRIx64 "\n",
- results[output_index] !=
- expected[output_index] ? '*' : ' ',
- lane_len_in_hex,
- static_cast<uint64_t>(inputs_d[lane]),
- lane_len_in_hex,
- static_cast<uint64_t>(inputs_imm1[input_index_imm1]),
- lane_len_in_hex,
- static_cast<uint64_t>(inputs_n[input_index_n]),
- lane_len_in_hex,
- static_cast<uint64_t>(inputs_imm2[input_index_imm2]),
- lane_len_in_hex,
- static_cast<uint64_t>(results[output_index]),
- lane_len_in_hex,
- static_cast<uint64_t>(expected[output_index]));
+ printf("%c0x%0*" PRIx64 " | 0x%0*" PRIx64 " | 0x%0*" PRIx64
+ " "
+ "| 0x%0*" PRIx64 " | 0x%0*" PRIx64 " | 0x%0*" PRIx64 "\n",
+ results[output_index] != expected[output_index] ? '*'
+ : ' ',
+ lane_len_in_hex,
+ static_cast<uint64_t>(inputs_d[lane]),
+ lane_len_in_hex,
+ static_cast<uint64_t>(inputs_imm1[input_index_imm1]),
+ lane_len_in_hex,
+ static_cast<uint64_t>(inputs_n[input_index_n]),
+ lane_len_in_hex,
+ static_cast<uint64_t>(inputs_imm2[input_index_imm2]),
+ lane_len_in_hex,
+ static_cast<uint64_t>(results[output_index]),
+ lane_len_in_hex,
+ static_cast<uint64_t>(expected[output_index]));
}
}
}
@@ -2489,20 +2672,21 @@
// operations.
#define STRINGIFY(s) #s
-#define CALL_TEST_FP_HELPER(mnemonic, variant, type, input) \
- Test##type(STRINGIFY(mnemonic) "_" STRINGIFY(variant), \
- &MacroAssembler::mnemonic, \
- input, sizeof(input) / sizeof(input[0]), \
- kExpected_##mnemonic##_##variant, \
- kExpectedCount_##mnemonic##_##variant)
+#define CALL_TEST_FP_HELPER(mnemonic, variant, type, input) \
+ Test##type(STRINGIFY(mnemonic) "_" STRINGIFY(variant), \
+ &MacroAssembler::mnemonic, \
+ input, \
+ sizeof(input) / sizeof(input[0]), \
+ kExpected_##mnemonic##_##variant, \
+ kExpectedCount_##mnemonic##_##variant)
-#define DEFINE_TEST_FP(mnemonic, type, input) \
- TEST(mnemonic##_d) { \
- CALL_TEST_FP_HELPER(mnemonic, d, type, kInputDouble##input); \
- } \
- TEST(mnemonic##_s) { \
- CALL_TEST_FP_HELPER(mnemonic, s, type, kInputFloat##input); \
- }
+#define DEFINE_TEST_FP(mnemonic, type, input) \
+ TEST(mnemonic##_d) { \
+ CALL_TEST_FP_HELPER(mnemonic, d, type, kInputDouble##input); \
+ } \
+ TEST(mnemonic##_s) { \
+ CALL_TEST_FP_HELPER(mnemonic, s, type, kInputFloat##input); \
+ }
// TODO: Test with a newer version of valgrind.
//
@@ -2544,19 +2728,19 @@
TEST(fcvt_sd) { CALL_TEST_FP_HELPER(fcvt, sd, 1Op, kInputDoubleConversions); }
TEST(fcvt_ds) { CALL_TEST_FP_HELPER(fcvt, ds, 1Op, kInputFloatConversions); }
-#define DEFINE_TEST_FP_TO_INT(mnemonic, type, input) \
- TEST(mnemonic##_xd) { \
- CALL_TEST_FP_HELPER(mnemonic, xd, type, kInputDouble##input); \
- } \
- TEST(mnemonic##_xs) { \
- CALL_TEST_FP_HELPER(mnemonic, xs, type, kInputFloat##input); \
- } \
- TEST(mnemonic##_wd) { \
- CALL_TEST_FP_HELPER(mnemonic, wd, type, kInputDouble##input); \
- } \
- TEST(mnemonic##_ws) { \
- CALL_TEST_FP_HELPER(mnemonic, ws, type, kInputFloat##input); \
- }
+#define DEFINE_TEST_FP_TO_INT(mnemonic, type, input) \
+ TEST(mnemonic##_xd) { \
+ CALL_TEST_FP_HELPER(mnemonic, xd, type, kInputDouble##input); \
+ } \
+ TEST(mnemonic##_xs) { \
+ CALL_TEST_FP_HELPER(mnemonic, xs, type, kInputFloat##input); \
+ } \
+ TEST(mnemonic##_wd) { \
+ CALL_TEST_FP_HELPER(mnemonic, wd, type, kInputDouble##input); \
+ } \
+ TEST(mnemonic##_ws) { \
+ CALL_TEST_FP_HELPER(mnemonic, ws, type, kInputFloat##input); \
+ }
DEFINE_TEST_FP_TO_INT(fcvtas, FPToS, Conversions)
DEFINE_TEST_FP_TO_INT(fcvtau, FPToU, Conversions)
@@ -2578,1139 +2762,1307 @@
// ==== NEON Tests. ====
-#define CALL_TEST_NEON_HELPER_1Op(mnemonic, \
- vdform, vnform, \
- input_n) \
- Test1OpNEON(STRINGIFY(mnemonic) "_" STRINGIFY(vdform), \
- &MacroAssembler::mnemonic, \
- input_n, \
- (sizeof(input_n) / sizeof(input_n[0])), \
- kExpected_NEON_##mnemonic##_##vdform, \
- kExpectedCount_NEON_##mnemonic##_##vdform, \
- kFormat##vdform, \
- kFormat##vnform)
+#define CALL_TEST_NEON_HELPER_1Op(mnemonic, vdform, vnform, input_n) \
+ Test1OpNEON(STRINGIFY(mnemonic) "_" STRINGIFY(vdform), \
+ &MacroAssembler::mnemonic, \
+ input_n, \
+ (sizeof(input_n) / sizeof(input_n[0])), \
+ kExpected_NEON_##mnemonic##_##vdform, \
+ kExpectedCount_NEON_##mnemonic##_##vdform, \
+ kFormat##vdform, \
+ kFormat##vnform)
-#define CALL_TEST_NEON_HELPER_1OpAcross(mnemonic, \
- vdform, vnform, \
- input_n) \
- Test1OpAcrossNEON(STRINGIFY(mnemonic) "_" STRINGIFY(vdform) \
- "_" STRINGIFY(vnform), \
- &MacroAssembler::mnemonic, \
- input_n, \
- (sizeof(input_n) / sizeof(input_n[0])), \
- kExpected_NEON_##mnemonic##_##vdform##_##vnform, \
- kExpectedCount_NEON_##mnemonic##_##vdform##_##vnform, \
- kFormat##vdform, \
- kFormat##vnform)
+#define CALL_TEST_NEON_HELPER_1OpAcross(mnemonic, vdform, vnform, input_n) \
+ Test1OpAcrossNEON(STRINGIFY(mnemonic) "_" STRINGIFY(vdform) "_" STRINGIFY( \
+ vnform), \
+ &MacroAssembler::mnemonic, \
+ input_n, \
+ (sizeof(input_n) / sizeof(input_n[0])), \
+ kExpected_NEON_##mnemonic##_##vdform##_##vnform, \
+ kExpectedCount_NEON_##mnemonic##_##vdform##_##vnform, \
+ kFormat##vdform, \
+ kFormat##vnform)
-#define CALL_TEST_NEON_HELPER_2Op(mnemonic, \
- vdform, vnform, vmform, \
- input_d, input_n, input_m) \
- Test2OpNEON(STRINGIFY(mnemonic) "_" STRINGIFY(vdform), \
- &MacroAssembler::mnemonic, \
- input_d, \
- input_n, \
- (sizeof(input_n) / sizeof(input_n[0])), \
- input_m, \
- (sizeof(input_m) / sizeof(input_m[0])), \
- kExpected_NEON_##mnemonic##_##vdform, \
- kExpectedCount_NEON_##mnemonic##_##vdform, \
- kFormat##vdform, \
- kFormat##vnform, \
- kFormat##vmform)
+#define CALL_TEST_NEON_HELPER_2Op( \
+ mnemonic, vdform, vnform, vmform, input_d, input_n, input_m) \
+ Test2OpNEON(STRINGIFY(mnemonic) "_" STRINGIFY(vdform), \
+ &MacroAssembler::mnemonic, \
+ input_d, \
+ input_n, \
+ (sizeof(input_n) / sizeof(input_n[0])), \
+ input_m, \
+ (sizeof(input_m) / sizeof(input_m[0])), \
+ kExpected_NEON_##mnemonic##_##vdform, \
+ kExpectedCount_NEON_##mnemonic##_##vdform, \
+ kFormat##vdform, \
+ kFormat##vnform, \
+ kFormat##vmform)
-#define CALL_TEST_NEON_HELPER_2OpImm(mnemonic, \
- vdform, vnform, \
- input_n, input_m) \
- Test2OpImmNEON(STRINGIFY(mnemonic) "_" STRINGIFY(vdform) "_2OPIMM", \
- &MacroAssembler::mnemonic, \
- input_n, \
- (sizeof(input_n) / sizeof(input_n[0])), \
- input_m, \
- (sizeof(input_m) / sizeof(input_m[0])), \
- kExpected_NEON_##mnemonic##_##vdform##_2OPIMM, \
- kExpectedCount_NEON_##mnemonic##_##vdform##_2OPIMM, \
- kFormat##vdform, \
- kFormat##vnform)
+#define CALL_TEST_NEON_HELPER_2OpImm( \
+ mnemonic, vdform, vnform, input_n, input_m) \
+ Test2OpImmNEON(STRINGIFY(mnemonic) "_" STRINGIFY(vdform) "_2OPIMM", \
+ &MacroAssembler::mnemonic, \
+ input_n, \
+ (sizeof(input_n) / sizeof(input_n[0])), \
+ input_m, \
+ (sizeof(input_m) / sizeof(input_m[0])), \
+ kExpected_NEON_##mnemonic##_##vdform##_2OPIMM, \
+ kExpectedCount_NEON_##mnemonic##_##vdform##_2OPIMM, \
+ kFormat##vdform, \
+ kFormat##vnform)
-#define CALL_TEST_NEON_HELPER_ByElement(mnemonic, \
- vdform, vnform, vmform, \
- input_d, input_n, input_m, indices) \
- TestByElementNEON(STRINGIFY(mnemonic) "_" STRINGIFY(vdform) \
- "_" STRINGIFY(vnform) "_" STRINGIFY(vmform), \
- &MacroAssembler::mnemonic, \
- input_d, \
- input_n, \
- (sizeof(input_n) / sizeof(input_n[0])), \
- input_m, \
- (sizeof(input_m) / sizeof(input_m[0])), \
- indices, \
- (sizeof(indices) / sizeof(indices[0])), \
- kExpected_NEON_##mnemonic##_##vdform##_##vnform##_##vmform, \
- kExpectedCount_NEON_##mnemonic##_##vdform##_##vnform##_##vmform, \
- kFormat##vdform, \
- kFormat##vnform, \
- kFormat##vmform)
+#define CALL_TEST_NEON_HELPER_ByElement( \
+ mnemonic, vdform, vnform, vmform, input_d, input_n, input_m, indices) \
+ TestByElementNEON( \
+ STRINGIFY(mnemonic) "_" STRINGIFY(vdform) "_" STRINGIFY( \
+ vnform) "_" STRINGIFY(vmform), \
+ &MacroAssembler::mnemonic, \
+ input_d, \
+ input_n, \
+ (sizeof(input_n) / sizeof(input_n[0])), \
+ input_m, \
+ (sizeof(input_m) / sizeof(input_m[0])), \
+ indices, \
+ (sizeof(indices) / sizeof(indices[0])), \
+ kExpected_NEON_##mnemonic##_##vdform##_##vnform##_##vmform, \
+ kExpectedCount_NEON_##mnemonic##_##vdform##_##vnform##_##vmform, \
+ kFormat##vdform, \
+ kFormat##vnform, \
+ kFormat##vmform)
-#define CALL_TEST_NEON_HELPER_OpImmOpImm(helper, \
- mnemonic, \
- vdform, vnform, \
- input_d, input_imm1, \
- input_n, input_imm2) \
- TestOpImmOpImmNEON(STRINGIFY(mnemonic) "_" STRINGIFY(vdform), \
- helper, \
- input_d, \
- input_imm1, \
- (sizeof(input_imm1) / sizeof(input_imm1[0])), \
- input_n, \
- (sizeof(input_n) / sizeof(input_n[0])), \
- input_imm2, \
- (sizeof(input_imm2) / sizeof(input_imm2[0])), \
- kExpected_NEON_##mnemonic##_##vdform, \
- kExpectedCount_NEON_##mnemonic##_##vdform, \
- kFormat##vdform, \
- kFormat##vnform)
+#define CALL_TEST_NEON_HELPER_OpImmOpImm(helper, \
+ mnemonic, \
+ vdform, \
+ vnform, \
+ input_d, \
+ input_imm1, \
+ input_n, \
+ input_imm2) \
+ TestOpImmOpImmNEON(STRINGIFY(mnemonic) "_" STRINGIFY(vdform), \
+ helper, \
+ input_d, \
+ input_imm1, \
+ (sizeof(input_imm1) / sizeof(input_imm1[0])), \
+ input_n, \
+ (sizeof(input_n) / sizeof(input_n[0])), \
+ input_imm2, \
+ (sizeof(input_imm2) / sizeof(input_imm2[0])), \
+ kExpected_NEON_##mnemonic##_##vdform, \
+ kExpectedCount_NEON_##mnemonic##_##vdform, \
+ kFormat##vdform, \
+ kFormat##vnform)
-#define CALL_TEST_NEON_HELPER_2SAME(mnemonic, variant, input) \
- CALL_TEST_NEON_HELPER_1Op(mnemonic, \
- variant, variant, \
- input)
+#define CALL_TEST_NEON_HELPER_2SAME(mnemonic, variant, input) \
+ CALL_TEST_NEON_HELPER_1Op(mnemonic, variant, variant, input)
-#define DEFINE_TEST_NEON_2SAME_8B_16B(mnemonic, input) \
- TEST(mnemonic##_8B) { \
- CALL_TEST_NEON_HELPER_2SAME(mnemonic, 8B, kInput8bits##input); \
- } \
- TEST(mnemonic##_16B) { \
- CALL_TEST_NEON_HELPER_2SAME(mnemonic, 16B, kInput8bits##input); \
- }
+#define DEFINE_TEST_NEON_2SAME_8B_16B(mnemonic, input) \
+ TEST(mnemonic##_8B) { \
+ CALL_TEST_NEON_HELPER_2SAME(mnemonic, 8B, kInput8bits##input); \
+ } \
+ TEST(mnemonic##_16B) { \
+ CALL_TEST_NEON_HELPER_2SAME(mnemonic, 16B, kInput8bits##input); \
+ }
-#define DEFINE_TEST_NEON_2SAME_4H_8H(mnemonic, input) \
- TEST(mnemonic##_4H) { \
- CALL_TEST_NEON_HELPER_2SAME(mnemonic, 4H, kInput16bits##input); \
- } \
- TEST(mnemonic##_8H) { \
- CALL_TEST_NEON_HELPER_2SAME(mnemonic, 8H, kInput16bits##input); \
- }
+#define DEFINE_TEST_NEON_2SAME_4H_8H(mnemonic, input) \
+ TEST(mnemonic##_4H) { \
+ CALL_TEST_NEON_HELPER_2SAME(mnemonic, 4H, kInput16bits##input); \
+ } \
+ TEST(mnemonic##_8H) { \
+ CALL_TEST_NEON_HELPER_2SAME(mnemonic, 8H, kInput16bits##input); \
+ }
-#define DEFINE_TEST_NEON_2SAME_2S_4S(mnemonic, input) \
- TEST(mnemonic##_2S) { \
- CALL_TEST_NEON_HELPER_2SAME(mnemonic, 2S, kInput32bits##input); \
- } \
- TEST(mnemonic##_4S) { \
- CALL_TEST_NEON_HELPER_2SAME(mnemonic, 4S, kInput32bits##input); \
- }
+#define DEFINE_TEST_NEON_2SAME_2S_4S(mnemonic, input) \
+ TEST(mnemonic##_2S) { \
+ CALL_TEST_NEON_HELPER_2SAME(mnemonic, 2S, kInput32bits##input); \
+ } \
+ TEST(mnemonic##_4S) { \
+ CALL_TEST_NEON_HELPER_2SAME(mnemonic, 4S, kInput32bits##input); \
+ }
-#define DEFINE_TEST_NEON_2SAME_BH(mnemonic, input) \
- DEFINE_TEST_NEON_2SAME_8B_16B(mnemonic, input) \
- DEFINE_TEST_NEON_2SAME_4H_8H(mnemonic, input)
+#define DEFINE_TEST_NEON_2SAME_BH(mnemonic, input) \
+ DEFINE_TEST_NEON_2SAME_8B_16B(mnemonic, input) \
+ DEFINE_TEST_NEON_2SAME_4H_8H(mnemonic, input)
-#define DEFINE_TEST_NEON_2SAME_NO2D(mnemonic, input) \
- DEFINE_TEST_NEON_2SAME_BH(mnemonic, input) \
- DEFINE_TEST_NEON_2SAME_2S_4S(mnemonic, input)
+#define DEFINE_TEST_NEON_2SAME_NO2D(mnemonic, input) \
+ DEFINE_TEST_NEON_2SAME_BH(mnemonic, input) \
+ DEFINE_TEST_NEON_2SAME_2S_4S(mnemonic, input)
-#define DEFINE_TEST_NEON_2SAME(mnemonic, input) \
- DEFINE_TEST_NEON_2SAME_NO2D(mnemonic, input) \
- TEST(mnemonic##_2D) { \
- CALL_TEST_NEON_HELPER_2SAME(mnemonic, 2D, kInput64bits##input); \
- }
-#define DEFINE_TEST_NEON_2SAME_SD(mnemonic, input) \
- DEFINE_TEST_NEON_2SAME_2S_4S(mnemonic, input) \
- TEST(mnemonic##_2D) { \
- CALL_TEST_NEON_HELPER_2SAME(mnemonic, 2D, kInput64bits##input); \
- }
+#define DEFINE_TEST_NEON_2SAME(mnemonic, input) \
+ DEFINE_TEST_NEON_2SAME_NO2D(mnemonic, input) \
+ TEST(mnemonic##_2D) { \
+ CALL_TEST_NEON_HELPER_2SAME(mnemonic, 2D, kInput64bits##input); \
+ }
+#define DEFINE_TEST_NEON_2SAME_SD(mnemonic, input) \
+ DEFINE_TEST_NEON_2SAME_2S_4S(mnemonic, input) \
+ TEST(mnemonic##_2D) { \
+ CALL_TEST_NEON_HELPER_2SAME(mnemonic, 2D, kInput64bits##input); \
+ }
-#define DEFINE_TEST_NEON_2SAME_FP(mnemonic, input) \
- TEST(mnemonic##_2S) { \
- CALL_TEST_NEON_HELPER_2SAME(mnemonic, 2S, kInputFloat##input); \
- } \
- TEST(mnemonic##_4S) { \
- CALL_TEST_NEON_HELPER_2SAME(mnemonic, 4S, kInputFloat##input); \
- } \
- TEST(mnemonic##_2D) { \
- CALL_TEST_NEON_HELPER_2SAME(mnemonic, 2D, kInputDouble##input); \
- }
+#define DEFINE_TEST_NEON_2SAME_FP(mnemonic, input) \
+ TEST(mnemonic##_2S) { \
+ CALL_TEST_NEON_HELPER_2SAME(mnemonic, 2S, kInputFloat##input); \
+ } \
+ TEST(mnemonic##_4S) { \
+ CALL_TEST_NEON_HELPER_2SAME(mnemonic, 4S, kInputFloat##input); \
+ } \
+ TEST(mnemonic##_2D) { \
+ CALL_TEST_NEON_HELPER_2SAME(mnemonic, 2D, kInputDouble##input); \
+ }
-#define DEFINE_TEST_NEON_2SAME_FP_SCALAR(mnemonic, input) \
- TEST(mnemonic##_S) { \
- CALL_TEST_NEON_HELPER_2SAME(mnemonic, S, kInputFloat##input); \
- } \
- TEST(mnemonic##_D) { \
- CALL_TEST_NEON_HELPER_2SAME(mnemonic, D, kInputDouble##input); \
- }
+#define DEFINE_TEST_NEON_2SAME_FP_SCALAR(mnemonic, input) \
+ TEST(mnemonic##_S) { \
+ CALL_TEST_NEON_HELPER_2SAME(mnemonic, S, kInputFloat##input); \
+ } \
+ TEST(mnemonic##_D) { \
+ CALL_TEST_NEON_HELPER_2SAME(mnemonic, D, kInputDouble##input); \
+ }
-#define DEFINE_TEST_NEON_2SAME_SCALAR_B(mnemonic, input) \
- TEST(mnemonic##_B) { \
- CALL_TEST_NEON_HELPER_2SAME(mnemonic, B, kInput8bits##input); \
- }
-#define DEFINE_TEST_NEON_2SAME_SCALAR_H(mnemonic, input) \
- TEST(mnemonic##_H) { \
- CALL_TEST_NEON_HELPER_2SAME(mnemonic, H, kInput16bits##input); \
- }
-#define DEFINE_TEST_NEON_2SAME_SCALAR_S(mnemonic, input) \
- TEST(mnemonic##_S) { \
- CALL_TEST_NEON_HELPER_2SAME(mnemonic, S, kInput32bits##input); \
- }
-#define DEFINE_TEST_NEON_2SAME_SCALAR_D(mnemonic, input) \
- TEST(mnemonic##_D) { \
- CALL_TEST_NEON_HELPER_2SAME(mnemonic, D, kInput64bits##input); \
- }
+#define DEFINE_TEST_NEON_2SAME_SCALAR_B(mnemonic, input) \
+ TEST(mnemonic##_B) { \
+ CALL_TEST_NEON_HELPER_2SAME(mnemonic, B, kInput8bits##input); \
+ }
+#define DEFINE_TEST_NEON_2SAME_SCALAR_H(mnemonic, input) \
+ TEST(mnemonic##_H) { \
+ CALL_TEST_NEON_HELPER_2SAME(mnemonic, H, kInput16bits##input); \
+ }
+#define DEFINE_TEST_NEON_2SAME_SCALAR_S(mnemonic, input) \
+ TEST(mnemonic##_S) { \
+ CALL_TEST_NEON_HELPER_2SAME(mnemonic, S, kInput32bits##input); \
+ }
+#define DEFINE_TEST_NEON_2SAME_SCALAR_D(mnemonic, input) \
+ TEST(mnemonic##_D) { \
+ CALL_TEST_NEON_HELPER_2SAME(mnemonic, D, kInput64bits##input); \
+ }
-#define DEFINE_TEST_NEON_2SAME_SCALAR(mnemonic, input) \
- DEFINE_TEST_NEON_2SAME_SCALAR_B(mnemonic, input) \
- DEFINE_TEST_NEON_2SAME_SCALAR_H(mnemonic, input) \
- DEFINE_TEST_NEON_2SAME_SCALAR_S(mnemonic, input) \
- DEFINE_TEST_NEON_2SAME_SCALAR_D(mnemonic, input)
+#define DEFINE_TEST_NEON_2SAME_SCALAR(mnemonic, input) \
+ DEFINE_TEST_NEON_2SAME_SCALAR_B(mnemonic, input) \
+ DEFINE_TEST_NEON_2SAME_SCALAR_H(mnemonic, input) \
+ DEFINE_TEST_NEON_2SAME_SCALAR_S(mnemonic, input) \
+ DEFINE_TEST_NEON_2SAME_SCALAR_D(mnemonic, input)
-#define DEFINE_TEST_NEON_2SAME_SCALAR_SD(mnemonic, input) \
- DEFINE_TEST_NEON_2SAME_SCALAR_S(mnemonic, input) \
- DEFINE_TEST_NEON_2SAME_SCALAR_D(mnemonic, input)
+#define DEFINE_TEST_NEON_2SAME_SCALAR_SD(mnemonic, input) \
+ DEFINE_TEST_NEON_2SAME_SCALAR_S(mnemonic, input) \
+ DEFINE_TEST_NEON_2SAME_SCALAR_D(mnemonic, input)
-#define CALL_TEST_NEON_HELPER_ACROSS(mnemonic, vd_form, vn_form, input_n) \
- CALL_TEST_NEON_HELPER_1OpAcross(mnemonic, \
- vd_form, vn_form, \
- input_n)
+#define CALL_TEST_NEON_HELPER_ACROSS(mnemonic, vd_form, vn_form, input_n) \
+ CALL_TEST_NEON_HELPER_1OpAcross(mnemonic, vd_form, vn_form, input_n)
-#define DEFINE_TEST_NEON_ACROSS(mnemonic, input) \
- TEST(mnemonic##_B_8B) { \
- CALL_TEST_NEON_HELPER_ACROSS(mnemonic, B, 8B, kInput8bits##input); \
- } \
- TEST(mnemonic##_B_16B) { \
- CALL_TEST_NEON_HELPER_ACROSS(mnemonic, B, 16B, kInput8bits##input); \
- } \
- TEST(mnemonic##_H_4H) { \
- CALL_TEST_NEON_HELPER_ACROSS(mnemonic, H, 4H, kInput16bits##input); \
- } \
- TEST(mnemonic##_H_8H) { \
- CALL_TEST_NEON_HELPER_ACROSS(mnemonic, H, 8H, kInput16bits##input); \
- } \
- TEST(mnemonic##_S_4S) { \
- CALL_TEST_NEON_HELPER_ACROSS(mnemonic, S, 4S, kInput32bits##input); \
- }
+#define DEFINE_TEST_NEON_ACROSS(mnemonic, input) \
+ TEST(mnemonic##_B_8B) { \
+ CALL_TEST_NEON_HELPER_ACROSS(mnemonic, B, 8B, kInput8bits##input); \
+ } \
+ TEST(mnemonic##_B_16B) { \
+ CALL_TEST_NEON_HELPER_ACROSS(mnemonic, B, 16B, kInput8bits##input); \
+ } \
+ TEST(mnemonic##_H_4H) { \
+ CALL_TEST_NEON_HELPER_ACROSS(mnemonic, H, 4H, kInput16bits##input); \
+ } \
+ TEST(mnemonic##_H_8H) { \
+ CALL_TEST_NEON_HELPER_ACROSS(mnemonic, H, 8H, kInput16bits##input); \
+ } \
+ TEST(mnemonic##_S_4S) { \
+ CALL_TEST_NEON_HELPER_ACROSS(mnemonic, S, 4S, kInput32bits##input); \
+ }
-#define DEFINE_TEST_NEON_ACROSS_LONG(mnemonic, input) \
- TEST(mnemonic##_H_8B) { \
- CALL_TEST_NEON_HELPER_ACROSS(mnemonic, H, 8B, kInput8bits##input); \
- } \
- TEST(mnemonic##_H_16B) { \
- CALL_TEST_NEON_HELPER_ACROSS(mnemonic, H, 16B, kInput8bits##input); \
- } \
- TEST(mnemonic##_S_4H) { \
- CALL_TEST_NEON_HELPER_ACROSS(mnemonic, S, 4H, kInput16bits##input); \
- } \
- TEST(mnemonic##_S_8H) { \
- CALL_TEST_NEON_HELPER_ACROSS(mnemonic, S, 8H, kInput16bits##input); \
- } \
- TEST(mnemonic##_D_4S) { \
- CALL_TEST_NEON_HELPER_ACROSS(mnemonic, D, 4S, kInput32bits##input); \
- }
+#define DEFINE_TEST_NEON_ACROSS_LONG(mnemonic, input) \
+ TEST(mnemonic##_H_8B) { \
+ CALL_TEST_NEON_HELPER_ACROSS(mnemonic, H, 8B, kInput8bits##input); \
+ } \
+ TEST(mnemonic##_H_16B) { \
+ CALL_TEST_NEON_HELPER_ACROSS(mnemonic, H, 16B, kInput8bits##input); \
+ } \
+ TEST(mnemonic##_S_4H) { \
+ CALL_TEST_NEON_HELPER_ACROSS(mnemonic, S, 4H, kInput16bits##input); \
+ } \
+ TEST(mnemonic##_S_8H) { \
+ CALL_TEST_NEON_HELPER_ACROSS(mnemonic, S, 8H, kInput16bits##input); \
+ } \
+ TEST(mnemonic##_D_4S) { \
+ CALL_TEST_NEON_HELPER_ACROSS(mnemonic, D, 4S, kInput32bits##input); \
+ }
-#define DEFINE_TEST_NEON_ACROSS_FP(mnemonic, input) \
- TEST(mnemonic##_S_4S) { \
- CALL_TEST_NEON_HELPER_ACROSS(mnemonic, S, 4S, kInputFloat##input); \
- }
+#define DEFINE_TEST_NEON_ACROSS_FP(mnemonic, input) \
+ TEST(mnemonic##_S_4S) { \
+ CALL_TEST_NEON_HELPER_ACROSS(mnemonic, S, 4S, kInputFloat##input); \
+ }
-#define CALL_TEST_NEON_HELPER_2DIFF(mnemonic, \
- vdform, vnform, \
- input_n) \
- CALL_TEST_NEON_HELPER_1Op(mnemonic, \
- vdform, vnform, \
- input_n)
+#define CALL_TEST_NEON_HELPER_2DIFF(mnemonic, vdform, vnform, input_n) \
+ CALL_TEST_NEON_HELPER_1Op(mnemonic, vdform, vnform, input_n)
-#define DEFINE_TEST_NEON_2DIFF_LONG(mnemonic, input) \
- TEST(mnemonic##_4H) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 4H, 8B, kInput8bits##input); \
- } \
- TEST(mnemonic##_8H) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 8H, 16B, kInput8bits##input); \
- } \
- TEST(mnemonic##_2S) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 2S, 4H, kInput16bits##input); \
- } \
- TEST(mnemonic##_4S) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 4S, 8H, kInput16bits##input); \
- } \
- TEST(mnemonic##_1D) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 1D, 2S, kInput32bits##input); \
- } \
- TEST(mnemonic##_2D) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 2D, 4S, kInput32bits##input); \
- }
+#define DEFINE_TEST_NEON_2DIFF_LONG(mnemonic, input) \
+ TEST(mnemonic##_4H) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 4H, 8B, kInput8bits##input); \
+ } \
+ TEST(mnemonic##_8H) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 8H, 16B, kInput8bits##input); \
+ } \
+ TEST(mnemonic##_2S) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 2S, 4H, kInput16bits##input); \
+ } \
+ TEST(mnemonic##_4S) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 4S, 8H, kInput16bits##input); \
+ } \
+ TEST(mnemonic##_1D) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 1D, 2S, kInput32bits##input); \
+ } \
+ TEST(mnemonic##_2D) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 2D, 4S, kInput32bits##input); \
+ }
-#define DEFINE_TEST_NEON_2DIFF_NARROW(mnemonic, input) \
- TEST(mnemonic##_8B) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 8B, 8H, kInput16bits##input); \
- } \
- TEST(mnemonic##_4H) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 4H, 4S, kInput32bits##input); \
- } \
- TEST(mnemonic##_2S) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 2S, 2D, kInput64bits##input); \
- } \
- TEST(mnemonic##2_16B) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic##2, 16B, 8H, kInput16bits##input);\
- } \
- TEST(mnemonic##2_8H) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic##2, 8H, 4S, kInput32bits##input); \
- } \
- TEST(mnemonic##2_4S) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic##2, 4S, 2D, kInput64bits##input); \
- }
+#define DEFINE_TEST_NEON_2DIFF_NARROW(mnemonic, input) \
+ TEST(mnemonic##_8B) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 8B, 8H, kInput16bits##input); \
+ } \
+ TEST(mnemonic##_4H) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 4H, 4S, kInput32bits##input); \
+ } \
+ TEST(mnemonic##_2S) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 2S, 2D, kInput64bits##input); \
+ } \
+ TEST(mnemonic##2_16B) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic##2, 16B, 8H, kInput16bits##input); \
+ } \
+ TEST(mnemonic##2_8H) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic##2, 8H, 4S, kInput32bits##input); \
+ } \
+ TEST(mnemonic##2_4S) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic##2, 4S, 2D, kInput64bits##input); \
+ }
-#define DEFINE_TEST_NEON_2DIFF_FP_LONG(mnemonic, input) \
- TEST(mnemonic##_4S) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 4S, 4H, kInputFloat16##input); \
- } \
- TEST(mnemonic##_2D) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 2D, 2S, kInputFloat##input); \
- } \
- TEST(mnemonic##2_4S) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic##2, 4S, 8H, kInputFloat16##input);\
- } \
- TEST(mnemonic##2_2D) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic##2, 2D, 4S, kInputFloat##input); \
- }
+#define DEFINE_TEST_NEON_2DIFF_FP_LONG(mnemonic, input) \
+ TEST(mnemonic##_4S) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 4S, 4H, kInputFloat16##input); \
+ } \
+ TEST(mnemonic##_2D) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 2D, 2S, kInputFloat##input); \
+ } \
+ TEST(mnemonic##2_4S) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic##2, 4S, 8H, kInputFloat16##input); \
+ } \
+ TEST(mnemonic##2_2D) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic##2, 2D, 4S, kInputFloat##input); \
+ }
-#define DEFINE_TEST_NEON_2DIFF_FP_NARROW(mnemonic, input) \
- TEST(mnemonic##_4H) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 4H, 4S, kInputFloat##input); \
- } \
- TEST(mnemonic##_2S) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 2S, 2D, kInputDouble##input); \
- } \
- TEST(mnemonic##2_8H) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic##2, 8H, 4S, kInputFloat##input); \
- } \
- TEST(mnemonic##2_4S) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic##2, 4S, 2D, kInputDouble##input); \
- }
+#define DEFINE_TEST_NEON_2DIFF_FP_NARROW(mnemonic, input) \
+ TEST(mnemonic##_4H) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 4H, 4S, kInputFloat##input); \
+ } \
+ TEST(mnemonic##_2S) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 2S, 2D, kInputDouble##input); \
+ } \
+ TEST(mnemonic##2_8H) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic##2, 8H, 4S, kInputFloat##input); \
+ } \
+ TEST(mnemonic##2_4S) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic##2, 4S, 2D, kInputDouble##input); \
+ }
-#define DEFINE_TEST_NEON_2DIFF_FP_NARROW_2S(mnemonic, input) \
- TEST(mnemonic##_2S) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 2S, 2D, kInputDouble##input); \
- } \
- TEST(mnemonic##2_4S) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic##2, 4S, 2D, kInputDouble##input); \
- }
+#define DEFINE_TEST_NEON_2DIFF_FP_NARROW_2S(mnemonic, input) \
+ TEST(mnemonic##_2S) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic, 2S, 2D, kInputDouble##input); \
+ } \
+ TEST(mnemonic##2_4S) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic##2, 4S, 2D, kInputDouble##input); \
+ }
-#define DEFINE_TEST_NEON_2DIFF_SCALAR_NARROW(mnemonic, input) \
- TEST(mnemonic##_B) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic, B, H, kInput16bits##input); \
- } \
- TEST(mnemonic##_H) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic, H, S, kInput32bits##input); \
- } \
- TEST(mnemonic##_S) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic, S, D, kInput64bits##input); \
- }
+#define DEFINE_TEST_NEON_2DIFF_SCALAR_NARROW(mnemonic, input) \
+ TEST(mnemonic##_B) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic, B, H, kInput16bits##input); \
+ } \
+ TEST(mnemonic##_H) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic, H, S, kInput32bits##input); \
+ } \
+ TEST(mnemonic##_S) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic, S, D, kInput64bits##input); \
+ }
-#define DEFINE_TEST_NEON_2DIFF_FP_SCALAR_SD(mnemonic, input) \
- TEST(mnemonic##_S) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic, S, 2S, kInputFloat##input); \
- } \
- TEST(mnemonic##_D) { \
- CALL_TEST_NEON_HELPER_2DIFF(mnemonic, D, 2D, kInputDouble##input); \
- }
+#define DEFINE_TEST_NEON_2DIFF_FP_SCALAR_SD(mnemonic, input) \
+ TEST(mnemonic##_S) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic, S, 2S, kInputFloat##input); \
+ } \
+ TEST(mnemonic##_D) { \
+ CALL_TEST_NEON_HELPER_2DIFF(mnemonic, D, 2D, kInputDouble##input); \
+ }
-#define CALL_TEST_NEON_HELPER_3SAME(mnemonic, variant, input_d, input_nm) { \
- CALL_TEST_NEON_HELPER_2Op(mnemonic, \
- variant, variant, variant, \
- input_d, input_nm, input_nm); \
- }
+#define CALL_TEST_NEON_HELPER_3SAME(mnemonic, variant, input_d, input_nm) \
+ { \
+ CALL_TEST_NEON_HELPER_2Op(mnemonic, \
+ variant, \
+ variant, \
+ variant, \
+ input_d, \
+ input_nm, \
+ input_nm); \
+ }
-#define DEFINE_TEST_NEON_3SAME_8B_16B(mnemonic, input) \
- TEST(mnemonic##_8B) { \
- CALL_TEST_NEON_HELPER_3SAME(mnemonic, 8B, \
- kInput8bitsAccDestination, \
- kInput8bits##input); \
- } \
- TEST(mnemonic##_16B) { \
- CALL_TEST_NEON_HELPER_3SAME(mnemonic, 16B, \
- kInput8bitsAccDestination, \
- kInput8bits##input); \
- } \
+#define DEFINE_TEST_NEON_3SAME_8B_16B(mnemonic, input) \
+ TEST(mnemonic##_8B) { \
+ CALL_TEST_NEON_HELPER_3SAME(mnemonic, \
+ 8B, \
+ kInput8bitsAccDestination, \
+ kInput8bits##input); \
+ } \
+ TEST(mnemonic##_16B) { \
+ CALL_TEST_NEON_HELPER_3SAME(mnemonic, \
+ 16B, \
+ kInput8bitsAccDestination, \
+ kInput8bits##input); \
+ }
-#define DEFINE_TEST_NEON_3SAME_HS(mnemonic, input) \
- TEST(mnemonic##_4H) { \
- CALL_TEST_NEON_HELPER_3SAME(mnemonic, 4H, \
- kInput16bitsAccDestination, \
- kInput16bits##input); \
- } \
- TEST(mnemonic##_8H) { \
- CALL_TEST_NEON_HELPER_3SAME(mnemonic, 8H, \
- kInput16bitsAccDestination, \
- kInput16bits##input); \
- } \
- TEST(mnemonic##_2S) { \
- CALL_TEST_NEON_HELPER_3SAME(mnemonic, 2S, \
- kInput32bitsAccDestination, \
- kInput32bits##input); \
- } \
- TEST(mnemonic##_4S) { \
- CALL_TEST_NEON_HELPER_3SAME(mnemonic, 4S, \
- kInput32bitsAccDestination, \
- kInput32bits##input); \
- }
+#define DEFINE_TEST_NEON_3SAME_HS(mnemonic, input) \
+ TEST(mnemonic##_4H) { \
+ CALL_TEST_NEON_HELPER_3SAME(mnemonic, \
+ 4H, \
+ kInput16bitsAccDestination, \
+ kInput16bits##input); \
+ } \
+ TEST(mnemonic##_8H) { \
+ CALL_TEST_NEON_HELPER_3SAME(mnemonic, \
+ 8H, \
+ kInput16bitsAccDestination, \
+ kInput16bits##input); \
+ } \
+ TEST(mnemonic##_2S) { \
+ CALL_TEST_NEON_HELPER_3SAME(mnemonic, \
+ 2S, \
+ kInput32bitsAccDestination, \
+ kInput32bits##input); \
+ } \
+ TEST(mnemonic##_4S) { \
+ CALL_TEST_NEON_HELPER_3SAME(mnemonic, \
+ 4S, \
+ kInput32bitsAccDestination, \
+ kInput32bits##input); \
+ }
-#define DEFINE_TEST_NEON_3SAME_NO2D(mnemonic, input) \
- DEFINE_TEST_NEON_3SAME_8B_16B(mnemonic, input) \
- DEFINE_TEST_NEON_3SAME_HS(mnemonic, input)
+#define DEFINE_TEST_NEON_3SAME_NO2D(mnemonic, input) \
+ DEFINE_TEST_NEON_3SAME_8B_16B(mnemonic, input) \
+ DEFINE_TEST_NEON_3SAME_HS(mnemonic, input)
-#define DEFINE_TEST_NEON_3SAME(mnemonic, input) \
- DEFINE_TEST_NEON_3SAME_NO2D(mnemonic, input) \
- TEST(mnemonic##_2D) { \
- CALL_TEST_NEON_HELPER_3SAME(mnemonic, 2D, \
- kInput64bitsAccDestination, \
- kInput64bits##input); \
- }
+#define DEFINE_TEST_NEON_3SAME(mnemonic, input) \
+ DEFINE_TEST_NEON_3SAME_NO2D(mnemonic, input) \
+ TEST(mnemonic##_2D) { \
+ CALL_TEST_NEON_HELPER_3SAME(mnemonic, \
+ 2D, \
+ kInput64bitsAccDestination, \
+ kInput64bits##input); \
+ }
-#define DEFINE_TEST_NEON_3SAME_FP(mnemonic, input) \
- TEST(mnemonic##_2S) { \
- CALL_TEST_NEON_HELPER_3SAME(mnemonic, 2S, \
- kInputFloatAccDestination, \
- kInputFloat##input); \
- } \
- TEST(mnemonic##_4S) { \
- CALL_TEST_NEON_HELPER_3SAME(mnemonic, 4S, \
- kInputFloatAccDestination, \
- kInputFloat##input); \
- } \
- TEST(mnemonic##_2D) { \
- CALL_TEST_NEON_HELPER_3SAME(mnemonic, 2D, \
- kInputDoubleAccDestination, \
- kInputDouble##input); \
- }
+#define DEFINE_TEST_NEON_3SAME_FP(mnemonic, input) \
+ TEST(mnemonic##_2S) { \
+ CALL_TEST_NEON_HELPER_3SAME(mnemonic, \
+ 2S, \
+ kInputFloatAccDestination, \
+ kInputFloat##input); \
+ } \
+ TEST(mnemonic##_4S) { \
+ CALL_TEST_NEON_HELPER_3SAME(mnemonic, \
+ 4S, \
+ kInputFloatAccDestination, \
+ kInputFloat##input); \
+ } \
+ TEST(mnemonic##_2D) { \
+ CALL_TEST_NEON_HELPER_3SAME(mnemonic, \
+ 2D, \
+ kInputDoubleAccDestination, \
+ kInputDouble##input); \
+ }
-#define DEFINE_TEST_NEON_3SAME_SCALAR_D(mnemonic, input) \
- TEST(mnemonic##_D) { \
- CALL_TEST_NEON_HELPER_3SAME(mnemonic, D, \
- kInput64bitsAccDestination, \
- kInput64bits##input); \
- }
+#define DEFINE_TEST_NEON_3SAME_SCALAR_D(mnemonic, input) \
+ TEST(mnemonic##_D) { \
+ CALL_TEST_NEON_HELPER_3SAME(mnemonic, \
+ D, \
+ kInput64bitsAccDestination, \
+ kInput64bits##input); \
+ }
-#define DEFINE_TEST_NEON_3SAME_SCALAR_HS(mnemonic, input) \
- TEST(mnemonic##_H) { \
- CALL_TEST_NEON_HELPER_3SAME(mnemonic, H, \
- kInput16bitsAccDestination, \
- kInput16bits##input); \
- } \
- TEST(mnemonic##_S) { \
- CALL_TEST_NEON_HELPER_3SAME(mnemonic, S, \
- kInput32bitsAccDestination, \
- kInput32bits##input); \
- } \
+#define DEFINE_TEST_NEON_3SAME_SCALAR_HS(mnemonic, input) \
+ TEST(mnemonic##_H) { \
+ CALL_TEST_NEON_HELPER_3SAME(mnemonic, \
+ H, \
+ kInput16bitsAccDestination, \
+ kInput16bits##input); \
+ } \
+ TEST(mnemonic##_S) { \
+ CALL_TEST_NEON_HELPER_3SAME(mnemonic, \
+ S, \
+ kInput32bitsAccDestination, \
+ kInput32bits##input); \
+ }
-#define DEFINE_TEST_NEON_3SAME_SCALAR(mnemonic, input) \
- TEST(mnemonic##_B) { \
- CALL_TEST_NEON_HELPER_3SAME(mnemonic, B, \
- kInput8bitsAccDestination, \
- kInput8bits##input); \
- } \
- TEST(mnemonic##_H) { \
- CALL_TEST_NEON_HELPER_3SAME(mnemonic, H, \
- kInput16bitsAccDestination, \
- kInput16bits##input); \
- } \
- TEST(mnemonic##_S) { \
- CALL_TEST_NEON_HELPER_3SAME(mnemonic, S, \
- kInput32bitsAccDestination, \
- kInput32bits##input); \
- } \
- TEST(mnemonic##_D) { \
- CALL_TEST_NEON_HELPER_3SAME(mnemonic, D, \
- kInput64bitsAccDestination, \
- kInput64bits##input); \
- }
+#define DEFINE_TEST_NEON_3SAME_SCALAR(mnemonic, input) \
+ TEST(mnemonic##_B) { \
+ CALL_TEST_NEON_HELPER_3SAME(mnemonic, \
+ B, \
+ kInput8bitsAccDestination, \
+ kInput8bits##input); \
+ } \
+ TEST(mnemonic##_H) { \
+ CALL_TEST_NEON_HELPER_3SAME(mnemonic, \
+ H, \
+ kInput16bitsAccDestination, \
+ kInput16bits##input); \
+ } \
+ TEST(mnemonic##_S) { \
+ CALL_TEST_NEON_HELPER_3SAME(mnemonic, \
+ S, \
+ kInput32bitsAccDestination, \
+ kInput32bits##input); \
+ } \
+ TEST(mnemonic##_D) { \
+ CALL_TEST_NEON_HELPER_3SAME(mnemonic, \
+ D, \
+ kInput64bitsAccDestination, \
+ kInput64bits##input); \
+ }
-#define DEFINE_TEST_NEON_3SAME_FP_SCALAR(mnemonic, input) \
- TEST(mnemonic##_S) { \
- CALL_TEST_NEON_HELPER_3SAME(mnemonic, S, \
- kInputFloatAccDestination, \
- kInputFloat##input); \
- } \
- TEST(mnemonic##_D) { \
- CALL_TEST_NEON_HELPER_3SAME(mnemonic, D, \
- kInputDoubleAccDestination, \
- kInputDouble##input); \
- }
+#define DEFINE_TEST_NEON_3SAME_FP_SCALAR(mnemonic, input) \
+ TEST(mnemonic##_S) { \
+ CALL_TEST_NEON_HELPER_3SAME(mnemonic, \
+ S, \
+ kInputFloatAccDestination, \
+ kInputFloat##input); \
+ } \
+ TEST(mnemonic##_D) { \
+ CALL_TEST_NEON_HELPER_3SAME(mnemonic, \
+ D, \
+ kInputDoubleAccDestination, \
+ kInputDouble##input); \
+ }
-#define CALL_TEST_NEON_HELPER_3DIFF(mnemonic, \
- vdform, vnform, vmform, \
- input_d, input_n, input_m) { \
- CALL_TEST_NEON_HELPER_2Op(mnemonic, \
- vdform, vnform, vmform, \
- input_d, input_n, input_m); \
- }
+#define CALL_TEST_NEON_HELPER_3DIFF( \
+ mnemonic, vdform, vnform, vmform, input_d, input_n, input_m) \
+ { \
+ CALL_TEST_NEON_HELPER_2Op(mnemonic, \
+ vdform, \
+ vnform, \
+ vmform, \
+ input_d, \
+ input_n, \
+ input_m); \
+ }
-#define DEFINE_TEST_NEON_3DIFF_LONG_8H(mnemonic, input) \
- TEST(mnemonic##_8H) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic, 8H, 8B, 8B, \
- kInput16bitsAccDestination, \
- kInput8bits##input, kInput8bits##input); \
- } \
- TEST(mnemonic##2_8H) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic##2, 8H, 16B, 16B, \
- kInput16bitsAccDestination, \
- kInput8bits##input, kInput8bits##input); \
- }
+#define DEFINE_TEST_NEON_3DIFF_LONG_8H(mnemonic, input) \
+ TEST(mnemonic##_8H) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic, \
+ 8H, \
+ 8B, \
+ 8B, \
+ kInput16bitsAccDestination, \
+ kInput8bits##input, \
+ kInput8bits##input); \
+ } \
+ TEST(mnemonic##2_8H) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic##2, \
+ 8H, \
+ 16B, \
+ 16B, \
+ kInput16bitsAccDestination, \
+ kInput8bits##input, \
+ kInput8bits##input); \
+ }
-#define DEFINE_TEST_NEON_3DIFF_LONG_4S(mnemonic, input) \
- TEST(mnemonic##_4S) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic, 4S, 4H, 4H, \
- kInput32bitsAccDestination, \
- kInput16bits##input, kInput16bits##input); \
- } \
- TEST(mnemonic##2_4S) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic##2, 4S, 8H, 8H, \
- kInput32bitsAccDestination, \
- kInput16bits##input, kInput16bits##input); \
- }
+#define DEFINE_TEST_NEON_3DIFF_LONG_4S(mnemonic, input) \
+ TEST(mnemonic##_4S) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic, \
+ 4S, \
+ 4H, \
+ 4H, \
+ kInput32bitsAccDestination, \
+ kInput16bits##input, \
+ kInput16bits##input); \
+ } \
+ TEST(mnemonic##2_4S) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic##2, \
+ 4S, \
+ 8H, \
+ 8H, \
+ kInput32bitsAccDestination, \
+ kInput16bits##input, \
+ kInput16bits##input); \
+ }
-#define DEFINE_TEST_NEON_3DIFF_LONG_2D(mnemonic, input) \
- TEST(mnemonic##_2D) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic, 2D, 2S, 2S, \
- kInput64bitsAccDestination, \
- kInput32bits##input, kInput32bits##input); \
- } \
- TEST(mnemonic##2_2D) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic##2, 2D, 4S, 4S, \
- kInput64bitsAccDestination, \
- kInput32bits##input, kInput32bits##input); \
- }
+#define DEFINE_TEST_NEON_3DIFF_LONG_2D(mnemonic, input) \
+ TEST(mnemonic##_2D) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic, \
+ 2D, \
+ 2S, \
+ 2S, \
+ kInput64bitsAccDestination, \
+ kInput32bits##input, \
+ kInput32bits##input); \
+ } \
+ TEST(mnemonic##2_2D) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic##2, \
+ 2D, \
+ 4S, \
+ 4S, \
+ kInput64bitsAccDestination, \
+ kInput32bits##input, \
+ kInput32bits##input); \
+ }
-#define DEFINE_TEST_NEON_3DIFF_LONG_SD(mnemonic, input) \
- DEFINE_TEST_NEON_3DIFF_LONG_4S(mnemonic, input) \
- DEFINE_TEST_NEON_3DIFF_LONG_2D(mnemonic, input)
+#define DEFINE_TEST_NEON_3DIFF_LONG_SD(mnemonic, input) \
+ DEFINE_TEST_NEON_3DIFF_LONG_4S(mnemonic, input) \
+ DEFINE_TEST_NEON_3DIFF_LONG_2D(mnemonic, input)
-#define DEFINE_TEST_NEON_3DIFF_LONG(mnemonic, input) \
- DEFINE_TEST_NEON_3DIFF_LONG_8H(mnemonic, input) \
- DEFINE_TEST_NEON_3DIFF_LONG_4S(mnemonic, input) \
- DEFINE_TEST_NEON_3DIFF_LONG_2D(mnemonic, input)
+#define DEFINE_TEST_NEON_3DIFF_LONG(mnemonic, input) \
+ DEFINE_TEST_NEON_3DIFF_LONG_8H(mnemonic, input) \
+ DEFINE_TEST_NEON_3DIFF_LONG_4S(mnemonic, input) \
+ DEFINE_TEST_NEON_3DIFF_LONG_2D(mnemonic, input)
-#define DEFINE_TEST_NEON_3DIFF_SCALAR_LONG_S(mnemonic, input) \
- TEST(mnemonic##_S) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic, S, H, H, \
- kInput32bitsAccDestination, \
- kInput16bits##input, \
- kInput16bits##input); \
- }
+#define DEFINE_TEST_NEON_3DIFF_SCALAR_LONG_S(mnemonic, input) \
+ TEST(mnemonic##_S) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic, \
+ S, \
+ H, \
+ H, \
+ kInput32bitsAccDestination, \
+ kInput16bits##input, \
+ kInput16bits##input); \
+ }
-#define DEFINE_TEST_NEON_3DIFF_SCALAR_LONG_D(mnemonic, input) \
- TEST(mnemonic##_D) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic, D, S, S, \
- kInput64bitsAccDestination, \
- kInput32bits##input, \
- kInput32bits##input); \
- }
+#define DEFINE_TEST_NEON_3DIFF_SCALAR_LONG_D(mnemonic, input) \
+ TEST(mnemonic##_D) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic, \
+ D, \
+ S, \
+ S, \
+ kInput64bitsAccDestination, \
+ kInput32bits##input, \
+ kInput32bits##input); \
+ }
-#define DEFINE_TEST_NEON_3DIFF_SCALAR_LONG_SD(mnemonic, input) \
- DEFINE_TEST_NEON_3DIFF_SCALAR_LONG_S(mnemonic, input) \
- DEFINE_TEST_NEON_3DIFF_SCALAR_LONG_D(mnemonic, input)
+#define DEFINE_TEST_NEON_3DIFF_SCALAR_LONG_SD(mnemonic, input) \
+ DEFINE_TEST_NEON_3DIFF_SCALAR_LONG_S(mnemonic, input) \
+ DEFINE_TEST_NEON_3DIFF_SCALAR_LONG_D(mnemonic, input)
-#define DEFINE_TEST_NEON_3DIFF_WIDE(mnemonic, input) \
- TEST(mnemonic##_8H) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic, 8H, 8H, 8B, \
- kInput16bitsAccDestination, \
- kInput16bits##input, kInput8bits##input); \
- } \
- TEST(mnemonic##_4S) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic, 4S, 4S, 4H, \
- kInput32bitsAccDestination, \
- kInput32bits##input, kInput16bits##input); \
- } \
- TEST(mnemonic##_2D) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic, 2D, 2D, 2S, \
- kInput64bitsAccDestination, \
- kInput64bits##input, kInput32bits##input); \
- } \
- TEST(mnemonic##2_8H) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic##2, 8H, 8H, 16B, \
- kInput16bitsAccDestination, \
- kInput16bits##input, kInput8bits##input); \
- } \
- TEST(mnemonic##2_4S) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic##2, 4S, 4S, 8H, \
- kInput32bitsAccDestination, \
- kInput32bits##input, kInput16bits##input); \
- } \
- TEST(mnemonic##2_2D) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic##2, 2D, 2D, 4S, \
- kInput64bitsAccDestination, \
- kInput64bits##input, kInput32bits##input); \
- }
+#define DEFINE_TEST_NEON_3DIFF_WIDE(mnemonic, input) \
+ TEST(mnemonic##_8H) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic, \
+ 8H, \
+ 8H, \
+ 8B, \
+ kInput16bitsAccDestination, \
+ kInput16bits##input, \
+ kInput8bits##input); \
+ } \
+ TEST(mnemonic##_4S) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic, \
+ 4S, \
+ 4S, \
+ 4H, \
+ kInput32bitsAccDestination, \
+ kInput32bits##input, \
+ kInput16bits##input); \
+ } \
+ TEST(mnemonic##_2D) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic, \
+ 2D, \
+ 2D, \
+ 2S, \
+ kInput64bitsAccDestination, \
+ kInput64bits##input, \
+ kInput32bits##input); \
+ } \
+ TEST(mnemonic##2_8H) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic##2, \
+ 8H, \
+ 8H, \
+ 16B, \
+ kInput16bitsAccDestination, \
+ kInput16bits##input, \
+ kInput8bits##input); \
+ } \
+ TEST(mnemonic##2_4S) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic##2, \
+ 4S, \
+ 4S, \
+ 8H, \
+ kInput32bitsAccDestination, \
+ kInput32bits##input, \
+ kInput16bits##input); \
+ } \
+ TEST(mnemonic##2_2D) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic##2, \
+ 2D, \
+ 2D, \
+ 4S, \
+ kInput64bitsAccDestination, \
+ kInput64bits##input, \
+ kInput32bits##input); \
+ }
-#define DEFINE_TEST_NEON_3DIFF_NARROW(mnemonic, input) \
- TEST(mnemonic##_8B) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic, 8B, 8H, 8H, \
- kInput8bitsAccDestination, \
- kInput16bits##input, kInput16bits##input); \
- } \
- TEST(mnemonic##_4H) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic, 4H, 4S, 4S, \
- kInput16bitsAccDestination, \
- kInput32bits##input, kInput32bits##input); \
- } \
- TEST(mnemonic##_2S) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic, 2S, 2D, 2D, \
- kInput32bitsAccDestination, \
- kInput64bits##input, kInput64bits##input); \
- } \
- TEST(mnemonic##2_16B) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic##2, 16B, 8H, 8H, \
- kInput8bitsAccDestination, \
- kInput16bits##input, kInput16bits##input); \
- } \
- TEST(mnemonic##2_8H) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic##2, 8H, 4S, 4S, \
- kInput16bitsAccDestination, \
- kInput32bits##input, kInput32bits##input); \
- } \
- TEST(mnemonic##2_4S) { \
- CALL_TEST_NEON_HELPER_3DIFF(mnemonic##2, 4S, 2D, 2D, \
- kInput32bitsAccDestination, \
- kInput64bits##input, kInput64bits##input); \
- }
+#define DEFINE_TEST_NEON_3DIFF_NARROW(mnemonic, input) \
+ TEST(mnemonic##_8B) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic, \
+ 8B, \
+ 8H, \
+ 8H, \
+ kInput8bitsAccDestination, \
+ kInput16bits##input, \
+ kInput16bits##input); \
+ } \
+ TEST(mnemonic##_4H) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic, \
+ 4H, \
+ 4S, \
+ 4S, \
+ kInput16bitsAccDestination, \
+ kInput32bits##input, \
+ kInput32bits##input); \
+ } \
+ TEST(mnemonic##_2S) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic, \
+ 2S, \
+ 2D, \
+ 2D, \
+ kInput32bitsAccDestination, \
+ kInput64bits##input, \
+ kInput64bits##input); \
+ } \
+ TEST(mnemonic##2_16B) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic##2, \
+ 16B, \
+ 8H, \
+ 8H, \
+ kInput8bitsAccDestination, \
+ kInput16bits##input, \
+ kInput16bits##input); \
+ } \
+ TEST(mnemonic##2_8H) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic##2, \
+ 8H, \
+ 4S, \
+ 4S, \
+ kInput16bitsAccDestination, \
+ kInput32bits##input, \
+ kInput32bits##input); \
+ } \
+ TEST(mnemonic##2_4S) { \
+ CALL_TEST_NEON_HELPER_3DIFF(mnemonic##2, \
+ 4S, \
+ 2D, \
+ 2D, \
+ kInput32bitsAccDestination, \
+ kInput64bits##input, \
+ kInput64bits##input); \
+ }
-#define CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- vdform, vnform, \
- input_n, \
- input_imm) { \
- CALL_TEST_NEON_HELPER_2OpImm(mnemonic, \
- vdform, vnform, \
- input_n, input_imm); \
- }
+#define CALL_TEST_NEON_HELPER_2OPIMM( \
+ mnemonic, vdform, vnform, input_n, input_imm) \
+ { \
+ CALL_TEST_NEON_HELPER_2OpImm(mnemonic, \
+ vdform, \
+ vnform, \
+ input_n, \
+ input_imm); \
+ }
-#define DEFINE_TEST_NEON_2OPIMM(mnemonic, input, input_imm) \
- TEST(mnemonic##_8B_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 8B, 8B, \
- kInput8bits##input, \
- kInput8bitsImm##input_imm); \
- } \
- TEST(mnemonic##_16B_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 16B, 16B, \
- kInput8bits##input, \
- kInput8bitsImm##input_imm); \
- } \
- TEST(mnemonic##_4H_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 4H, 4H, \
- kInput16bits##input, \
- kInput16bitsImm##input_imm); \
- } \
- TEST(mnemonic##_8H_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 8H, 8H, \
- kInput16bits##input, \
- kInput16bitsImm##input_imm); \
- } \
- TEST(mnemonic##_2S_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 2S, 2S, \
- kInput32bits##input, \
- kInput32bitsImm##input_imm); \
- } \
- TEST(mnemonic##_4S_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 4S, 4S, \
- kInput32bits##input, \
- kInput32bitsImm##input_imm); \
- } \
- TEST(mnemonic##_2D_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 2D, 2D, \
- kInput64bits##input, \
- kInput64bitsImm##input_imm); \
- }
+#define DEFINE_TEST_NEON_2OPIMM(mnemonic, input, input_imm) \
+ TEST(mnemonic##_8B_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 8B, \
+ 8B, \
+ kInput8bits##input, \
+ kInput8bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_16B_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 16B, \
+ 16B, \
+ kInput8bits##input, \
+ kInput8bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_4H_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 4H, \
+ 4H, \
+ kInput16bits##input, \
+ kInput16bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_8H_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 8H, \
+ 8H, \
+ kInput16bits##input, \
+ kInput16bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_2S_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 2S, \
+ 2S, \
+ kInput32bits##input, \
+ kInput32bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_4S_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 4S, \
+ 4S, \
+ kInput32bits##input, \
+ kInput32bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_2D_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 2D, \
+ 2D, \
+ kInput64bits##input, \
+ kInput64bitsImm##input_imm); \
+ }
-#define DEFINE_TEST_NEON_2OPIMM_COPY(mnemonic, input, input_imm) \
- TEST(mnemonic##_8B_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 8B, B, \
- kInput8bits##input, \
- kInput8bitsImm##input_imm); \
- } \
- TEST(mnemonic##_16B_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 16B, B, \
- kInput8bits##input, \
- kInput8bitsImm##input_imm); \
- } \
- TEST(mnemonic##_4H_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 4H, H, \
- kInput16bits##input, \
- kInput16bitsImm##input_imm); \
- } \
- TEST(mnemonic##_8H_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 8H, H, \
- kInput16bits##input, \
- kInput16bitsImm##input_imm); \
- } \
- TEST(mnemonic##_2S_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 2S, S, \
- kInput32bits##input, \
- kInput32bitsImm##input_imm); \
- } \
- TEST(mnemonic##_4S_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 4S, S, \
- kInput32bits##input, \
- kInput32bitsImm##input_imm); \
- } \
- TEST(mnemonic##_2D_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 2D, D, \
- kInput64bits##input, \
- kInput64bitsImm##input_imm); \
- }
+#define DEFINE_TEST_NEON_2OPIMM_COPY(mnemonic, input, input_imm) \
+ TEST(mnemonic##_8B_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 8B, \
+ B, \
+ kInput8bits##input, \
+ kInput8bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_16B_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 16B, \
+ B, \
+ kInput8bits##input, \
+ kInput8bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_4H_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 4H, \
+ H, \
+ kInput16bits##input, \
+ kInput16bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_8H_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 8H, \
+ H, \
+ kInput16bits##input, \
+ kInput16bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_2S_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 2S, \
+ S, \
+ kInput32bits##input, \
+ kInput32bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_4S_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 4S, \
+ S, \
+ kInput32bits##input, \
+ kInput32bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_2D_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 2D, \
+ D, \
+ kInput64bits##input, \
+ kInput64bitsImm##input_imm); \
+ }
-#define DEFINE_TEST_NEON_2OPIMM_NARROW(mnemonic, input, input_imm) \
- TEST(mnemonic##_8B_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 8B, 8H, \
- kInput16bits##input, \
- kInput8bitsImm##input_imm); \
- } \
- TEST(mnemonic##_4H_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 4H, 4S, \
- kInput32bits##input, \
- kInput16bitsImm##input_imm); \
- } \
- TEST(mnemonic##_2S_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 2S, 2D, \
- kInput64bits##input, \
- kInput32bitsImm##input_imm); \
- } \
- TEST(mnemonic##2_16B_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic##2, \
- 16B, 8H, \
- kInput16bits##input, \
- kInput8bitsImm##input_imm); \
- } \
- TEST(mnemonic##2_8H_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic##2, \
- 8H, 4S, \
- kInput32bits##input, \
- kInput16bitsImm##input_imm); \
- } \
- TEST(mnemonic##2_4S_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic##2, \
- 4S, 2D, \
- kInput64bits##input, \
- kInput32bitsImm##input_imm); \
- }
+#define DEFINE_TEST_NEON_2OPIMM_NARROW(mnemonic, input, input_imm) \
+ TEST(mnemonic##_8B_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 8B, \
+ 8H, \
+ kInput16bits##input, \
+ kInput8bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_4H_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 4H, \
+ 4S, \
+ kInput32bits##input, \
+ kInput16bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_2S_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 2S, \
+ 2D, \
+ kInput64bits##input, \
+ kInput32bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##2_16B_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic##2, \
+ 16B, \
+ 8H, \
+ kInput16bits##input, \
+ kInput8bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##2_8H_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic##2, \
+ 8H, \
+ 4S, \
+ kInput32bits##input, \
+ kInput16bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##2_4S_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic##2, \
+ 4S, \
+ 2D, \
+ kInput64bits##input, \
+ kInput32bitsImm##input_imm); \
+ }
-#define DEFINE_TEST_NEON_2OPIMM_SCALAR_NARROW(mnemonic, input, input_imm) \
- TEST(mnemonic##_B_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- B, H, \
- kInput16bits##input, \
- kInput8bitsImm##input_imm); \
- } \
- TEST(mnemonic##_H_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- H, S, \
- kInput32bits##input, \
- kInput16bitsImm##input_imm); \
- } \
- TEST(mnemonic##_S_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- S, D, \
- kInput64bits##input, \
- kInput32bitsImm##input_imm); \
- }
+#define DEFINE_TEST_NEON_2OPIMM_SCALAR_NARROW(mnemonic, input, input_imm) \
+ TEST(mnemonic##_B_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ B, \
+ H, \
+ kInput16bits##input, \
+ kInput8bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_H_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ H, \
+ S, \
+ kInput32bits##input, \
+ kInput16bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_S_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ S, \
+ D, \
+ kInput64bits##input, \
+ kInput32bitsImm##input_imm); \
+ }
-#define DEFINE_TEST_NEON_2OPIMM_FCMP_ZERO(mnemonic, input, input_imm) \
- TEST(mnemonic##_2S_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM( \
- mnemonic, \
- 2S, 2S, \
- kInputFloat##Basic, \
- kInputDoubleImm##input_imm) \
- } \
- TEST(mnemonic##_4S_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM( \
- mnemonic, \
- 4S, 4S, \
- kInputFloat##input, \
- kInputDoubleImm##input_imm); \
- } \
- TEST(mnemonic##_2D_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM( \
- mnemonic, \
- 2D, 2D, \
- kInputDouble##input, \
- kInputDoubleImm##input_imm); \
- }
+#define DEFINE_TEST_NEON_2OPIMM_FCMP_ZERO(mnemonic, input, input_imm) \
+ TEST(mnemonic##_2S_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 2S, \
+ 2S, \
+ kInputFloat##Basic, \
+ kInputDoubleImm##input_imm) \
+ } \
+ TEST(mnemonic##_4S_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 4S, \
+ 4S, \
+ kInputFloat##input, \
+ kInputDoubleImm##input_imm); \
+ } \
+ TEST(mnemonic##_2D_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 2D, \
+ 2D, \
+ kInputDouble##input, \
+ kInputDoubleImm##input_imm); \
+ }
-#define DEFINE_TEST_NEON_2OPIMM_FP(mnemonic, input, input_imm) \
- TEST(mnemonic##_2S_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM( \
- mnemonic, \
- 2S, 2S, \
- kInputFloat##Basic, \
- kInput32bitsImm##input_imm) \
- } \
- TEST(mnemonic##_4S_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM( \
- mnemonic, \
- 4S, 4S, \
- kInputFloat##input, \
- kInput32bitsImm##input_imm) \
- } \
- TEST(mnemonic##_2D_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM( \
- mnemonic, \
- 2D, 2D, \
- kInputDouble##input, \
- kInput64bitsImm##input_imm) \
- }
+#define DEFINE_TEST_NEON_2OPIMM_FP(mnemonic, input, input_imm) \
+ TEST(mnemonic##_2S_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 2S, \
+ 2S, \
+ kInputFloat##Basic, \
+ kInput32bitsImm##input_imm) \
+ } \
+ TEST(mnemonic##_4S_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 4S, \
+ 4S, \
+ kInputFloat##input, \
+ kInput32bitsImm##input_imm) \
+ } \
+ TEST(mnemonic##_2D_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 2D, \
+ 2D, \
+ kInputDouble##input, \
+ kInput64bitsImm##input_imm) \
+ }
-#define DEFINE_TEST_NEON_2OPIMM_FP_SCALAR(mnemonic, input, input_imm) \
- TEST(mnemonic##_S_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM( \
- mnemonic, \
- S, S, \
- kInputFloat##Basic, \
- kInput32bitsImm##input_imm) \
- } \
- TEST(mnemonic##_D_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM( \
- mnemonic, \
- D, D, \
- kInputDouble##input, \
- kInput64bitsImm##input_imm) \
- }
+#define DEFINE_TEST_NEON_2OPIMM_FP_SCALAR(mnemonic, input, input_imm) \
+ TEST(mnemonic##_S_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ S, \
+ S, \
+ kInputFloat##Basic, \
+ kInput32bitsImm##input_imm) \
+ } \
+ TEST(mnemonic##_D_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ D, \
+ D, \
+ kInputDouble##input, \
+ kInput64bitsImm##input_imm) \
+ }
-#define DEFINE_TEST_NEON_2OPIMM_SD(mnemonic, input, input_imm) \
- TEST(mnemonic##_2S_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 2S, 2S, \
- kInput32bits##input, \
- kInput32bitsImm##input_imm); \
- } \
- TEST(mnemonic##_4S_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 4S, 4S, \
- kInput32bits##input, \
- kInput32bitsImm##input_imm); \
- } \
- TEST(mnemonic##_2D_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 2D, 2D, \
- kInput64bits##input, \
- kInput64bitsImm##input_imm); \
- }
+#define DEFINE_TEST_NEON_2OPIMM_SD(mnemonic, input, input_imm) \
+ TEST(mnemonic##_2S_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 2S, \
+ 2S, \
+ kInput32bits##input, \
+ kInput32bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_4S_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 4S, \
+ 4S, \
+ kInput32bits##input, \
+ kInput32bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_2D_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 2D, \
+ 2D, \
+ kInput64bits##input, \
+ kInput64bitsImm##input_imm); \
+ }
-#define DEFINE_TEST_NEON_2OPIMM_SCALAR_D(mnemonic, input, input_imm) \
- TEST(mnemonic##_D_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- D, D, \
- kInput64bits##input, \
- kInput64bitsImm##input_imm); \
- }
+#define DEFINE_TEST_NEON_2OPIMM_SCALAR_D(mnemonic, input, input_imm) \
+ TEST(mnemonic##_D_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ D, \
+ D, \
+ kInput64bits##input, \
+ kInput64bitsImm##input_imm); \
+ }
-#define DEFINE_TEST_NEON_2OPIMM_SCALAR_SD(mnemonic, input, input_imm) \
- TEST(mnemonic##_S_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- S, S, \
- kInput32bits##input, \
- kInput32bitsImm##input_imm); \
- } \
- DEFINE_TEST_NEON_2OPIMM_SCALAR_D(mnemonic, input, input_imm)
+#define DEFINE_TEST_NEON_2OPIMM_SCALAR_SD(mnemonic, input, input_imm) \
+ TEST(mnemonic##_S_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ S, \
+ S, \
+ kInput32bits##input, \
+ kInput32bitsImm##input_imm); \
+ } \
+ DEFINE_TEST_NEON_2OPIMM_SCALAR_D(mnemonic, input, input_imm)
-#define DEFINE_TEST_NEON_2OPIMM_FP_SCALAR_D(mnemonic, input, input_imm) \
- TEST(mnemonic##_D_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- D, D, \
- kInputDouble##input, \
- kInputDoubleImm##input_imm); \
- }
+#define DEFINE_TEST_NEON_2OPIMM_FP_SCALAR_D(mnemonic, input, input_imm) \
+ TEST(mnemonic##_D_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ D, \
+ D, \
+ kInputDouble##input, \
+ kInputDoubleImm##input_imm); \
+ }
-#define DEFINE_TEST_NEON_2OPIMM_FP_SCALAR_SD(mnemonic, input, input_imm) \
- TEST(mnemonic##_S_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- S, S, \
- kInputFloat##input, \
- kInputDoubleImm##input_imm); \
- } \
- DEFINE_TEST_NEON_2OPIMM_FP_SCALAR_D(mnemonic, input, input_imm)
+#define DEFINE_TEST_NEON_2OPIMM_FP_SCALAR_SD(mnemonic, input, input_imm) \
+ TEST(mnemonic##_S_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ S, \
+ S, \
+ kInputFloat##input, \
+ kInputDoubleImm##input_imm); \
+ } \
+ DEFINE_TEST_NEON_2OPIMM_FP_SCALAR_D(mnemonic, input, input_imm)
-#define DEFINE_TEST_NEON_2OPIMM_SCALAR(mnemonic, input, input_imm) \
- TEST(mnemonic##_B_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- B, B, \
- kInput8bits##input, \
- kInput8bitsImm##input_imm); \
- } \
- TEST(mnemonic##_H_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- H, H, \
- kInput16bits##input, \
- kInput16bitsImm##input_imm); \
- } \
- DEFINE_TEST_NEON_2OPIMM_SCALAR_SD(mnemonic, input, input_imm)
+#define DEFINE_TEST_NEON_2OPIMM_SCALAR(mnemonic, input, input_imm) \
+ TEST(mnemonic##_B_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ B, \
+ B, \
+ kInput8bits##input, \
+ kInput8bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_H_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ H, \
+ H, \
+ kInput16bits##input, \
+ kInput16bitsImm##input_imm); \
+ } \
+ DEFINE_TEST_NEON_2OPIMM_SCALAR_SD(mnemonic, input, input_imm)
-#define DEFINE_TEST_NEON_2OPIMM_LONG(mnemonic, input, input_imm) \
- TEST(mnemonic##_8H_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 8H, 8B, \
- kInput8bits##input, \
- kInput8bitsImm##input_imm); \
- } \
- TEST(mnemonic##_4S_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 4S, 4H, \
- kInput16bits##input, \
- kInput16bitsImm##input_imm); \
- } \
- TEST(mnemonic##_2D_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
- 2D, 2S, \
- kInput32bits##input, \
- kInput32bitsImm##input_imm); \
- } \
- TEST(mnemonic##2_8H_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic##2, \
- 8H, 16B, \
- kInput8bits##input, \
- kInput8bitsImm##input_imm); \
- } \
- TEST(mnemonic##2_4S_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic##2, \
- 4S, 8H, \
- kInput16bits##input, \
- kInput16bitsImm##input_imm); \
- } \
- TEST(mnemonic##2_2D_2OPIMM) { \
- CALL_TEST_NEON_HELPER_2OPIMM(mnemonic##2, \
- 2D, 4S, \
- kInput32bits##input, \
- kInput32bitsImm##input_imm); \
- }
+#define DEFINE_TEST_NEON_2OPIMM_LONG(mnemonic, input, input_imm) \
+ TEST(mnemonic##_8H_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 8H, \
+ 8B, \
+ kInput8bits##input, \
+ kInput8bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_4S_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 4S, \
+ 4H, \
+ kInput16bits##input, \
+ kInput16bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##_2D_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic, \
+ 2D, \
+ 2S, \
+ kInput32bits##input, \
+ kInput32bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##2_8H_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic##2, \
+ 8H, \
+ 16B, \
+ kInput8bits##input, \
+ kInput8bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##2_4S_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic##2, \
+ 4S, \
+ 8H, \
+ kInput16bits##input, \
+ kInput16bitsImm##input_imm); \
+ } \
+ TEST(mnemonic##2_2D_2OPIMM) { \
+ CALL_TEST_NEON_HELPER_2OPIMM(mnemonic##2, \
+ 2D, \
+ 4S, \
+ kInput32bits##input, \
+ kInput32bitsImm##input_imm); \
+ }
-#define CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
- vdform, vnform, vmform, \
- input_d, input_n, \
- input_m, indices) { \
- CALL_TEST_NEON_HELPER_ByElement(mnemonic, \
- vdform, vnform, vmform, \
- input_d, input_n, \
- input_m, indices); \
- }
+#define CALL_TEST_NEON_HELPER_BYELEMENT( \
+ mnemonic, vdform, vnform, vmform, input_d, input_n, input_m, indices) \
+ { \
+ CALL_TEST_NEON_HELPER_ByElement(mnemonic, \
+ vdform, \
+ vnform, \
+ vmform, \
+ input_d, \
+ input_n, \
+ input_m, \
+ indices); \
+ }
-#define DEFINE_TEST_NEON_BYELEMENT(mnemonic, input_d, input_n, input_m) \
- TEST(mnemonic##_4H_4H_H) { \
- CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
- 4H, 4H, H, \
- kInput16bits##input_d, \
- kInput16bits##input_n, \
- kInput16bits##input_m, \
- kInputHIndices); \
- } \
- TEST(mnemonic##_8H_8H_H) { \
- CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
- 8H, 8H, H, \
- kInput16bits##input_d, \
- kInput16bits##input_n, \
- kInput16bits##input_m, \
- kInputHIndices); \
- } \
- TEST(mnemonic##_2S_2S_S) { \
- CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
- 2S, 2S, S, \
- kInput32bits##input_d, \
- kInput32bits##input_n, \
- kInput32bits##input_m, \
- kInputSIndices); \
- } \
- TEST(mnemonic##_4S_4S_S) { \
- CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
- 4S, 4S, S, \
- kInput32bits##input_d, \
- kInput32bits##input_n, \
- kInput32bits##input_m, \
- kInputSIndices); \
- }
+#define DEFINE_TEST_NEON_BYELEMENT(mnemonic, input_d, input_n, input_m) \
+ TEST(mnemonic##_4H_4H_H) { \
+ CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
+ 4H, \
+ 4H, \
+ H, \
+ kInput16bits##input_d, \
+ kInput16bits##input_n, \
+ kInput16bits##input_m, \
+ kInputHIndices); \
+ } \
+ TEST(mnemonic##_8H_8H_H) { \
+ CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
+ 8H, \
+ 8H, \
+ H, \
+ kInput16bits##input_d, \
+ kInput16bits##input_n, \
+ kInput16bits##input_m, \
+ kInputHIndices); \
+ } \
+ TEST(mnemonic##_2S_2S_S) { \
+ CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
+ 2S, \
+ 2S, \
+ S, \
+ kInput32bits##input_d, \
+ kInput32bits##input_n, \
+ kInput32bits##input_m, \
+ kInputSIndices); \
+ } \
+ TEST(mnemonic##_4S_4S_S) { \
+ CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
+ 4S, \
+ 4S, \
+ S, \
+ kInput32bits##input_d, \
+ kInput32bits##input_n, \
+ kInput32bits##input_m, \
+ kInputSIndices); \
+ }
-#define DEFINE_TEST_NEON_BYELEMENT_SCALAR(mnemonic, \
- input_d, input_n, input_m) \
- TEST(mnemonic##_H_H_H) { \
- CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
- H, H, H, \
- kInput16bits##input_d, \
- kInput16bits##input_n, \
- kInput16bits##input_m, \
- kInputHIndices); \
- } \
- TEST(mnemonic##_S_S_S) { \
- CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
- S, S, S, \
- kInput32bits##input_d, \
- kInput32bits##input_n, \
- kInput32bits##input_m, \
- kInputSIndices); \
- }
+#define DEFINE_TEST_NEON_BYELEMENT_SCALAR(mnemonic, input_d, input_n, input_m) \
+ TEST(mnemonic##_H_H_H) { \
+ CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
+ H, \
+ H, \
+ H, \
+ kInput16bits##input_d, \
+ kInput16bits##input_n, \
+ kInput16bits##input_m, \
+ kInputHIndices); \
+ } \
+ TEST(mnemonic##_S_S_S) { \
+ CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
+ S, \
+ S, \
+ S, \
+ kInput32bits##input_d, \
+ kInput32bits##input_n, \
+ kInput32bits##input_m, \
+ kInputSIndices); \
+ }
-#define DEFINE_TEST_NEON_FP_BYELEMENT(mnemonic, input_d, input_n, input_m) \
- TEST(mnemonic##_2S_2S_S) { \
- CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
- 2S, 2S, S, \
- kInputFloat##input_d, \
- kInputFloat##input_n, \
- kInputFloat##input_m, \
- kInputSIndices); \
- } \
- TEST(mnemonic##_4S_4S_S) { \
- CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
- 4S, 4S, S, \
- kInputFloat##input_d, \
- kInputFloat##input_n, \
- kInputFloat##input_m, \
- kInputSIndices); \
- } \
- TEST(mnemonic##_2D_2D_D) { \
- CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
- 2D, 2D, D, \
- kInputDouble##input_d, \
- kInputDouble##input_n, \
- kInputDouble##input_m, \
- kInputDIndices); \
- } \
+#define DEFINE_TEST_NEON_FP_BYELEMENT(mnemonic, input_d, input_n, input_m) \
+ TEST(mnemonic##_2S_2S_S) { \
+ CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
+ 2S, \
+ 2S, \
+ S, \
+ kInputFloat##input_d, \
+ kInputFloat##input_n, \
+ kInputFloat##input_m, \
+ kInputSIndices); \
+ } \
+ TEST(mnemonic##_4S_4S_S) { \
+ CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
+ 4S, \
+ 4S, \
+ S, \
+ kInputFloat##input_d, \
+ kInputFloat##input_n, \
+ kInputFloat##input_m, \
+ kInputSIndices); \
+ } \
+ TEST(mnemonic##_2D_2D_D) { \
+ CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
+ 2D, \
+ 2D, \
+ D, \
+ kInputDouble##input_d, \
+ kInputDouble##input_n, \
+ kInputDouble##input_m, \
+ kInputDIndices); \
+ }
-#define DEFINE_TEST_NEON_FP_BYELEMENT_SCALAR(mnemonic, inp_d, inp_n, inp_m) \
- TEST(mnemonic##_S_S_S) { \
- CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
- S, S, S, \
- kInputFloat##inp_d, \
- kInputFloat##inp_n, \
- kInputFloat##inp_m, \
- kInputSIndices); \
- } \
- TEST(mnemonic##_D_D_D) { \
- CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
- D, D, D, \
- kInputDouble##inp_d, \
- kInputDouble##inp_n, \
- kInputDouble##inp_m, \
- kInputDIndices); \
- } \
+#define DEFINE_TEST_NEON_FP_BYELEMENT_SCALAR(mnemonic, inp_d, inp_n, inp_m) \
+ TEST(mnemonic##_S_S_S) { \
+ CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
+ S, \
+ S, \
+ S, \
+ kInputFloat##inp_d, \
+ kInputFloat##inp_n, \
+ kInputFloat##inp_m, \
+ kInputSIndices); \
+ } \
+ TEST(mnemonic##_D_D_D) { \
+ CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
+ D, \
+ D, \
+ D, \
+ kInputDouble##inp_d, \
+ kInputDouble##inp_n, \
+ kInputDouble##inp_m, \
+ kInputDIndices); \
+ }
#define DEFINE_TEST_NEON_BYELEMENT_DIFF(mnemonic, input_d, input_n, input_m) \
- TEST(mnemonic##_4S_4H_H) { \
- CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
- 4S, 4H, H, \
- kInput32bits##input_d, \
- kInput16bits##input_n, \
- kInput16bits##input_m, \
- kInputHIndices); \
- } \
- TEST(mnemonic##2_4S_8H_H) { \
- CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic##2, \
- 4S, 8H, H, \
- kInput32bits##input_d, \
- kInput16bits##input_n, \
- kInput16bits##input_m, \
- kInputHIndices); \
- } \
- TEST(mnemonic##_2D_2S_S) { \
- CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
- 2D, 2S, S, \
- kInput64bits##input_d, \
- kInput32bits##input_n, \
- kInput32bits##input_m, \
- kInputSIndices); \
- } \
- TEST(mnemonic##2_2D_4S_S) { \
- CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic##2, \
- 2D, 4S, S, \
- kInput64bits##input_d, \
- kInput32bits##input_n, \
- kInput32bits##input_m, \
- kInputSIndices); \
- }
-
-#define DEFINE_TEST_NEON_BYELEMENT_DIFF_SCALAR(mnemonic, \
- input_d, input_n, input_m) \
- TEST(mnemonic##_S_H_H) { \
- CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
- S, H, H, \
- kInput32bits##input_d, \
- kInput16bits##input_n, \
- kInput16bits##input_m, \
- kInputHIndices); \
- } \
- TEST(mnemonic##_D_S_S) { \
- CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
- D, S, S, \
- kInput64bits##input_d, \
- kInput32bits##input_n, \
- kInput32bits##input_m, \
- kInputSIndices); \
- }
-
-
-#define CALL_TEST_NEON_HELPER_2OP2IMM(mnemonic, \
- variant, \
- input_d, \
- input_imm1, \
- input_n, \
- input_imm2) { \
- CALL_TEST_NEON_HELPER_OpImmOpImm(&MacroAssembler::mnemonic, \
- mnemonic, \
- variant, variant, \
- input_d, input_imm1, \
- input_n, input_imm2); \
- }
-
-#define DEFINE_TEST_NEON_2OP2IMM(mnemonic, \
- input_d, input_imm1, \
- input_n, input_imm2) \
- TEST(mnemonic##_B) { \
- CALL_TEST_NEON_HELPER_2OP2IMM(mnemonic, \
- 16B, \
- kInput8bits##input_d, \
- kInput8bitsImm##input_imm1, \
- kInput8bits##input_n, \
- kInput8bitsImm##input_imm2); \
- } \
- TEST(mnemonic##_H) { \
- CALL_TEST_NEON_HELPER_2OP2IMM(mnemonic, \
- 8H, \
- kInput16bits##input_d, \
- kInput16bitsImm##input_imm1, \
- kInput16bits##input_n, \
- kInput16bitsImm##input_imm2); \
- } \
- TEST(mnemonic##_S) { \
- CALL_TEST_NEON_HELPER_2OP2IMM(mnemonic, \
+ TEST(mnemonic##_4S_4H_H) { \
+ CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
4S, \
+ 4H, \
+ H, \
kInput32bits##input_d, \
- kInput32bitsImm##input_imm1, \
- kInput32bits##input_n, \
- kInput32bitsImm##input_imm2); \
- } \
- TEST(mnemonic##_D) { \
- CALL_TEST_NEON_HELPER_2OP2IMM(mnemonic, \
+ kInput16bits##input_n, \
+ kInput16bits##input_m, \
+ kInputHIndices); \
+ } \
+ TEST(mnemonic##2_4S_8H_H) { \
+ CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic##2, \
+ 4S, \
+ 8H, \
+ H, \
+ kInput32bits##input_d, \
+ kInput16bits##input_n, \
+ kInput16bits##input_m, \
+ kInputHIndices); \
+ } \
+ TEST(mnemonic##_2D_2S_S) { \
+ CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
2D, \
+ 2S, \
+ S, \
kInput64bits##input_d, \
- kInput64bitsImm##input_imm1, \
- kInput64bits##input_n, \
- kInput64bitsImm##input_imm2); \
- }
+ kInput32bits##input_n, \
+ kInput32bits##input_m, \
+ kInputSIndices); \
+ } \
+ TEST(mnemonic##2_2D_4S_S) { \
+ CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic##2, \
+ 2D, \
+ 4S, \
+ S, \
+ kInput64bits##input_d, \
+ kInput32bits##input_n, \
+ kInput32bits##input_m, \
+ kInputSIndices); \
+ }
+
+#define DEFINE_TEST_NEON_BYELEMENT_DIFF_SCALAR( \
+ mnemonic, input_d, input_n, input_m) \
+ TEST(mnemonic##_S_H_H) { \
+ CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
+ S, \
+ H, \
+ H, \
+ kInput32bits##input_d, \
+ kInput16bits##input_n, \
+ kInput16bits##input_m, \
+ kInputHIndices); \
+ } \
+ TEST(mnemonic##_D_S_S) { \
+ CALL_TEST_NEON_HELPER_BYELEMENT(mnemonic, \
+ D, \
+ S, \
+ S, \
+ kInput64bits##input_d, \
+ kInput32bits##input_n, \
+ kInput32bits##input_m, \
+ kInputSIndices); \
+ }
+
+
+#define CALL_TEST_NEON_HELPER_2OP2IMM( \
+ mnemonic, variant, input_d, input_imm1, input_n, input_imm2) \
+ { \
+ CALL_TEST_NEON_HELPER_OpImmOpImm(&MacroAssembler::mnemonic, \
+ mnemonic, \
+ variant, \
+ variant, \
+ input_d, \
+ input_imm1, \
+ input_n, \
+ input_imm2); \
+ }
+
+#define DEFINE_TEST_NEON_2OP2IMM( \
+ mnemonic, input_d, input_imm1, input_n, input_imm2) \
+ TEST(mnemonic##_B) { \
+ CALL_TEST_NEON_HELPER_2OP2IMM(mnemonic, \
+ 16B, \
+ kInput8bits##input_d, \
+ kInput8bitsImm##input_imm1, \
+ kInput8bits##input_n, \
+ kInput8bitsImm##input_imm2); \
+ } \
+ TEST(mnemonic##_H) { \
+ CALL_TEST_NEON_HELPER_2OP2IMM(mnemonic, \
+ 8H, \
+ kInput16bits##input_d, \
+ kInput16bitsImm##input_imm1, \
+ kInput16bits##input_n, \
+ kInput16bitsImm##input_imm2); \
+ } \
+ TEST(mnemonic##_S) { \
+ CALL_TEST_NEON_HELPER_2OP2IMM(mnemonic, \
+ 4S, \
+ kInput32bits##input_d, \
+ kInput32bitsImm##input_imm1, \
+ kInput32bits##input_n, \
+ kInput32bitsImm##input_imm2); \
+ } \
+ TEST(mnemonic##_D) { \
+ CALL_TEST_NEON_HELPER_2OP2IMM(mnemonic, \
+ 2D, \
+ kInput64bits##input_d, \
+ kInput64bitsImm##input_imm1, \
+ kInput64bits##input_n, \
+ kInput64bitsImm##input_imm2); \
+ }
// Advanced SIMD copy.
-DEFINE_TEST_NEON_2OP2IMM(ins,
- Basic, LaneCountFromZero,
- Basic, LaneCountFromZero)
+DEFINE_TEST_NEON_2OP2IMM(
+ ins, Basic, LaneCountFromZero, Basic, LaneCountFromZero)
DEFINE_TEST_NEON_2OPIMM_COPY(dup, Basic, LaneCountFromZero)
@@ -3890,7 +4242,8 @@
DEFINE_TEST_NEON_2OPIMM_NARROW(sqshrn, Basic, TypeWidth)
DEFINE_TEST_NEON_2OPIMM_NARROW(sqrshrn, Basic, TypeWidth)
DEFINE_TEST_NEON_2OPIMM_LONG(sshll, Basic, TypeWidthFromZero)
-DEFINE_TEST_NEON_2OPIMM_SD(scvtf, FixedPointConversions, \
+DEFINE_TEST_NEON_2OPIMM_SD(scvtf,
+ FixedPointConversions,
TypeWidthFromZeroToWidth)
DEFINE_TEST_NEON_2OPIMM_FP(fcvtzs, Conversions, TypeWidthFromZeroToWidth)
DEFINE_TEST_NEON_2OPIMM(ushr, Basic, TypeWidth)
@@ -3906,7 +4259,8 @@
DEFINE_TEST_NEON_2OPIMM_NARROW(uqshrn, Basic, TypeWidth)
DEFINE_TEST_NEON_2OPIMM_NARROW(uqrshrn, Basic, TypeWidth)
DEFINE_TEST_NEON_2OPIMM_LONG(ushll, Basic, TypeWidthFromZero)
-DEFINE_TEST_NEON_2OPIMM_SD(ucvtf, FixedPointConversions, \
+DEFINE_TEST_NEON_2OPIMM_SD(ucvtf,
+ FixedPointConversions,
TypeWidthFromZeroToWidth)
DEFINE_TEST_NEON_2OPIMM_FP(fcvtzu, Conversions, TypeWidthFromZeroToWidth)
@@ -3920,7 +4274,8 @@
DEFINE_TEST_NEON_2OPIMM_SCALAR(sqshl, Basic, TypeWidthFromZero)
DEFINE_TEST_NEON_2OPIMM_SCALAR_NARROW(sqshrn, Basic, TypeWidth)
DEFINE_TEST_NEON_2OPIMM_SCALAR_NARROW(sqrshrn, Basic, TypeWidth)
-DEFINE_TEST_NEON_2OPIMM_SCALAR_SD(scvtf, FixedPointConversions, \
+DEFINE_TEST_NEON_2OPIMM_SCALAR_SD(scvtf,
+ FixedPointConversions,
TypeWidthFromZeroToWidth)
DEFINE_TEST_NEON_2OPIMM_FP_SCALAR(fcvtzs, Conversions, TypeWidthFromZeroToWidth)
DEFINE_TEST_NEON_2OPIMM_SCALAR_D(ushr, Basic, TypeWidth)
@@ -3935,7 +4290,8 @@
DEFINE_TEST_NEON_2OPIMM_SCALAR_NARROW(sqrshrun, Basic, TypeWidth)
DEFINE_TEST_NEON_2OPIMM_SCALAR_NARROW(uqshrn, Basic, TypeWidth)
DEFINE_TEST_NEON_2OPIMM_SCALAR_NARROW(uqrshrn, Basic, TypeWidth)
-DEFINE_TEST_NEON_2OPIMM_SCALAR_SD(ucvtf, FixedPointConversions, \
+DEFINE_TEST_NEON_2OPIMM_SCALAR_SD(ucvtf,
+ FixedPointConversions,
TypeWidthFromZeroToWidth)
DEFINE_TEST_NEON_2OPIMM_FP_SCALAR(fcvtzu, Conversions, TypeWidthFromZeroToWidth)
diff --git a/test/aarch64/test-simulator-inputs-aarch64.h b/test/aarch64/test-simulator-inputs-aarch64.h
index a398625..049ce23 100644
--- a/test/aarch64/test-simulator-inputs-aarch64.h
+++ b/test/aarch64/test-simulator-inputs-aarch64.h
@@ -41,6 +41,7 @@
#endif
#define VIXL_AARCH64_TEST_SIMULATOR_INPUTS_AARCH64_H_
+// clang-format off
// Double values, stored as uint64_t representations. This ensures exact bit
// representation, and avoids the loss of NaNs and suchlike through C++ casts.
@@ -849,137 +850,100 @@
0xfffffffffffffffe, \
0xffffffffffffffff
+// clang-format on
// For most 2- and 3-op instructions, use only basic inputs. Because every
// combination is tested, the length of the output trace is very sensitive to
// the length of this list.
-static const uint64_t kInputDoubleBasic[] = { INPUT_DOUBLE_BASIC };
-static const uint32_t kInputFloatBasic[] = { INPUT_FLOAT_BASIC };
+static const uint64_t kInputDoubleBasic[] = {INPUT_DOUBLE_BASIC};
+static const uint32_t kInputFloatBasic[] = {INPUT_FLOAT_BASIC};
// TODO: Define different values when the traces file is split.
#define INPUT_DOUBLE_ACC_DESTINATION INPUT_DOUBLE_BASIC
#define INPUT_FLOAT_ACC_DESTINATION INPUT_FLOAT_BASIC
static const uint64_t kInputDoubleAccDestination[] = {
- INPUT_DOUBLE_ACC_DESTINATION
-};
+ INPUT_DOUBLE_ACC_DESTINATION};
static const uint32_t kInputFloatAccDestination[] = {
- INPUT_FLOAT_ACC_DESTINATION
-};
+ INPUT_FLOAT_ACC_DESTINATION};
// For conversions, include several extra inputs.
static const uint64_t kInputDoubleConversions[] = {
- INPUT_DOUBLE_BASIC
- INPUT_DOUBLE_CONVERSIONS
-};
+ INPUT_DOUBLE_BASIC INPUT_DOUBLE_CONVERSIONS};
static const uint32_t kInputFloatConversions[] = {
- INPUT_FLOAT_BASIC
- INPUT_FLOAT_CONVERSIONS
-};
+ INPUT_FLOAT_BASIC INPUT_FLOAT_CONVERSIONS};
-static const uint64_t kInput64bitsFixedPointConversions[] = {
- INPUT_64BITS_BASIC,
- INPUT_64BITS_FIXEDPOINT_CONVERSIONS
-};
+static const uint64_t kInput64bitsFixedPointConversions[] =
+ {INPUT_64BITS_BASIC, INPUT_64BITS_FIXEDPOINT_CONVERSIONS};
-static const uint32_t kInput32bitsFixedPointConversions[] = {
- INPUT_32BITS_BASIC,
- INPUT_32BITS_FIXEDPOINT_CONVERSIONS
-};
+static const uint32_t kInput32bitsFixedPointConversions[] =
+ {INPUT_32BITS_BASIC, INPUT_32BITS_FIXEDPOINT_CONVERSIONS};
static const uint16_t kInputFloat16Conversions[] = {
- INPUT_FLOAT16_BASIC
- INPUT_FLOAT16_CONVERSIONS
-};
+ INPUT_FLOAT16_BASIC INPUT_FLOAT16_CONVERSIONS};
-static const uint8_t kInput8bitsBasic[] = {
- INPUT_8BITS_BASIC
-};
+static const uint8_t kInput8bitsBasic[] = {INPUT_8BITS_BASIC};
-static const uint16_t kInput16bitsBasic[] = {
- INPUT_16BITS_BASIC
-};
+static const uint16_t kInput16bitsBasic[] = {INPUT_16BITS_BASIC};
-static const uint32_t kInput32bitsBasic[] = {
- INPUT_32BITS_BASIC
-};
+static const uint32_t kInput32bitsBasic[] = {INPUT_32BITS_BASIC};
-static const uint64_t kInput64bitsBasic[] = {
- INPUT_64BITS_BASIC
-};
+static const uint64_t kInput64bitsBasic[] = {INPUT_64BITS_BASIC};
-static const int kInput8bitsImmTypeWidth[] = {
- INPUT_8BITS_IMM_TYPEWIDTH
-};
+static const int kInput8bitsImmTypeWidth[] = {INPUT_8BITS_IMM_TYPEWIDTH};
-static const int kInput16bitsImmTypeWidth[] = {
- INPUT_16BITS_IMM_TYPEWIDTH
-};
+static const int kInput16bitsImmTypeWidth[] = {INPUT_16BITS_IMM_TYPEWIDTH};
-static const int kInput32bitsImmTypeWidth[] = {
- INPUT_32BITS_IMM_TYPEWIDTH
-};
+static const int kInput32bitsImmTypeWidth[] = {INPUT_32BITS_IMM_TYPEWIDTH};
-static const int kInput64bitsImmTypeWidth[] = {
- INPUT_64BITS_IMM_TYPEWIDTH
-};
+static const int kInput64bitsImmTypeWidth[] = {INPUT_64BITS_IMM_TYPEWIDTH};
static const int kInput8bitsImmTypeWidthFromZero[] = {
- INPUT_8BITS_IMM_TYPEWIDTH_FROMZERO
-};
+ INPUT_8BITS_IMM_TYPEWIDTH_FROMZERO};
static const int kInput16bitsImmTypeWidthFromZero[] = {
- INPUT_16BITS_IMM_TYPEWIDTH_FROMZERO
-};
+ INPUT_16BITS_IMM_TYPEWIDTH_FROMZERO};
static const int kInput32bitsImmTypeWidthFromZero[] = {
- INPUT_32BITS_IMM_TYPEWIDTH_FROMZERO
-};
+ INPUT_32BITS_IMM_TYPEWIDTH_FROMZERO};
static const int kInput64bitsImmTypeWidthFromZero[] = {
- INPUT_64BITS_IMM_TYPEWIDTH_FROMZERO
-};
+ INPUT_64BITS_IMM_TYPEWIDTH_FROMZERO};
static const int kInput32bitsImmTypeWidthFromZeroToWidth[] = {
- INPUT_32BITS_IMM_TYPEWIDTH_FROMZERO_TOWIDTH
-};
+ INPUT_32BITS_IMM_TYPEWIDTH_FROMZERO_TOWIDTH};
static const int kInput64bitsImmTypeWidthFromZeroToWidth[] = {
- INPUT_64BITS_IMM_TYPEWIDTH_FROMZERO_TOWIDTH
-};
+ INPUT_64BITS_IMM_TYPEWIDTH_FROMZERO_TOWIDTH};
// These immediate values are used only in 'shll{2}' tests.
-static const int kInput8bitsImmSHLL[] = { 8 };
-static const int kInput16bitsImmSHLL[] = { 16 };
-static const int kInput32bitsImmSHLL[] = { 32 };
+static const int kInput8bitsImmSHLL[] = {8};
+static const int kInput16bitsImmSHLL[] = {16};
+static const int kInput32bitsImmSHLL[] = {32};
-static const double kInputDoubleImmZero[] = { 0.0 };
+static const double kInputDoubleImmZero[] = {0.0};
-static const int kInput8bitsImmZero[] = { 0 };
+static const int kInput8bitsImmZero[] = {0};
-static const int kInput16bitsImmZero[] = { 0 };
+static const int kInput16bitsImmZero[] = {0};
-static const int kInput32bitsImmZero[] = { 0 };
+static const int kInput32bitsImmZero[] = {0};
-static const int kInput64bitsImmZero[] = { 0 };
+static const int kInput64bitsImmZero[] = {0};
static const int kInput8bitsImmLaneCountFromZero[] = {
- INPUT_8BITS_IMM_LANECOUNT_FROMZERO
-};
+ INPUT_8BITS_IMM_LANECOUNT_FROMZERO};
static const int kInput16bitsImmLaneCountFromZero[] = {
- INPUT_16BITS_IMM_LANECOUNT_FROMZERO
-};
+ INPUT_16BITS_IMM_LANECOUNT_FROMZERO};
static const int kInput32bitsImmLaneCountFromZero[] = {
- INPUT_32BITS_IMM_LANECOUNT_FROMZERO
-};
+ INPUT_32BITS_IMM_LANECOUNT_FROMZERO};
static const int kInput64bitsImmLaneCountFromZero[] = {
- INPUT_64BITS_IMM_LANECOUNT_FROMZERO
-};
+ INPUT_64BITS_IMM_LANECOUNT_FROMZERO};
// TODO: Define different values when the traces file is split.
#define INPUT_8BITS_ACC_DESTINATION INPUT_8BITS_BASIC
@@ -988,30 +952,19 @@
#define INPUT_64BITS_ACC_DESTINATION INPUT_64BITS_BASIC
static const uint8_t kInput8bitsAccDestination[] = {
- INPUT_8BITS_ACC_DESTINATION
-};
+ INPUT_8BITS_ACC_DESTINATION};
static const uint16_t kInput16bitsAccDestination[] = {
- INPUT_16BITS_ACC_DESTINATION
-};
+ INPUT_16BITS_ACC_DESTINATION};
static const uint32_t kInput32bitsAccDestination[] = {
- INPUT_32BITS_ACC_DESTINATION
-};
+ INPUT_32BITS_ACC_DESTINATION};
static const uint64_t kInput64bitsAccDestination[] = {
- INPUT_64BITS_ACC_DESTINATION
-};
+ INPUT_64BITS_ACC_DESTINATION};
-static const int kInputHIndices[] = {
- 0, 1, 2, 3,
- 4, 5, 6, 7
-};
+static const int kInputHIndices[] = {0, 1, 2, 3, 4, 5, 6, 7};
-static const int kInputSIndices[] = {
- 0, 1, 2, 3
-};
+static const int kInputSIndices[] = {0, 1, 2, 3};
-static const int kInputDIndices[] = {
- 0, 1
-};
+static const int kInputDIndices[] = {0, 1};
diff --git a/test/aarch64/test-simulator-traces-aarch64.h b/test/aarch64/test-simulator-traces-aarch64.h
index e9a9562..e374b0b 100644
--- a/test/aarch64/test-simulator-traces-aarch64.h
+++ b/test/aarch64/test-simulator-traces-aarch64.h
@@ -49,10 +49,10 @@
// ---------------------------------------------------------------------
// ADD DUMMY ARRAYS FOR NEW SIMULATOR TEST HERE.
// ---------------------------------------------------------------------
-const uint64_t kExpected_dummy_64[] = { 0 };
+const uint64_t kExpected_dummy_64[] = {0};
const size_t kExpectedCount_dummy_64 = 0;
-const uint32_t kExpected_dummy_32[] = { 0 };
+const uint32_t kExpected_dummy_32[] = {0};
const size_t kExpectedCount_dummy_32 = 0;
// ---------------------------------------------------------------------
diff --git a/test/aarch64/test-trace-aarch64.cc b/test/aarch64/test-trace-aarch64.cc
index a1a7927..673161e 100644
--- a/test/aarch64/test-trace-aarch64.cc
+++ b/test/aarch64/test-trace-aarch64.cc
@@ -48,7 +48,9 @@
#define TEST(name) TEST_(TRACE_##name)
static void GenerateTestSequenceBase(MacroAssembler* masm) {
- CodeBufferCheckScope guard(masm, masm->GetBuffer()->GetRemainingBytes());
+ ExactAssemblyScope guard(masm,
+ masm->GetBuffer()->GetRemainingBytes(),
+ ExactAssemblyScope::kMaximumSize);
__ adc(w3, w4, w5);
__ adc(x6, x7, x8);
@@ -390,7 +392,9 @@
static void GenerateTestSequenceFP(MacroAssembler* masm) {
- CodeBufferCheckScope guard(masm, masm->GetBuffer()->GetRemainingBytes());
+ ExactAssemblyScope guard(masm,
+ masm->GetBuffer()->GetRemainingBytes(),
+ ExactAssemblyScope::kMaximumSize);
// Scalar floating point instructions.
__ fabd(d13, d2, d19);
@@ -602,7 +606,9 @@
static void GenerateTestSequenceNEON(MacroAssembler* masm) {
- CodeBufferCheckScope guard(masm, masm->GetBuffer()->GetRemainingBytes());
+ ExactAssemblyScope guard(masm,
+ masm->GetBuffer()->GetRemainingBytes(),
+ ExactAssemblyScope::kMaximumSize);
// NEON integer instructions.
__ abs(d19, d0);
@@ -775,9 +781,15 @@
__ ext(v1.V16B(), v26.V16B(), v6.V16B(), 1);
__ ext(v2.V8B(), v30.V8B(), v1.V8B(), 1);
__ ld1(v18.V16B(), v19.V16B(), v20.V16B(), v21.V16B(), MemOperand(x0));
- __ ld1(v23.V16B(), v24.V16B(), v25.V16B(), v26.V16B(),
+ __ ld1(v23.V16B(),
+ v24.V16B(),
+ v25.V16B(),
+ v26.V16B(),
MemOperand(x1, x2, PostIndex));
- __ ld1(v5.V16B(), v6.V16B(), v7.V16B(), v8.V16B(),
+ __ ld1(v5.V16B(),
+ v6.V16B(),
+ v7.V16B(),
+ v8.V16B(),
MemOperand(x1, 64, PostIndex));
__ ld1(v18.V16B(), v19.V16B(), v20.V16B(), MemOperand(x0));
__ ld1(v13.V16B(), v14.V16B(), v15.V16B(), MemOperand(x1, x2, PostIndex));
@@ -789,9 +801,15 @@
__ ld1(v21.V16B(), MemOperand(x1, x2, PostIndex));
__ ld1(v4.V16B(), MemOperand(x1, 16, PostIndex));
__ ld1(v4.V1D(), v5.V1D(), v6.V1D(), v7.V1D(), MemOperand(x0));
- __ ld1(v17.V1D(), v18.V1D(), v19.V1D(), v20.V1D(),
+ __ ld1(v17.V1D(),
+ v18.V1D(),
+ v19.V1D(),
+ v20.V1D(),
MemOperand(x1, x2, PostIndex));
- __ ld1(v28.V1D(), v29.V1D(), v30.V1D(), v31.V1D(),
+ __ ld1(v28.V1D(),
+ v29.V1D(),
+ v30.V1D(),
+ v31.V1D(),
MemOperand(x1, 32, PostIndex));
__ ld1(v20.V1D(), v21.V1D(), v22.V1D(), MemOperand(x0));
__ ld1(v19.V1D(), v20.V1D(), v21.V1D(), MemOperand(x1, x2, PostIndex));
@@ -803,9 +821,15 @@
__ ld1(v11.V1D(), MemOperand(x1, x2, PostIndex));
__ ld1(v29.V1D(), MemOperand(x1, 8, PostIndex));
__ ld1(v28.V2D(), v29.V2D(), v30.V2D(), v31.V2D(), MemOperand(x0));
- __ ld1(v8.V2D(), v9.V2D(), v10.V2D(), v11.V2D(),
+ __ ld1(v8.V2D(),
+ v9.V2D(),
+ v10.V2D(),
+ v11.V2D(),
MemOperand(x1, x2, PostIndex));
- __ ld1(v14.V2D(), v15.V2D(), v16.V2D(), v17.V2D(),
+ __ ld1(v14.V2D(),
+ v15.V2D(),
+ v16.V2D(),
+ v17.V2D(),
MemOperand(x1, 64, PostIndex));
__ ld1(v26.V2D(), v27.V2D(), v28.V2D(), MemOperand(x0));
__ ld1(v5.V2D(), v6.V2D(), v7.V2D(), MemOperand(x1, x2, PostIndex));
@@ -817,9 +841,15 @@
__ ld1(v6.V2D(), MemOperand(x1, x2, PostIndex));
__ ld1(v15.V2D(), MemOperand(x1, 16, PostIndex));
__ ld1(v30.V2S(), v31.V2S(), v0.V2S(), v1.V2S(), MemOperand(x0));
- __ ld1(v24.V2S(), v25.V2S(), v26.V2S(), v27.V2S(),
+ __ ld1(v24.V2S(),
+ v25.V2S(),
+ v26.V2S(),
+ v27.V2S(),
MemOperand(x1, x2, PostIndex));
- __ ld1(v27.V2S(), v28.V2S(), v29.V2S(), v30.V2S(),
+ __ ld1(v27.V2S(),
+ v28.V2S(),
+ v29.V2S(),
+ v30.V2S(),
MemOperand(x1, 32, PostIndex));
__ ld1(v11.V2S(), v12.V2S(), v13.V2S(), MemOperand(x0));
__ ld1(v8.V2S(), v9.V2S(), v10.V2S(), MemOperand(x1, x2, PostIndex));
@@ -831,7 +861,10 @@
__ ld1(v0.V2S(), MemOperand(x1, x2, PostIndex));
__ ld1(v11.V2S(), MemOperand(x1, 8, PostIndex));
__ ld1(v16.V4H(), v17.V4H(), v18.V4H(), v19.V4H(), MemOperand(x0));
- __ ld1(v24.V4H(), v25.V4H(), v26.V4H(), v27.V4H(),
+ __ ld1(v24.V4H(),
+ v25.V4H(),
+ v26.V4H(),
+ v27.V4H(),
MemOperand(x1, x2, PostIndex));
__ ld1(v1.V4H(), v2.V4H(), v3.V4H(), v4.V4H(), MemOperand(x1, 32, PostIndex));
__ ld1(v30.V4H(), v31.V4H(), v0.V4H(), MemOperand(x0));
@@ -844,7 +877,10 @@
__ ld1(v1.V4H(), MemOperand(x1, x2, PostIndex));
__ ld1(v14.V4H(), MemOperand(x1, 8, PostIndex));
__ ld1(v26.V4S(), v27.V4S(), v28.V4S(), v29.V4S(), MemOperand(x0));
- __ ld1(v28.V4S(), v29.V4S(), v30.V4S(), v31.V4S(),
+ __ ld1(v28.V4S(),
+ v29.V4S(),
+ v30.V4S(),
+ v31.V4S(),
MemOperand(x1, x2, PostIndex));
__ ld1(v4.V4S(), v5.V4S(), v6.V4S(), v7.V4S(), MemOperand(x1, 64, PostIndex));
__ ld1(v2.V4S(), v3.V4S(), v4.V4S(), MemOperand(x0));
@@ -858,7 +894,10 @@
__ ld1(v0.V4S(), MemOperand(x1, 16, PostIndex));
__ ld1(v17.V8B(), v18.V8B(), v19.V8B(), v20.V8B(), MemOperand(x0));
__ ld1(v5.V8B(), v6.V8B(), v7.V8B(), v8.V8B(), MemOperand(x1, x2, PostIndex));
- __ ld1(v9.V8B(), v10.V8B(), v11.V8B(), v12.V8B(),
+ __ ld1(v9.V8B(),
+ v10.V8B(),
+ v11.V8B(),
+ v12.V8B(),
MemOperand(x1, 32, PostIndex));
__ ld1(v4.V8B(), v5.V8B(), v6.V8B(), MemOperand(x0));
__ ld1(v2.V8B(), v3.V8B(), v4.V8B(), MemOperand(x1, x2, PostIndex));
@@ -871,7 +910,10 @@
__ ld1(v28.V8B(), MemOperand(x1, 8, PostIndex));
__ ld1(v5.V8H(), v6.V8H(), v7.V8H(), v8.V8H(), MemOperand(x0));
__ ld1(v2.V8H(), v3.V8H(), v4.V8H(), v5.V8H(), MemOperand(x1, x2, PostIndex));
- __ ld1(v10.V8H(), v11.V8H(), v12.V8H(), v13.V8H(),
+ __ ld1(v10.V8H(),
+ v11.V8H(),
+ v12.V8H(),
+ v13.V8H(),
MemOperand(x1, 64, PostIndex));
__ ld1(v26.V8H(), v27.V8H(), v28.V8H(), MemOperand(x0));
__ ld1(v3.V8H(), v4.V8H(), v5.V8H(), MemOperand(x1, x2, PostIndex));
@@ -1033,34 +1075,61 @@
__ ld3r(v29.V8H(), v30.V8H(), v31.V8H(), MemOperand(x1, x2, PostIndex));
__ ld3r(v7.V8H(), v8.V8H(), v9.V8H(), MemOperand(x1, 6, PostIndex));
__ ld4(v3.V16B(), v4.V16B(), v5.V16B(), v6.V16B(), MemOperand(x0));
- __ ld4(v2.V16B(), v3.V16B(), v4.V16B(), v5.V16B(),
+ __ ld4(v2.V16B(),
+ v3.V16B(),
+ v4.V16B(),
+ v5.V16B(),
MemOperand(x1, x2, PostIndex));
- __ ld4(v5.V16B(), v6.V16B(), v7.V16B(), v8.V16B(),
+ __ ld4(v5.V16B(),
+ v6.V16B(),
+ v7.V16B(),
+ v8.V16B(),
MemOperand(x1, 64, PostIndex));
__ ld4(v18.V2D(), v19.V2D(), v20.V2D(), v21.V2D(), MemOperand(x0));
__ ld4(v4.V2D(), v5.V2D(), v6.V2D(), v7.V2D(), MemOperand(x1, x2, PostIndex));
- __ ld4(v29.V2D(), v30.V2D(), v31.V2D(), v0.V2D(),
+ __ ld4(v29.V2D(),
+ v30.V2D(),
+ v31.V2D(),
+ v0.V2D(),
MemOperand(x1, 64, PostIndex));
__ ld4(v27.V2S(), v28.V2S(), v29.V2S(), v30.V2S(), MemOperand(x0));
- __ ld4(v24.V2S(), v25.V2S(), v26.V2S(), v27.V2S(),
+ __ ld4(v24.V2S(),
+ v25.V2S(),
+ v26.V2S(),
+ v27.V2S(),
MemOperand(x1, x2, PostIndex));
__ ld4(v4.V2S(), v5.V2S(), v6.V2S(), v7.V2S(), MemOperand(x1, 32, PostIndex));
__ ld4(v16.V4H(), v17.V4H(), v18.V4H(), v19.V4H(), MemOperand(x0));
- __ ld4(v23.V4H(), v24.V4H(), v25.V4H(), v26.V4H(),
+ __ ld4(v23.V4H(),
+ v24.V4H(),
+ v25.V4H(),
+ v26.V4H(),
MemOperand(x1, x2, PostIndex));
__ ld4(v2.V4H(), v3.V4H(), v4.V4H(), v5.V4H(), MemOperand(x1, 32, PostIndex));
__ ld4(v7.V4S(), v8.V4S(), v9.V4S(), v10.V4S(), MemOperand(x0));
- __ ld4(v28.V4S(), v29.V4S(), v30.V4S(), v31.V4S(),
+ __ ld4(v28.V4S(),
+ v29.V4S(),
+ v30.V4S(),
+ v31.V4S(),
MemOperand(x1, x2, PostIndex));
- __ ld4(v29.V4S(), v30.V4S(), v31.V4S(), v0.V4S(),
+ __ ld4(v29.V4S(),
+ v30.V4S(),
+ v31.V4S(),
+ v0.V4S(),
MemOperand(x1, 64, PostIndex));
__ ld4(v15.V8B(), v16.V8B(), v17.V8B(), v18.V8B(), MemOperand(x0));
- __ ld4(v27.V8B(), v28.V8B(), v29.V8B(), v30.V8B(),
+ __ ld4(v27.V8B(),
+ v28.V8B(),
+ v29.V8B(),
+ v30.V8B(),
MemOperand(x1, x2, PostIndex));
__ ld4(v5.V8B(), v6.V8B(), v7.V8B(), v8.V8B(), MemOperand(x1, 32, PostIndex));
__ ld4(v25.V8H(), v26.V8H(), v27.V8H(), v28.V8H(), MemOperand(x0));
__ ld4(v2.V8H(), v3.V8H(), v4.V8H(), v5.V8H(), MemOperand(x1, x2, PostIndex));
- __ ld4(v20.V8H(), v21.V8H(), v22.V8H(), v23.V8H(),
+ __ ld4(v20.V8H(),
+ v21.V8H(),
+ v22.V8H(),
+ v23.V8H(),
MemOperand(x1, 64, PostIndex));
__ ld4(v20.B(), v21.B(), v22.B(), v23.B(), 3, MemOperand(x0));
__ ld4(v12.B(), v13.B(), v14.B(), v15.B(), 3, MemOperand(x1, x2, PostIndex));
@@ -1075,44 +1144,92 @@
__ ld4(v25.S(), v26.S(), v27.S(), v28.S(), 2, MemOperand(x1, x2, PostIndex));
__ ld4(v8.S(), v9.S(), v10.S(), v11.S(), 3, MemOperand(x1, 16, PostIndex));
__ ld4r(v14.V16B(), v15.V16B(), v16.V16B(), v17.V16B(), MemOperand(x0));
- __ ld4r(v13.V16B(), v14.V16B(), v15.V16B(), v16.V16B(),
+ __ ld4r(v13.V16B(),
+ v14.V16B(),
+ v15.V16B(),
+ v16.V16B(),
MemOperand(x1, x2, PostIndex));
- __ ld4r(v9.V16B(), v10.V16B(), v11.V16B(), v12.V16B(),
+ __ ld4r(v9.V16B(),
+ v10.V16B(),
+ v11.V16B(),
+ v12.V16B(),
MemOperand(x1, 4, PostIndex));
__ ld4r(v8.V1D(), v9.V1D(), v10.V1D(), v11.V1D(), MemOperand(x0));
- __ ld4r(v4.V1D(), v5.V1D(), v6.V1D(), v7.V1D(),
+ __ ld4r(v4.V1D(),
+ v5.V1D(),
+ v6.V1D(),
+ v7.V1D(),
MemOperand(x1, x2, PostIndex));
- __ ld4r(v26.V1D(), v27.V1D(), v28.V1D(), v29.V1D(),
+ __ ld4r(v26.V1D(),
+ v27.V1D(),
+ v28.V1D(),
+ v29.V1D(),
MemOperand(x1, 32, PostIndex));
__ ld4r(v19.V2D(), v20.V2D(), v21.V2D(), v22.V2D(), MemOperand(x0));
- __ ld4r(v28.V2D(), v29.V2D(), v30.V2D(), v31.V2D(),
+ __ ld4r(v28.V2D(),
+ v29.V2D(),
+ v30.V2D(),
+ v31.V2D(),
MemOperand(x1, x2, PostIndex));
- __ ld4r(v15.V2D(), v16.V2D(), v17.V2D(), v18.V2D(),
+ __ ld4r(v15.V2D(),
+ v16.V2D(),
+ v17.V2D(),
+ v18.V2D(),
MemOperand(x1, 32, PostIndex));
__ ld4r(v31.V2S(), v0.V2S(), v1.V2S(), v2.V2S(), MemOperand(x0));
- __ ld4r(v28.V2S(), v29.V2S(), v30.V2S(), v31.V2S(),
+ __ ld4r(v28.V2S(),
+ v29.V2S(),
+ v30.V2S(),
+ v31.V2S(),
MemOperand(x1, x2, PostIndex));
- __ ld4r(v11.V2S(), v12.V2S(), v13.V2S(), v14.V2S(),
+ __ ld4r(v11.V2S(),
+ v12.V2S(),
+ v13.V2S(),
+ v14.V2S(),
MemOperand(x1, 16, PostIndex));
__ ld4r(v19.V4H(), v20.V4H(), v21.V4H(), v22.V4H(), MemOperand(x0));
- __ ld4r(v22.V4H(), v23.V4H(), v24.V4H(), v25.V4H(),
+ __ ld4r(v22.V4H(),
+ v23.V4H(),
+ v24.V4H(),
+ v25.V4H(),
MemOperand(x1, x2, PostIndex));
- __ ld4r(v20.V4H(), v21.V4H(), v22.V4H(), v23.V4H(),
+ __ ld4r(v20.V4H(),
+ v21.V4H(),
+ v22.V4H(),
+ v23.V4H(),
MemOperand(x1, 8, PostIndex));
__ ld4r(v16.V4S(), v17.V4S(), v18.V4S(), v19.V4S(), MemOperand(x0));
- __ ld4r(v25.V4S(), v26.V4S(), v27.V4S(), v28.V4S(),
+ __ ld4r(v25.V4S(),
+ v26.V4S(),
+ v27.V4S(),
+ v28.V4S(),
MemOperand(x1, x2, PostIndex));
- __ ld4r(v23.V4S(), v24.V4S(), v25.V4S(), v26.V4S(),
+ __ ld4r(v23.V4S(),
+ v24.V4S(),
+ v25.V4S(),
+ v26.V4S(),
MemOperand(x1, 16, PostIndex));
__ ld4r(v22.V8B(), v23.V8B(), v24.V8B(), v25.V8B(), MemOperand(x0));
- __ ld4r(v27.V8B(), v28.V8B(), v29.V8B(), v30.V8B(),
+ __ ld4r(v27.V8B(),
+ v28.V8B(),
+ v29.V8B(),
+ v30.V8B(),
MemOperand(x1, x2, PostIndex));
- __ ld4r(v29.V8B(), v30.V8B(), v31.V8B(), v0.V8B(),
+ __ ld4r(v29.V8B(),
+ v30.V8B(),
+ v31.V8B(),
+ v0.V8B(),
MemOperand(x1, 4, PostIndex));
__ ld4r(v28.V8H(), v29.V8H(), v30.V8H(), v31.V8H(), MemOperand(x0));
- __ ld4r(v25.V8H(), v26.V8H(), v27.V8H(), v28.V8H(),
+ __ ld4r(v25.V8H(),
+ v26.V8H(),
+ v27.V8H(),
+ v28.V8H(),
MemOperand(x1, x2, PostIndex));
- __ ld4r(v22.V8H(), v23.V8H(), v24.V8H(), v25.V8H(),
+ __ ld4r(v22.V8H(),
+ v23.V8H(),
+ v24.V8H(),
+ v25.V8H(),
MemOperand(x1, 8, PostIndex));
__ mla(v29.V16B(), v7.V16B(), v26.V16B());
__ mla(v6.V2S(), v4.V2S(), v14.V2S());
@@ -1675,9 +1792,15 @@
__ ssubw2(v31.V4S(), v11.V4S(), v15.V8H());
__ ssubw2(v4.V8H(), v8.V8H(), v16.V16B());
__ st1(v18.V16B(), v19.V16B(), v20.V16B(), v21.V16B(), MemOperand(x0));
- __ st1(v10.V16B(), v11.V16B(), v12.V16B(), v13.V16B(),
+ __ st1(v10.V16B(),
+ v11.V16B(),
+ v12.V16B(),
+ v13.V16B(),
MemOperand(x1, x2, PostIndex));
- __ st1(v27.V16B(), v28.V16B(), v29.V16B(), v30.V16B(),
+ __ st1(v27.V16B(),
+ v28.V16B(),
+ v29.V16B(),
+ v30.V16B(),
MemOperand(x1, 64, PostIndex));
__ st1(v16.V16B(), v17.V16B(), v18.V16B(), MemOperand(x0));
__ st1(v21.V16B(), v22.V16B(), v23.V16B(), MemOperand(x1, x2, PostIndex));
@@ -1689,9 +1812,15 @@
__ st1(v28.V16B(), MemOperand(x1, x2, PostIndex));
__ st1(v2.V16B(), MemOperand(x1, 16, PostIndex));
__ st1(v29.V1D(), v30.V1D(), v31.V1D(), v0.V1D(), MemOperand(x0));
- __ st1(v12.V1D(), v13.V1D(), v14.V1D(), v15.V1D(),
+ __ st1(v12.V1D(),
+ v13.V1D(),
+ v14.V1D(),
+ v15.V1D(),
MemOperand(x1, x2, PostIndex));
- __ st1(v30.V1D(), v31.V1D(), v0.V1D(), v1.V1D(),
+ __ st1(v30.V1D(),
+ v31.V1D(),
+ v0.V1D(),
+ v1.V1D(),
MemOperand(x1, 32, PostIndex));
__ st1(v16.V1D(), v17.V1D(), v18.V1D(), MemOperand(x0));
__ st1(v3.V1D(), v4.V1D(), v5.V1D(), MemOperand(x1, x2, PostIndex));
@@ -1703,9 +1832,15 @@
__ st1(v27.V1D(), MemOperand(x1, x2, PostIndex));
__ st1(v23.V1D(), MemOperand(x1, 8, PostIndex));
__ st1(v2.V2D(), v3.V2D(), v4.V2D(), v5.V2D(), MemOperand(x0));
- __ st1(v22.V2D(), v23.V2D(), v24.V2D(), v25.V2D(),
+ __ st1(v22.V2D(),
+ v23.V2D(),
+ v24.V2D(),
+ v25.V2D(),
MemOperand(x1, x2, PostIndex));
- __ st1(v28.V2D(), v29.V2D(), v30.V2D(), v31.V2D(),
+ __ st1(v28.V2D(),
+ v29.V2D(),
+ v30.V2D(),
+ v31.V2D(),
MemOperand(x1, 64, PostIndex));
__ st1(v17.V2D(), v18.V2D(), v19.V2D(), MemOperand(x0));
__ st1(v16.V2D(), v17.V2D(), v18.V2D(), MemOperand(x1, x2, PostIndex));
@@ -1717,9 +1852,15 @@
__ st1(v29.V2D(), MemOperand(x1, x2, PostIndex));
__ st1(v20.V2D(), MemOperand(x1, 16, PostIndex));
__ st1(v22.V2S(), v23.V2S(), v24.V2S(), v25.V2S(), MemOperand(x0));
- __ st1(v8.V2S(), v9.V2S(), v10.V2S(), v11.V2S(),
+ __ st1(v8.V2S(),
+ v9.V2S(),
+ v10.V2S(),
+ v11.V2S(),
MemOperand(x1, x2, PostIndex));
- __ st1(v15.V2S(), v16.V2S(), v17.V2S(), v18.V2S(),
+ __ st1(v15.V2S(),
+ v16.V2S(),
+ v17.V2S(),
+ v18.V2S(),
MemOperand(x1, 32, PostIndex));
__ st1(v2.V2S(), v3.V2S(), v4.V2S(), MemOperand(x0));
__ st1(v23.V2S(), v24.V2S(), v25.V2S(), MemOperand(x1, x2, PostIndex));
@@ -1731,9 +1872,15 @@
__ st1(v11.V2S(), MemOperand(x1, x2, PostIndex));
__ st1(v17.V2S(), MemOperand(x1, 8, PostIndex));
__ st1(v6.V4H(), v7.V4H(), v8.V4H(), v9.V4H(), MemOperand(x0));
- __ st1(v9.V4H(), v10.V4H(), v11.V4H(), v12.V4H(),
+ __ st1(v9.V4H(),
+ v10.V4H(),
+ v11.V4H(),
+ v12.V4H(),
MemOperand(x1, x2, PostIndex));
- __ st1(v25.V4H(), v26.V4H(), v27.V4H(), v28.V4H(),
+ __ st1(v25.V4H(),
+ v26.V4H(),
+ v27.V4H(),
+ v28.V4H(),
MemOperand(x1, 32, PostIndex));
__ st1(v11.V4H(), v12.V4H(), v13.V4H(), MemOperand(x0));
__ st1(v10.V4H(), v11.V4H(), v12.V4H(), MemOperand(x1, x2, PostIndex));
@@ -1745,7 +1892,10 @@
__ st1(v8.V4H(), MemOperand(x1, x2, PostIndex));
__ st1(v30.V4H(), MemOperand(x1, 8, PostIndex));
__ st1(v3.V4S(), v4.V4S(), v5.V4S(), v6.V4S(), MemOperand(x0));
- __ st1(v25.V4S(), v26.V4S(), v27.V4S(), v28.V4S(),
+ __ st1(v25.V4S(),
+ v26.V4S(),
+ v27.V4S(),
+ v28.V4S(),
MemOperand(x1, x2, PostIndex));
__ st1(v5.V4S(), v6.V4S(), v7.V4S(), v8.V4S(), MemOperand(x1, 64, PostIndex));
__ st1(v31.V4S(), v0.V4S(), v1.V4S(), MemOperand(x0));
@@ -1758,9 +1908,15 @@
__ st1(v15.V4S(), MemOperand(x1, x2, PostIndex));
__ st1(v13.V4S(), MemOperand(x1, 16, PostIndex));
__ st1(v26.V8B(), v27.V8B(), v28.V8B(), v29.V8B(), MemOperand(x0));
- __ st1(v10.V8B(), v11.V8B(), v12.V8B(), v13.V8B(),
+ __ st1(v10.V8B(),
+ v11.V8B(),
+ v12.V8B(),
+ v13.V8B(),
MemOperand(x1, x2, PostIndex));
- __ st1(v15.V8B(), v16.V8B(), v17.V8B(), v18.V8B(),
+ __ st1(v15.V8B(),
+ v16.V8B(),
+ v17.V8B(),
+ v18.V8B(),
MemOperand(x1, 32, PostIndex));
__ st1(v19.V8B(), v20.V8B(), v21.V8B(), MemOperand(x0));
__ st1(v31.V8B(), v0.V8B(), v1.V8B(), MemOperand(x1, x2, PostIndex));
@@ -1773,7 +1929,10 @@
__ st1(v31.V8B(), MemOperand(x1, 8, PostIndex));
__ st1(v4.V8H(), v5.V8H(), v6.V8H(), v7.V8H(), MemOperand(x0));
__ st1(v3.V8H(), v4.V8H(), v5.V8H(), v6.V8H(), MemOperand(x1, x2, PostIndex));
- __ st1(v26.V8H(), v27.V8H(), v28.V8H(), v29.V8H(),
+ __ st1(v26.V8H(),
+ v27.V8H(),
+ v28.V8H(),
+ v29.V8H(),
MemOperand(x1, 64, PostIndex));
__ st1(v10.V8H(), v11.V8H(), v12.V8H(), MemOperand(x0));
__ st1(v21.V8H(), v22.V8H(), v23.V8H(), MemOperand(x1, x2, PostIndex));
@@ -1863,37 +2022,73 @@
__ st3(v11.S(), v12.S(), v13.S(), 1, MemOperand(x1, x2, PostIndex));
__ st3(v15.S(), v16.S(), v17.S(), 0, MemOperand(x1, 12, PostIndex));
__ st4(v22.V16B(), v23.V16B(), v24.V16B(), v25.V16B(), MemOperand(x0));
- __ st4(v24.V16B(), v25.V16B(), v26.V16B(), v27.V16B(),
+ __ st4(v24.V16B(),
+ v25.V16B(),
+ v26.V16B(),
+ v27.V16B(),
MemOperand(x1, x2, PostIndex));
- __ st4(v15.V16B(), v16.V16B(), v17.V16B(), v18.V16B(),
+ __ st4(v15.V16B(),
+ v16.V16B(),
+ v17.V16B(),
+ v18.V16B(),
MemOperand(x1, 64, PostIndex));
__ st4(v16.V2D(), v17.V2D(), v18.V2D(), v19.V2D(), MemOperand(x0));
- __ st4(v17.V2D(), v18.V2D(), v19.V2D(), v20.V2D(),
+ __ st4(v17.V2D(),
+ v18.V2D(),
+ v19.V2D(),
+ v20.V2D(),
MemOperand(x1, x2, PostIndex));
- __ st4(v9.V2D(), v10.V2D(), v11.V2D(), v12.V2D(),
+ __ st4(v9.V2D(),
+ v10.V2D(),
+ v11.V2D(),
+ v12.V2D(),
MemOperand(x1, 64, PostIndex));
__ st4(v23.V2S(), v24.V2S(), v25.V2S(), v26.V2S(), MemOperand(x0));
- __ st4(v15.V2S(), v16.V2S(), v17.V2S(), v18.V2S(),
+ __ st4(v15.V2S(),
+ v16.V2S(),
+ v17.V2S(),
+ v18.V2S(),
MemOperand(x1, x2, PostIndex));
- __ st4(v24.V2S(), v25.V2S(), v26.V2S(), v27.V2S(),
+ __ st4(v24.V2S(),
+ v25.V2S(),
+ v26.V2S(),
+ v27.V2S(),
MemOperand(x1, 32, PostIndex));
__ st4(v14.V4H(), v15.V4H(), v16.V4H(), v17.V4H(), MemOperand(x0));
- __ st4(v18.V4H(), v19.V4H(), v20.V4H(), v21.V4H(),
+ __ st4(v18.V4H(),
+ v19.V4H(),
+ v20.V4H(),
+ v21.V4H(),
MemOperand(x1, x2, PostIndex));
__ st4(v1.V4H(), v2.V4H(), v3.V4H(), v4.V4H(), MemOperand(x1, 32, PostIndex));
__ st4(v13.V4S(), v14.V4S(), v15.V4S(), v16.V4S(), MemOperand(x0));
__ st4(v6.V4S(), v7.V4S(), v8.V4S(), v9.V4S(), MemOperand(x1, x2, PostIndex));
- __ st4(v15.V4S(), v16.V4S(), v17.V4S(), v18.V4S(),
+ __ st4(v15.V4S(),
+ v16.V4S(),
+ v17.V4S(),
+ v18.V4S(),
MemOperand(x1, 64, PostIndex));
__ st4(v26.V8B(), v27.V8B(), v28.V8B(), v29.V8B(), MemOperand(x0));
- __ st4(v25.V8B(), v26.V8B(), v27.V8B(), v28.V8B(),
+ __ st4(v25.V8B(),
+ v26.V8B(),
+ v27.V8B(),
+ v28.V8B(),
MemOperand(x1, x2, PostIndex));
- __ st4(v19.V8B(), v20.V8B(), v21.V8B(), v22.V8B(),
+ __ st4(v19.V8B(),
+ v20.V8B(),
+ v21.V8B(),
+ v22.V8B(),
MemOperand(x1, 32, PostIndex));
__ st4(v19.V8H(), v20.V8H(), v21.V8H(), v22.V8H(), MemOperand(x0));
- __ st4(v15.V8H(), v16.V8H(), v17.V8H(), v18.V8H(),
+ __ st4(v15.V8H(),
+ v16.V8H(),
+ v17.V8H(),
+ v18.V8H(),
MemOperand(x1, x2, PostIndex));
- __ st4(v31.V8H(), v0.V8H(), v1.V8H(), v2.V8H(),
+ __ st4(v31.V8H(),
+ v0.V8H(),
+ v1.V8H(),
+ v2.V8H(),
MemOperand(x1, 64, PostIndex));
__ st4(v0.B(), v1.B(), v2.B(), v3.B(), 13, MemOperand(x0));
__ st4(v4.B(), v5.B(), v6.B(), v7.B(), 10, MemOperand(x1, x2, PostIndex));
@@ -1938,7 +2133,11 @@
__ sxtl2(v6.V2D(), v7.V4S());
__ sxtl2(v9.V4S(), v27.V8H());
__ sxtl2(v16.V8H(), v16.V16B());
- __ tbl(v25.V16B(), v17.V16B(), v18.V16B(), v19.V16B(), v20.V16B(),
+ __ tbl(v25.V16B(),
+ v17.V16B(),
+ v18.V16B(),
+ v19.V16B(),
+ v20.V16B(),
v22.V16B());
__ tbl(v28.V16B(), v13.V16B(), v14.V16B(), v15.V16B(), v4.V16B());
__ tbl(v3.V16B(), v0.V16B(), v1.V16B(), v2.V16B());
@@ -2312,7 +2511,9 @@
static void GenerateTestSequenceNEONFP(MacroAssembler* masm) {
- CodeBufferCheckScope guard(masm, masm->GetBuffer()->GetRemainingBytes());
+ ExactAssemblyScope guard(masm,
+ masm->GetBuffer()->GetRemainingBytes(),
+ ExactAssemblyScope::kMaximumSize);
// NEON floating point instructions.
__ fabd(v3.V2D(), v25.V2D(), v8.V2D());
@@ -2539,9 +2740,9 @@
static void MaskAddresses(const char* trace) {
- // Hexadecimal expressions of the form `\xab` do not work out-of-the box with
- // BSD `sed`. So we use ANSI-C quoting to have the regular expressions below
- // work both on Linux and BSD (and macOS).
+// Hexadecimal expressions of the form `\xab` do not work out-of-the box with
+// BSD `sed`. So we use ANSI-C quoting to have the regular expressions below
+// work both on Linux and BSD (and macOS).
#ifdef __APPLE__
#define MAYBE_ANSI_C_QUOTE "$"
#define HEX(val) "\\x" #val
@@ -2557,26 +2758,29 @@
struct {
const char* search;
const char* replace;
- } patterns[] = {
- // Mask registers that hold addresses that change from run to run.
- {"((x0|x1|x2|sp): " COLOUR "0x)[0-9a-f]{16}", ESCAPE(1) "~~~~~~~~~~~~~~~~"},
- // Mask accessed memory addresses.
- {"((<-|->) " COLOUR "0x)[0-9a-f]{16}", ESCAPE(1) "~~~~~~~~~~~~~~~~"},
- // Mask instruction addresses.
- {"^0x[0-9a-f]{16}", "0x~~~~~~~~~~~~~~~~"},
- // Mask branch targets.
- {"(Branch" COLOUR " to 0x)[0-9a-f]{16}", ESCAPE(1) "~~~~~~~~~~~~~~~~"},
- {"addr 0x[0-9a-f]+", "addr 0x~~~~~~~~~~~~~~~~"}
- };
+ } patterns[] =
+ {// Mask registers that hold addresses that change from run to run.
+ {"((x0|x1|x2|sp): " COLOUR "0x)[0-9a-f]{16}",
+ ESCAPE(1) "~~~~~~~~~~~~~~~~"},
+ // Mask accessed memory addresses.
+ {"((<-|->) " COLOUR "0x)[0-9a-f]{16}", ESCAPE(1) "~~~~~~~~~~~~~~~~"},
+ // Mask instruction addresses.
+ {"^0x[0-9a-f]{16}", "0x~~~~~~~~~~~~~~~~"},
+ // Mask branch targets.
+ {"(Branch" COLOUR " to 0x)[0-9a-f]{16}", ESCAPE(1) "~~~~~~~~~~~~~~~~"},
+ {"addr 0x[0-9a-f]+", "addr 0x~~~~~~~~~~~~~~~~"}};
const size_t patterns_length = sizeof(patterns) / sizeof(patterns[0]);
// Rewrite `trace`, masking addresses and other values that legitimately vary
// from run to run.
char command[1024];
for (size_t i = 0; i < patterns_length; i++) {
- size_t length =
- snprintf(command, sizeof(command),
- "sed %s " MAYBE_ANSI_C_QUOTE "'s/%s/%s/' '%s'",
- sed_options, patterns[i].search, patterns[i].replace, trace);
+ size_t length = snprintf(command,
+ sizeof(command),
+ "sed %s " MAYBE_ANSI_C_QUOTE "'s/%s/%s/' '%s'",
+ sed_options,
+ patterns[i].search,
+ patterns[i].replace,
+ trace);
VIXL_CHECK(length < sizeof(command));
VIXL_CHECK(system(command) == 0);
}
@@ -2601,8 +2805,7 @@
const int kScratchSize = 64 * KBytes;
const int kScratchGuardSize = 128;
char scratch_buffer[kScratchSize + kScratchGuardSize];
- for (size_t i = 0;
- i < (sizeof(scratch_buffer) / sizeof(scratch_buffer[0]));
+ for (size_t i = 0; i < (sizeof(scratch_buffer) / sizeof(scratch_buffer[0]));
i++) {
scratch_buffer[i] = i & 0xff;
}
@@ -2611,7 +2814,7 @@
// Used for pre-/post-index addressing.
simulator.WriteRegister(1, scratch_buffer);
- const int kPostIndexRegisterStep = 13; // Arbitrary interesting value.
+ const int kPostIndexRegisterStep = 13; // Arbitrary interesting value.
// Used for post-index offsets.
simulator.WriteRegister(2, kPostIndexRegisterStep);
@@ -2666,8 +2869,11 @@
} else {
// Check trace_stream against ref_file.
char command[1024];
- size_t length = snprintf(command, sizeof(command),
- "diff -u %s %s", ref_file, trace_stream_filename);
+ size_t length = snprintf(command,
+ sizeof(command),
+ "diff -u %s %s",
+ ref_file,
+ trace_stream_filename);
VIXL_CHECK(length < sizeof(command));
trace_matched_reference = (system(command) == 0);
}
@@ -2687,44 +2893,24 @@
#define REF(name) "test/test-trace-reference/" name
// Test individual options.
-TEST(disasm) {
- TraceTestHelper(false, LOG_DISASM, REF("log-disasm"));
-}
-TEST(regs) {
- TraceTestHelper(false, LOG_REGS, REF("log-regs"));
-}
-TEST(vregs) {
- TraceTestHelper(false, LOG_VREGS, REF("log-vregs"));
-}
-TEST(sysregs) {
- TraceTestHelper(false, LOG_SYSREGS, REF("log-sysregs"));
-}
-TEST(write) {
- TraceTestHelper(false, LOG_WRITE, REF("log-write"));
-}
-TEST(branch) {
- TraceTestHelper(false, LOG_WRITE, REF("log-branch"));
-}
+TEST(disasm) { TraceTestHelper(false, LOG_DISASM, REF("log-disasm")); }
+TEST(regs) { TraceTestHelper(false, LOG_REGS, REF("log-regs")); }
+TEST(vregs) { TraceTestHelper(false, LOG_VREGS, REF("log-vregs")); }
+TEST(sysregs) { TraceTestHelper(false, LOG_SYSREGS, REF("log-sysregs")); }
+TEST(write) { TraceTestHelper(false, LOG_WRITE, REF("log-write")); }
+TEST(branch) { TraceTestHelper(false, LOG_WRITE, REF("log-branch")); }
// Test standard combinations.
-TEST(none) {
- TraceTestHelper(false, LOG_NONE, REF("log-none"));
-}
-TEST(state) {
- TraceTestHelper(false, LOG_STATE, REF("log-state"));
-}
-TEST(all) {
- TraceTestHelper(false, LOG_ALL, REF("log-all"));
-}
+TEST(none) { TraceTestHelper(false, LOG_NONE, REF("log-none")); }
+TEST(state) { TraceTestHelper(false, LOG_STATE, REF("log-state")); }
+TEST(all) { TraceTestHelper(false, LOG_ALL, REF("log-all")); }
// Test individual options (with colour).
TEST(disasm_colour) {
TraceTestHelper(true, LOG_DISASM, REF("log-disasm-colour"));
}
-TEST(regs_colour) {
- TraceTestHelper(true, LOG_REGS, REF("log-regs-colour"));
-}
+TEST(regs_colour) { TraceTestHelper(true, LOG_REGS, REF("log-regs-colour")); }
TEST(vregs_colour) {
TraceTestHelper(true, LOG_VREGS, REF("log-vregs-colour"));
}
@@ -2739,15 +2925,11 @@
}
// Test standard combinations (with colour).
-TEST(none_colour) {
- TraceTestHelper(true, LOG_NONE, REF("log-none-colour"));
-}
+TEST(none_colour) { TraceTestHelper(true, LOG_NONE, REF("log-none-colour")); }
TEST(state_colour) {
TraceTestHelper(true, LOG_STATE, REF("log-state-colour"));
}
-TEST(all_colour) {
- TraceTestHelper(true, LOG_ALL, REF("log-all-colour"));
-}
+TEST(all_colour) { TraceTestHelper(true, LOG_ALL, REF("log-all-colour")); }
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
diff --git a/test/aarch64/test-utils-aarch64.cc b/test/aarch64/test-utils-aarch64.cc
index 881f18d..eafe273 100644
--- a/test/aarch64/test-utils-aarch64.cc
+++ b/test/aarch64/test-utils-aarch64.cc
@@ -42,8 +42,7 @@
// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
-const double kFP64SignallingNaN =
- RawbitsToDouble(UINT64_C(0x7ff000007f800001));
+const double kFP64SignallingNaN = RawbitsToDouble(UINT64_C(0x7ff000007f800001));
const float kFP32SignallingNaN = RawbitsToFloat(0x7f800001);
// A similar value, but as a quiet NaN.
@@ -54,7 +53,8 @@
bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result) {
if (result != expected) {
printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
- expected, result);
+ expected,
+ result);
}
return expected == result;
@@ -64,7 +64,8 @@
bool Equal64(uint64_t expected, const RegisterDump*, uint64_t result) {
if (result != expected) {
printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
- expected, result);
+ expected,
+ result);
}
return expected == result;
@@ -73,9 +74,13 @@
bool Equal128(vec128_t expected, const RegisterDump*, vec128_t result) {
if ((result.h != expected.h) || (result.l != expected.l)) {
- printf("Expected 0x%016" PRIx64 "%016" PRIx64 "\t "
+ printf("Expected 0x%016" PRIx64 "%016" PRIx64
+ "\t "
"Found 0x%016" PRIx64 "%016" PRIx64 "\n",
- expected.h, expected.l, result.h, result.l);
+ expected.h,
+ expected.l,
+ result.h,
+ result.l);
}
return ((expected.h == result.h) && (expected.l == result.l));
@@ -88,12 +93,16 @@
} else {
if (std::isnan(expected) || (expected == 0.0)) {
printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
- FloatToRawbits(expected), FloatToRawbits(result));
+ FloatToRawbits(expected),
+ FloatToRawbits(result));
} else {
- printf("Expected %.9f (0x%08" PRIx32 ")\t "
+ printf("Expected %.9f (0x%08" PRIx32
+ ")\t "
"Found %.9f (0x%08" PRIx32 ")\n",
- expected, FloatToRawbits(expected),
- result, FloatToRawbits(result));
+ expected,
+ FloatToRawbits(expected),
+ result,
+ FloatToRawbits(result));
}
return false;
}
@@ -107,12 +116,16 @@
if (std::isnan(expected) || (expected == 0.0)) {
printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
- DoubleToRawbits(expected), DoubleToRawbits(result));
+ DoubleToRawbits(expected),
+ DoubleToRawbits(result));
} else {
- printf("Expected %.17f (0x%016" PRIx64 ")\t "
+ printf("Expected %.17f (0x%016" PRIx64
+ ")\t "
"Found %.17f (0x%016" PRIx64 ")\n",
- expected, DoubleToRawbits(expected),
- result, DoubleToRawbits(result));
+ expected,
+ DoubleToRawbits(expected),
+ result,
+ DoubleToRawbits(result));
}
return false;
}
@@ -125,7 +138,8 @@
int64_t result_x = core->xreg(reg.GetCode());
if ((result_x & 0xffffffff00000000) != 0) {
printf("Expected 0x%08" PRIx32 "\t Found 0x%016" PRIx64 "\n",
- expected, result_x);
+ expected,
+ result_x);
return false;
}
uint32_t result_w = core->wreg(reg.GetCode());
@@ -160,7 +174,9 @@
uint64_t result_64 = core->dreg_bits(fpreg.GetCode());
if ((result_64 & 0xffffffff00000000) != 0) {
printf("Expected 0x%08" PRIx32 " (%f)\t Found 0x%016" PRIx64 "\n",
- FloatToRawbits(expected), expected, result_64);
+ FloatToRawbits(expected),
+ expected,
+ result_64);
return false;
}
@@ -195,24 +211,16 @@
}
-static char FlagN(uint32_t flags) {
- return (flags & NFlag) ? 'N' : 'n';
-}
+static char FlagN(uint32_t flags) { return (flags & NFlag) ? 'N' : 'n'; }
-static char FlagZ(uint32_t flags) {
- return (flags & ZFlag) ? 'Z' : 'z';
-}
+static char FlagZ(uint32_t flags) { return (flags & ZFlag) ? 'Z' : 'z'; }
-static char FlagC(uint32_t flags) {
- return (flags & CFlag) ? 'C' : 'c';
-}
+static char FlagC(uint32_t flags) { return (flags & CFlag) ? 'C' : 'c'; }
-static char FlagV(uint32_t flags) {
- return (flags & VFlag) ? 'V' : 'v';
-}
+static char FlagV(uint32_t flags) { return (flags & VFlag) ? 'V' : 'v'; }
bool EqualNzcv(uint32_t expected, uint32_t result) {
@@ -220,8 +228,14 @@
VIXL_ASSERT((result & ~NZCVFlag) == 0);
if (result != expected) {
printf("Expected: %c%c%c%c\t Found: %c%c%c%c\n",
- FlagN(expected), FlagZ(expected), FlagC(expected), FlagV(expected),
- FlagN(result), FlagZ(result), FlagC(result), FlagV(result));
+ FlagN(expected),
+ FlagZ(expected),
+ FlagC(expected),
+ FlagV(expected),
+ FlagN(result),
+ FlagZ(result),
+ FlagC(result),
+ FlagV(result));
return false;
}
@@ -233,7 +247,9 @@
for (unsigned i = 0; i < kNumberOfRegisters; i++) {
if (a->xreg(i) != b->xreg(i)) {
printf("x%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
- i, a->xreg(i), b->xreg(i));
+ i,
+ a->xreg(i),
+ b->xreg(i));
return false;
}
}
@@ -243,7 +259,9 @@
uint64_t b_bits = b->dreg_bits(i);
if (a_bits != b_bits) {
printf("d%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
- i, a_bits, b_bits);
+ i,
+ a_bits,
+ b_bits);
return false;
}
}
@@ -252,8 +270,12 @@
}
-RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
- int reg_size, int reg_count, RegList allowed) {
+RegList PopulateRegisterArray(Register* w,
+ Register* x,
+ Register* r,
+ int reg_size,
+ int reg_count,
+ RegList allowed) {
RegList list = 0;
int i = 0;
for (unsigned n = 0; (n < kNumberOfRegisters) && (i < reg_count); n++) {
@@ -279,8 +301,12 @@
}
-RegList PopulateFPRegisterArray(FPRegister* s, FPRegister* d, FPRegister* v,
- int reg_size, int reg_count, RegList allowed) {
+RegList PopulateFPRegisterArray(FPRegister* s,
+ FPRegister* d,
+ FPRegister* v,
+ int reg_size,
+ int reg_count,
+ RegList allowed) {
RegList list = 0;
int i = 0;
for (unsigned n = 0; (n < kNumberOfFPRegisters) && (i < reg_count); n++) {
@@ -403,35 +429,40 @@
// Dump X registers.
__ Add(dump, dump_base, x_offset);
for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
- __ Stp(Register::GetXRegFromCode(i), Register::GetXRegFromCode(i + 1),
+ __ Stp(Register::GetXRegFromCode(i),
+ Register::GetXRegFromCode(i + 1),
MemOperand(dump, i * kXRegSizeInBytes));
}
// Dump W registers.
__ Add(dump, dump_base, w_offset);
for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
- __ Stp(Register::GetWRegFromCode(i), Register::GetWRegFromCode(i + 1),
+ __ Stp(Register::GetWRegFromCode(i),
+ Register::GetWRegFromCode(i + 1),
MemOperand(dump, i * kWRegSizeInBytes));
}
// Dump D registers.
__ Add(dump, dump_base, d_offset);
for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
- __ Stp(FPRegister::GetDRegFromCode(i), FPRegister::GetDRegFromCode(i + 1),
+ __ Stp(FPRegister::GetDRegFromCode(i),
+ FPRegister::GetDRegFromCode(i + 1),
MemOperand(dump, i * kDRegSizeInBytes));
}
// Dump S registers.
__ Add(dump, dump_base, s_offset);
for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
- __ Stp(FPRegister::GetSRegFromCode(i), FPRegister::GetSRegFromCode(i + 1),
+ __ Stp(FPRegister::GetSRegFromCode(i),
+ FPRegister::GetSRegFromCode(i + 1),
MemOperand(dump, i * kSRegSizeInBytes));
}
// Dump Q registers.
__ Add(dump, dump_base, q_offset);
for (unsigned i = 0; i < kNumberOfVRegisters; i += 2) {
- __ Stp(VRegister::GetQRegFromCode(i), VRegister::GetQRegFromCode(i + 1),
+ __ Stp(VRegister::GetQRegFromCode(i),
+ VRegister::GetQRegFromCode(i + 1),
MemOperand(dump, i * kQRegSizeInBytes));
}
diff --git a/test/aarch64/test-utils-aarch64.h b/test/aarch64/test-utils-aarch64.h
index b33c072..f12bdb0 100644
--- a/test/aarch64/test-utils-aarch64.h
+++ b/test/aarch64/test-utils-aarch64.h
@@ -113,9 +113,7 @@
return RawbitsToDouble(dreg_bits(code));
}
- inline vec128_t qreg(unsigned code) const {
- return dump_.q_[code];
- }
+ inline vec128_t qreg(unsigned code) const { return dump_.q_[code]; }
// Stack pointer accessors.
inline int64_t spreg() const {
@@ -135,9 +133,7 @@
return dump_.flags_ & Flags_mask;
}
- inline bool IsComplete() const {
- return completed_;
- }
+ inline bool IsComplete() const { return completed_; }
private:
// Indicate whether the dump operation has been completed.
@@ -202,17 +198,24 @@
bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg);
bool Equal64(uint64_t expected, const RegisterDump* core, const Register& reg);
-bool Equal64(uint64_t expected, const RegisterDump* core, const VRegister& vreg);
+bool Equal64(uint64_t expected,
+ const RegisterDump* core,
+ const VRegister& vreg);
-bool EqualFP32(float expected, const RegisterDump* core,
+bool EqualFP32(float expected,
+ const RegisterDump* core,
const FPRegister& fpreg);
-bool EqualFP64(double expected, const RegisterDump* core,
+bool EqualFP64(double expected,
+ const RegisterDump* core,
const FPRegister& fpreg);
-bool Equal64(const Register& reg0, const RegisterDump* core,
+bool Equal64(const Register& reg0,
+ const RegisterDump* core,
const Register& reg1);
-bool Equal128(uint64_t expected_h, uint64_t expected_l,
- const RegisterDump* core, const VRegister& reg);
+bool Equal128(uint64_t expected_h,
+ uint64_t expected_l,
+ const RegisterDump* core,
+ const VRegister& reg);
bool EqualNzcv(uint32_t expected, uint32_t result);
@@ -228,12 +231,20 @@
// Any of w, x, or r can be NULL if they are not required.
//
// The return value is a RegList indicating which registers were allocated.
-RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
- int reg_size, int reg_count, RegList allowed);
+RegList PopulateRegisterArray(Register* w,
+ Register* x,
+ Register* r,
+ int reg_size,
+ int reg_count,
+ RegList allowed);
// As PopulateRegisterArray, but for floating-point registers.
-RegList PopulateFPRegisterArray(FPRegister* s, FPRegister* d, FPRegister* v,
- int reg_size, int reg_count, RegList allowed);
+RegList PopulateFPRegisterArray(FPRegister* s,
+ FPRegister* d,
+ FPRegister* v,
+ int reg_size,
+ int reg_count,
+ RegList allowed);
// Ovewrite the contents of the specified registers. This enables tests to
// check that register contents are written in cases where it's likely that the
@@ -243,11 +254,13 @@
// registers, a subsequent write into an aliased W register should clear the
// top word anyway, so clobbering the full X registers should make tests more
// rigorous.
-void Clobber(MacroAssembler* masm, RegList reg_list,
+void Clobber(MacroAssembler* masm,
+ RegList reg_list,
uint64_t const value = 0xfedcba9876543210);
// As Clobber, but for FP registers.
-void ClobberFP(MacroAssembler* masm, RegList reg_list,
+void ClobberFP(MacroAssembler* masm,
+ RegList reg_list,
double const value = kFP64SignallingNaN);
// As Clobber, but for a CPURegList with either FP or integer registers. When
diff --git a/test/test-aborts.cc b/test/test-aborts.cc
index 47a83a5..e0c39cf 100644
--- a/test/test-aborts.cc
+++ b/test/test-aborts.cc
@@ -37,21 +37,24 @@
namespace vixl {
-#define TEST(name, code, expected_prefix) \
- TEST_(ABORTS_##name) { \
- try { \
- code; \
- printf("\n%s:%d\nNo exception raised.\n", __FILE__, __LINE__); \
- abort(); \
- } catch (std::runtime_error e) { \
- size_t prefix_length = strlen(expected_prefix); \
- if (strncmp(expected_prefix, e.what(), prefix_length) != 0) { \
- printf("\n%s:%d\nFound:\n%sExpected:\n%s...\n", \
- __FILE__, __LINE__, e.what(), expected_prefix); \
- abort(); \
- } \
- } \
- }
+#define TEST(name, code, expected_prefix) \
+ TEST_(ABORTS_##name) { \
+ try { \
+ code; \
+ printf("\n%s:%d\nNo exception raised.\n", __FILE__, __LINE__); \
+ abort(); \
+ } catch (std::runtime_error e) { \
+ size_t prefix_length = strlen(expected_prefix); \
+ if (strncmp(expected_prefix, e.what(), prefix_length) != 0) { \
+ printf("\n%s:%d\nFound:\n%sExpected:\n%s...\n", \
+ __FILE__, \
+ __LINE__, \
+ e.what(), \
+ expected_prefix); \
+ abort(); \
+ } \
+ } \
+ }
TEST(abort, VIXL_ABORT(), "Aborting in ")
TEST(abort_with_msg, VIXL_ABORT_WITH_MSG("message\n"), "message\nin ")
diff --git a/test/test-api.cc b/test/test-api.cc
index cefcdd4..e84fa0d 100644
--- a/test/test-api.cc
+++ b/test/test-api.cc
@@ -47,127 +47,127 @@
// Test IsUintN() and IsIntN() against various values and integral types.
TEST(IsUint_IsInt) {
UintIntTest<uint32_t> test_little_values_unsigned[] = {
- { true, true, 1, UINT32_C(0x0) },
- { true, false, 1, UINT32_C(0x1) },
- { false, false, 1, UINT32_C(0x2) },
- { false, false, 1, UINT32_C(0x3) },
- { false, false, 1, UINT32_C(0x4) },
- { false, false, 1, UINT32_C(0x5) },
- { false, false, 1, UINT32_C(0x6) },
- { false, false, 1, UINT32_C(0x7) },
- { false, false, 1, UINT32_C(0x8) },
- { false, false, 1, UINT32_C(0x9) },
- { false, false, 1, UINT32_C(0xa) },
- { false, false, 1, UINT32_C(0xb) },
- { false, false, 1, UINT32_C(0xc) },
- { false, false, 1, UINT32_C(0xd) },
- { false, false, 1, UINT32_C(0xe) },
- { false, false, 1, UINT32_C(0xf) },
+ {true, true, 1, UINT32_C(0x0)},
+ {true, false, 1, UINT32_C(0x1)},
+ {false, false, 1, UINT32_C(0x2)},
+ {false, false, 1, UINT32_C(0x3)},
+ {false, false, 1, UINT32_C(0x4)},
+ {false, false, 1, UINT32_C(0x5)},
+ {false, false, 1, UINT32_C(0x6)},
+ {false, false, 1, UINT32_C(0x7)},
+ {false, false, 1, UINT32_C(0x8)},
+ {false, false, 1, UINT32_C(0x9)},
+ {false, false, 1, UINT32_C(0xa)},
+ {false, false, 1, UINT32_C(0xb)},
+ {false, false, 1, UINT32_C(0xc)},
+ {false, false, 1, UINT32_C(0xd)},
+ {false, false, 1, UINT32_C(0xe)},
+ {false, false, 1, UINT32_C(0xf)},
- { true, true, 2, UINT32_C(0x0) },
- { true, true, 2, UINT32_C(0x1) },
- { true, false, 2, UINT32_C(0x2) },
- { true, false, 2, UINT32_C(0x3) },
- { false, false, 2, UINT32_C(0x4) },
- { false, false, 2, UINT32_C(0x5) },
- { false, false, 2, UINT32_C(0x6) },
- { false, false, 2, UINT32_C(0x7) },
- { false, false, 2, UINT32_C(0x8) },
- { false, false, 2, UINT32_C(0x9) },
- { false, false, 2, UINT32_C(0xa) },
- { false, false, 2, UINT32_C(0xb) },
- { false, false, 2, UINT32_C(0xc) },
- { false, false, 2, UINT32_C(0xd) },
- { false, false, 2, UINT32_C(0xe) },
- { false, false, 2, UINT32_C(0xf) },
+ {true, true, 2, UINT32_C(0x0)},
+ {true, true, 2, UINT32_C(0x1)},
+ {true, false, 2, UINT32_C(0x2)},
+ {true, false, 2, UINT32_C(0x3)},
+ {false, false, 2, UINT32_C(0x4)},
+ {false, false, 2, UINT32_C(0x5)},
+ {false, false, 2, UINT32_C(0x6)},
+ {false, false, 2, UINT32_C(0x7)},
+ {false, false, 2, UINT32_C(0x8)},
+ {false, false, 2, UINT32_C(0x9)},
+ {false, false, 2, UINT32_C(0xa)},
+ {false, false, 2, UINT32_C(0xb)},
+ {false, false, 2, UINT32_C(0xc)},
+ {false, false, 2, UINT32_C(0xd)},
+ {false, false, 2, UINT32_C(0xe)},
+ {false, false, 2, UINT32_C(0xf)},
};
UintIntTest<int32_t> test_little_values_signed[] = {
- { true, true, 1, INT32_C(0) },
- { true, false, 1, INT32_C(1) },
- { false, false, 1, INT32_C(2) },
- { false, false, 1, INT32_C(3) },
- { false, false, 1, INT32_C(4) },
- { false, false, 1, INT32_C(5) },
- { false, false, 1, INT32_C(6) },
- { false, false, 1, INT32_C(7) },
- { false, true, 1, INT32_C(-1) },
- { false, false, 1, INT32_C(-2) },
- { false, false, 1, INT32_C(-3) },
- { false, false, 1, INT32_C(-4) },
- { false, false, 1, INT32_C(-5) },
- { false, false, 1, INT32_C(-6) },
- { false, false, 1, INT32_C(-7) },
- { false, false, 1, INT32_C(-8) },
+ {true, true, 1, INT32_C(0)},
+ {true, false, 1, INT32_C(1)},
+ {false, false, 1, INT32_C(2)},
+ {false, false, 1, INT32_C(3)},
+ {false, false, 1, INT32_C(4)},
+ {false, false, 1, INT32_C(5)},
+ {false, false, 1, INT32_C(6)},
+ {false, false, 1, INT32_C(7)},
+ {false, true, 1, INT32_C(-1)},
+ {false, false, 1, INT32_C(-2)},
+ {false, false, 1, INT32_C(-3)},
+ {false, false, 1, INT32_C(-4)},
+ {false, false, 1, INT32_C(-5)},
+ {false, false, 1, INT32_C(-6)},
+ {false, false, 1, INT32_C(-7)},
+ {false, false, 1, INT32_C(-8)},
- { true, true, 2, INT32_C(0) },
- { true, true, 2, INT32_C(1) },
- { true, false, 2, INT32_C(2) },
- { true, false, 2, INT32_C(3) },
- { false, false, 2, INT32_C(4) },
- { false, false, 2, INT32_C(5) },
- { false, false, 2, INT32_C(6) },
- { false, false, 2, INT32_C(7) },
- { false, true, 2, INT32_C(-1) },
- { false, true, 2, INT32_C(-2) },
- { false, false, 2, INT32_C(-3) },
- { false, false, 2, INT32_C(-4) },
- { false, false, 2, INT32_C(-5) },
- { false, false, 2, INT32_C(-6) },
- { false, false, 2, INT32_C(-7) },
- { false, false, 2, INT32_C(-8) },
+ {true, true, 2, INT32_C(0)},
+ {true, true, 2, INT32_C(1)},
+ {true, false, 2, INT32_C(2)},
+ {true, false, 2, INT32_C(3)},
+ {false, false, 2, INT32_C(4)},
+ {false, false, 2, INT32_C(5)},
+ {false, false, 2, INT32_C(6)},
+ {false, false, 2, INT32_C(7)},
+ {false, true, 2, INT32_C(-1)},
+ {false, true, 2, INT32_C(-2)},
+ {false, false, 2, INT32_C(-3)},
+ {false, false, 2, INT32_C(-4)},
+ {false, false, 2, INT32_C(-5)},
+ {false, false, 2, INT32_C(-6)},
+ {false, false, 2, INT32_C(-7)},
+ {false, false, 2, INT32_C(-8)},
};
UintIntTest<uint32_t> test_u16[] = {
- { true, true, 16, UINT32_C(0x0) },
- { true, false, 16, UINT32_C(0xabcd) },
- { true, false, 16, UINT32_C(0x8000) },
- { true, false, 16, UINT32_C(0xffff) },
- { false, false, 16, UINT32_C(0x10000) },
- { false, false, 16, UINT32_C(0xffff0000) },
- { false, false, 16, UINT32_C(0xffff8000) },
- { false, false, 16, UINT32_C(0xffffffff) },
+ {true, true, 16, UINT32_C(0x0)},
+ {true, false, 16, UINT32_C(0xabcd)},
+ {true, false, 16, UINT32_C(0x8000)},
+ {true, false, 16, UINT32_C(0xffff)},
+ {false, false, 16, UINT32_C(0x10000)},
+ {false, false, 16, UINT32_C(0xffff0000)},
+ {false, false, 16, UINT32_C(0xffff8000)},
+ {false, false, 16, UINT32_C(0xffffffff)},
};
UintIntTest<int32_t> test_i16[] = {
- { true, true, 16, INT32_C(0x0) },
- { true, false, 16, INT32_C(0xabcd) },
- { true, false, 16, INT32_C(0x8000) },
- { true, false, 16, INT32_C(0xffff) },
- { false, false, 16, INT32_C(0x10000) },
- { true, true, 16, INT32_C(42) },
- { false, true, 16, INT32_C(-42) },
- { false, true, 16, INT32_C(-1) },
+ {true, true, 16, INT32_C(0x0)},
+ {true, false, 16, INT32_C(0xabcd)},
+ {true, false, 16, INT32_C(0x8000)},
+ {true, false, 16, INT32_C(0xffff)},
+ {false, false, 16, INT32_C(0x10000)},
+ {true, true, 16, INT32_C(42)},
+ {false, true, 16, INT32_C(-42)},
+ {false, true, 16, INT32_C(-1)},
};
UintIntTest<uint64_t> test_u32[] = {
- { true, true, 32, UINT64_C(0x0) },
- { true, false, 32, UINT64_C(0xabcdabcd) },
- { true, false, 32, UINT64_C(0x80000000) },
- { true, false, 32, UINT64_C(0xffffffff) },
+ {true, true, 32, UINT64_C(0x0)},
+ {true, false, 32, UINT64_C(0xabcdabcd)},
+ {true, false, 32, UINT64_C(0x80000000)},
+ {true, false, 32, UINT64_C(0xffffffff)},
};
UintIntTest<int64_t> test_i32[] = {
- { true, true, 32, INT64_C(0) },
- { true, true, 32, INT64_C(42) },
- { false, true, 32, INT64_C(-42) },
- { false, true, 32, INT64_C(-1) },
- { true, true, 32, INT64_C(2147483647) }, // (1 << (32 - 1)) - 1
- { false, true, 32, INT64_C(-2147483648) }, // -(1 << (32 - 1))
+ {true, true, 32, INT64_C(0)},
+ {true, true, 32, INT64_C(42)},
+ {false, true, 32, INT64_C(-42)},
+ {false, true, 32, INT64_C(-1)},
+ {true, true, 32, INT64_C(2147483647)}, // (1 << (32 - 1)) - 1
+ {false, true, 32, INT64_C(-2147483648)}, // -(1 << (32 - 1))
};
UintIntTest<uint64_t> test_unsigned_higher_than_32[] = {
- { false, false, 54, UINT64_C(0xabcdef9012345678) },
- { true, false, 33, UINT64_C(0x100000000) },
- { true, false, 62, UINT64_C(0x3fffffffffffffff) },
- { true, false, 63, UINT64_C(0x7fffffffffffffff) },
+ {false, false, 54, UINT64_C(0xabcdef9012345678)},
+ {true, false, 33, UINT64_C(0x100000000)},
+ {true, false, 62, UINT64_C(0x3fffffffffffffff)},
+ {true, false, 63, UINT64_C(0x7fffffffffffffff)},
};
UintIntTest<int64_t> test_signed_higher_than_32[] = {
- { true, true, 54, INT64_C(9007199254740991) }, // (1 << (54 - 1)) - 1
- { true, false, 54, INT64_C(9007199254740992) }, // 1 << (54 - 1)
- { true, true, 33, INT64_C(4294967295) }, // (1 << (33 - 1) - 1)
- { false, true, 33, INT64_C(-4294967296) }, // -(1 << (33 - 1))
+ {true, true, 54, INT64_C(9007199254740991)}, // (1 << (54 - 1)) - 1
+ {true, false, 54, INT64_C(9007199254740992)}, // 1 << (54 - 1)
+ {true, true, 33, INT64_C(4294967295)}, // (1 << (33 - 1) - 1)
+ {false, true, 33, INT64_C(-4294967296)}, // -(1 << (33 - 1))
};
#define TEST_LIST(M) \
@@ -181,7 +181,7 @@
M(test_signed_higher_than_32)
-#define TEST_UINT(test_vector) \
+#define TEST_UINT(test_vector) \
for (unsigned i = 0; i < ARRAY_SIZE(test_vector); i++) { \
if (test_vector[i].is_uintn) { \
VIXL_CHECK(IsUintN(test_vector[i].n, test_vector[i].x)); \
@@ -190,17 +190,17 @@
} \
}
-#define TEST_INT(test_vector) \
- for (unsigned i = 0; i < ARRAY_SIZE(test_vector); i++) { \
- if (test_vector[i].is_intn) { \
- VIXL_CHECK(IsIntN(test_vector[i].n, test_vector[i].x)); \
- } else { \
- VIXL_CHECK(!IsIntN(test_vector[i].n, test_vector[i].x)); \
- } \
+#define TEST_INT(test_vector) \
+ for (unsigned i = 0; i < ARRAY_SIZE(test_vector); i++) { \
+ if (test_vector[i].is_intn) { \
+ VIXL_CHECK(IsIntN(test_vector[i].n, test_vector[i].x)); \
+ } else { \
+ VIXL_CHECK(!IsIntN(test_vector[i].n, test_vector[i].x)); \
+ } \
}
-TEST_LIST(TEST_UINT)
-TEST_LIST(TEST_INT)
+ TEST_LIST(TEST_UINT)
+ TEST_LIST(TEST_INT)
#undef TEST_UINT
#undef TEST_INT
diff --git a/test/test-code-buffer.cc b/test/test-code-buffer.cc
index 06c3c83..ce947ca 100644
--- a/test/test-code-buffer.cc
+++ b/test/test-code-buffer.cc
@@ -30,7 +30,7 @@
namespace vixl {
-#define TEST(name) TEST_(CODE_BUFFER_##name)
+#define TEST(name) TEST_(CODE_BUFFER_##name)
TEST(align_grow) {
CodeBuffer code_buffer(2);
diff --git a/test/test-code-generation-scopes.cc b/test/test-code-generation-scopes.cc
index 3b312e9..1fd98d8 100644
--- a/test/test-code-generation-scopes.cc
+++ b/test/test-code-generation-scopes.cc
@@ -34,13 +34,13 @@
#include "aarch64/macro-assembler-aarch64.h"
#endif
-#define TEST(name) TEST_(SCOPES_##name)
+#define TEST(name) TEST_(SCOPES_##name)
#ifdef VIXL_INCLUDE_TARGET_A32
-#define TEST_A32(name) TEST(name)
+#define TEST_A32(name) TEST(name)
#else
// Do not add this test to the harness.
-#define TEST_A32(name) void Test##name()
+#define TEST_A32(name) void Test##name()
#endif
#define __ masm.
@@ -337,7 +337,7 @@
#ifdef VIXL_INCLUDE_TARGET_AARCH32
-#define ASSERT_LITERAL_POOL_SIZE_32(expected) \
+#define ASSERT_LITERAL_POOL_SIZE_32(expected) \
VIXL_CHECK((expected) == masm.GetLiteralPoolSize())
TEST_A32(EmissionCheckScope_emit_pool_32) {
@@ -378,9 +378,9 @@
#ifdef VIXL_INCLUDE_TARGET_AARCH64
-#define ASSERT_LITERAL_POOL_SIZE_64(expected) \
- VIXL_CHECK( \
- (expected + aarch64::kInstructionSize) == masm.GetLiteralPoolSize())
+#define ASSERT_LITERAL_POOL_SIZE_64(expected) \
+ VIXL_CHECK((expected + aarch64::kInstructionSize) == \
+ masm.GetLiteralPoolSize())
TEST(EmissionCheckScope_emit_pool_64) {
aarch64::MacroAssembler masm;
@@ -553,7 +553,7 @@
aarch32::MacroAssembler masm;
{
- CodeBufferCheckScope scope(&masm, aarch32::kA32InstructionSizeInBytes);
+ ExactAssemblyScope scope(&masm, aarch32::kA32InstructionSizeInBytes);
__ mov(aarch32::r0, 0);
scope.Close();
__ Mov(aarch32::r1, 1);
@@ -569,7 +569,7 @@
aarch64::MacroAssembler masm;
{
- CodeBufferCheckScope scope(&masm, aarch64::kInstructionSize);
+ ExactAssemblyScope scope(&masm, aarch64::kInstructionSize);
__ movz(aarch64::x0, 0);
scope.Close();
__ Mov(aarch64::x1, 1);
@@ -621,18 +621,28 @@
aarch32::MacroAssembler masm;
// By default macro instructions are allowed.
+ VIXL_CHECK(!masm.ArePoolsBlocked());
+ VIXL_ASSERT(!masm.AllowAssembler());
VIXL_ASSERT(masm.AllowMacroInstructions());
{
ExactAssemblyScope scope1(&masm, 2 * aarch32::kA32InstructionSizeInBytes);
+ VIXL_CHECK(masm.ArePoolsBlocked());
+ VIXL_ASSERT(masm.AllowAssembler());
VIXL_ASSERT(!masm.AllowMacroInstructions());
__ nop();
{
ExactAssemblyScope scope2(&masm, 1 * aarch32::kA32InstructionSizeInBytes);
+ VIXL_CHECK(masm.ArePoolsBlocked());
+ VIXL_ASSERT(masm.AllowAssembler());
VIXL_ASSERT(!masm.AllowMacroInstructions());
__ nop();
}
+ VIXL_CHECK(masm.ArePoolsBlocked());
+ VIXL_ASSERT(masm.AllowAssembler());
VIXL_ASSERT(!masm.AllowMacroInstructions());
}
+ VIXL_CHECK(!masm.ArePoolsBlocked());
+ VIXL_ASSERT(!masm.AllowAssembler());
VIXL_ASSERT(masm.AllowMacroInstructions());
{
@@ -651,18 +661,28 @@
aarch64::MacroAssembler masm;
// By default macro instructions are allowed.
+ VIXL_CHECK(!masm.ArePoolsBlocked());
+ VIXL_ASSERT(!masm.AllowAssembler());
VIXL_ASSERT(masm.AllowMacroInstructions());
{
ExactAssemblyScope scope1(&masm, 2 * aarch64::kInstructionSize);
+ VIXL_CHECK(masm.ArePoolsBlocked());
+ VIXL_ASSERT(masm.AllowAssembler());
VIXL_ASSERT(!masm.AllowMacroInstructions());
__ nop();
{
ExactAssemblyScope scope2(&masm, 1 * aarch64::kInstructionSize);
+ VIXL_CHECK(masm.ArePoolsBlocked());
+ VIXL_ASSERT(masm.AllowAssembler());
VIXL_ASSERT(!masm.AllowMacroInstructions());
__ nop();
}
+ VIXL_CHECK(masm.ArePoolsBlocked());
+ VIXL_ASSERT(masm.AllowAssembler());
VIXL_ASSERT(!masm.AllowMacroInstructions());
}
+ VIXL_CHECK(!masm.ArePoolsBlocked());
+ VIXL_ASSERT(!masm.AllowAssembler());
VIXL_ASSERT(masm.AllowMacroInstructions());
{
@@ -692,7 +712,8 @@
// The literal pool should be generated when opening this scope, as
// otherwise the `Ldrd` will run out of range when we generate the `nop`
// instructions below.
- ExactAssemblyScope scope(&masm, n_nops * aarch32::kA32InstructionSizeInBytes);
+ ExactAssemblyScope scope(&masm,
+ n_nops * aarch32::kA32InstructionSizeInBytes);
// Although it must be, we do not check that the literal pool size is zero
// here, because we want this regression test to fail while or after we
@@ -745,4 +766,3 @@
} // namespace vixl
-
diff --git a/test/test-invalset.cc b/test/test-invalset.cc
index f8af5bf..f3aeb91 100644
--- a/test/test-invalset.cc
+++ b/test/test-invalset.cc
@@ -31,7 +31,7 @@
// This file contains tests for the `InvalSet` and `InvalSetIterator` classes.
-#define TEST(name) TEST_(INVALSET_##name)
+#define TEST(name) TEST_(INVALSET_##name)
typedef ptrdiff_t KeyType;
typedef ptrdiff_t ValType;
@@ -48,16 +48,14 @@
return (key_ == other.key_) && (val_ == other.val_);
}
bool operator<(const Obj& other) const {
- return (key_ < other.key_) ||
- ((key_ == other.key_) && (val_ < other.val_));
+ return (key_ < other.key_) || ((key_ == other.key_) && (val_ < other.val_));
}
bool operator<=(const Obj& other) const {
return (key_ <= other.key_) ||
- ((key_ == other.key_) && (val_ <= other.val_));
+ ((key_ == other.key_) && (val_ <= other.val_));
}
bool operator>(const Obj& other) const {
- return (key_ > other.key_) ||
- ((key_ == other.key_) && (val_ > other.val_));
+ return (key_ > other.key_) || ((key_ == other.key_) && (val_ > other.val_));
}
};
@@ -73,7 +71,7 @@
kReclaimFrom,
kReclaimFactor> TestSet;
-template<>
+template <>
inline KeyType InvalSet<Obj,
kNPreallocatedElements,
KeyType,
@@ -82,7 +80,7 @@
kReclaimFactor>::GetKey(const Obj& obj) {
return obj.key_;
}
-template<>
+template <>
inline void InvalSet<Obj,
kNPreallocatedElements,
KeyType,
@@ -234,8 +232,7 @@
VIXL_CHECK(total == expected_total);
// Test with more elements.
- for (unsigned i = kNPreallocatedElements;
- i < 4 * kNPreallocatedElements;
+ for (unsigned i = kNPreallocatedElements; i < 4 * kNPreallocatedElements;
i++) {
set.insert(Obj(i, i));
expected_total += i;
@@ -304,8 +301,7 @@
VIXL_CHECK(total == expected_total);
// Test with more elements.
- for (unsigned i = kNPreallocatedElements;
- i < 4 * kNPreallocatedElements;
+ for (unsigned i = kNPreallocatedElements; i < 4 * kNPreallocatedElements;
i++) {
set.insert(Obj(i, i));
expected_total += i;
@@ -355,10 +351,10 @@
TEST(stl_forward_iterator) {
{
- TestSet::iterator default_it; // Default-constructible.
- TestSet::iterator copy_it(default_it); // Copy-constructible.
- copy_it = default_it; // Copy-assignable.
- } // Destructible.
+ TestSet::iterator default_it; // Default-constructible.
+ TestSet::iterator copy_it(default_it); // Copy-constructible.
+ copy_it = default_it; // Copy-assignable.
+ } // Destructible.
TestSet set1;
VIXL_CHECK(set1.empty() && (set1.size() == 0));
diff --git a/test/test-operands.cc b/test/test-operands.cc
index 5a239e3..30c8527 100644
--- a/test/test-operands.cc
+++ b/test/test-operands.cc
@@ -33,7 +33,7 @@
#include "aarch32/operands-aarch32.h"
#endif
-#define TEST_AARCH32(name) TEST_(AARCH32_OPERANDS_##name)
+#define TEST_AARCH32(name) TEST_(AARCH32_OPERANDS_##name)
namespace vixl {
diff --git a/test/test-runner.cc b/test/test-runner.cc
index ae615a0..361d69c 100644
--- a/test/test-runner.cc
+++ b/test/test-runner.cc
@@ -57,7 +57,7 @@
// Instantiate a Test and append it to the linked list.
vixl::Test::Test(const char* name, TestFunction* callback)
- : name_(name), callback_(callback), next_(NULL) {
+ : name_(name), callback_(callback), next_(NULL) {
// Append this test to the linked list.
if (first_ == NULL) {
VIXL_ASSERT(last_ == NULL);
@@ -86,11 +86,11 @@
}
-static void NormalizeOption(char * arg) {
+static void NormalizeOption(char* arg) {
// Squash all '_' characters in options. This allows --trace_sim and
// --trace-sim to be handled in the same way, for example.
VIXL_ASSERT(IsOption(arg));
- for (char * c = arg; *c != '\0'; c++) {
+ for (char* c = arg; *c != '\0'; c++) {
if (*c == '_') {
*c = '-';
}
@@ -99,7 +99,8 @@
static void PrintHelpMessage() {
- printf("Usage: ./test [options] [test names]\n"
+ printf(
+ "Usage: ./test [options] [test names]\n"
"Run all tests specified on the command line.\n"
"--help Print this help message.\n"
"--list List all available tests.\n"
@@ -225,4 +226,3 @@
return EXIT_SUCCESS;
}
-
diff --git a/test/test-runner.h b/test/test-runner.h
index ae73911..ea1bf48 100644
--- a/test/test-runner.h
+++ b/test/test-runner.h
@@ -34,7 +34,7 @@
// Each actual test is represented by a Test instance.
// Tests are appended to a static linked list upon creation.
class Test {
- typedef void (TestFunction)();
+ typedef void(TestFunction)();
public:
Test(const char* name, TestFunction* callback);
@@ -90,10 +90,10 @@
// Macro to register a test. It instantiates a Test and registers its
// callback function.
-#define TEST_(Name) \
-void Test##Name(); \
-Test test_##Name(#Name, &Test##Name); \
-void Test##Name()
+#define TEST_(Name) \
+ void Test##Name(); \
+ Test test_##Name(#Name, &Test##Name); \
+ void Test##Name()
} // namespace vixl
#endif // TEST_TEST_H_
diff --git a/test/test-use-scratch-register-scope.cc b/test/test-use-scratch-register-scope.cc
index 1cac6e6..a11c405 100644
--- a/test/test-use-scratch-register-scope.cc
+++ b/test/test-use-scratch-register-scope.cc
@@ -38,21 +38,25 @@
#define STRINGIFY(x) #x
-#define TEST_AARCH32(Name) \
- namespace aarch32 { void Test_##Name##_AArch32_Impl(); } \
- void Test_##Name##_AArch32() { aarch32::Test_##Name##_AArch32_Impl(); } \
- Test test_##Name##_AArch32(STRINGIFY(AARCH32_SCRATCH_##Name), \
- &Test_##Name##_AArch32); \
+#define TEST_AARCH32(Name) \
+ namespace aarch32 { \
+ void Test_##Name##_AArch32_Impl(); \
+ } \
+ void Test_##Name##_AArch32() { aarch32::Test_##Name##_AArch32_Impl(); } \
+ Test test_##Name##_AArch32(STRINGIFY(AARCH32_SCRATCH_##Name), \
+ &Test_##Name##_AArch32); \
void aarch32::Test_##Name##_AArch32_Impl()
-#define TEST_AARCH64(Name) \
- namespace aarch64 { void Test_##Name##_AArch64_Impl(); } \
- void Test_##Name##_AArch64() { aarch64::Test_##Name##_AArch64_Impl(); } \
- Test test_##Name##_AArch64(STRINGIFY(AARCH64_SCRATCH_##Name), \
- &Test_##Name##_AArch64); \
+#define TEST_AARCH64(Name) \
+ namespace aarch64 { \
+ void Test_##Name##_AArch64_Impl(); \
+ } \
+ void Test_##Name##_AArch64() { aarch64::Test_##Name##_AArch64_Impl(); } \
+ Test test_##Name##_AArch64(STRINGIFY(AARCH64_SCRATCH_##Name), \
+ &Test_##Name##_AArch64); \
void aarch64::Test_##Name##_AArch64_Impl()
-#define SETUP() MacroAssembler masm
+#define SETUP() MacroAssembler masm
#define TEARDOWN()
#define __ masm.
@@ -61,7 +65,7 @@
// UseScratchRegisterScopes must be able to nest perfectly. That is, they may
// nest, but nested scopes must not outlive less-nested scopes.
-template<typename MacroAssembler, typename UseScratchRegisterScope>
+template <typename MacroAssembler, typename UseScratchRegisterScope>
class PerfectNestingTestHelper {
public:
explicit PerfectNestingTestHelper(MacroAssembler* masm) : masm_(masm) {
@@ -69,7 +73,8 @@
seed48(seed);
}
void Run() {
- UseScratchRegisterScope* top_scope = masm_->GetCurrentScratchRegisterScope();
+ UseScratchRegisterScope* top_scope =
+ masm_->GetCurrentScratchRegisterScope();
int descendents = 0;
while (descendents < kMinimumDescendentScopeCount) descendents += Run(0);
VIXL_CHECK(masm_->GetCurrentScratchRegisterScope() == top_scope);
@@ -99,8 +104,8 @@
#ifdef VIXL_INCLUDE_TARGET_AARCH32
TEST_AARCH32(perfect_nesting) {
SETUP();
- PerfectNestingTestHelper<MacroAssembler,
- UseScratchRegisterScope>(&masm).Run();
+ PerfectNestingTestHelper<MacroAssembler, UseScratchRegisterScope>(&masm)
+ .Run();
TEARDOWN();
}
#endif // VIXL_INCLUDE_TARGET_AARCH32
@@ -108,8 +113,8 @@
#ifdef VIXL_INCLUDE_TARGET_AARCH64
TEST_AARCH64(perfect_nesting) {
SETUP();
- PerfectNestingTestHelper<MacroAssembler,
- UseScratchRegisterScope>(&masm).Run();
+ PerfectNestingTestHelper<MacroAssembler, UseScratchRegisterScope>(&masm)
+ .Run();
TEARDOWN();
}
#endif // VIXL_INCLUDE_TARGET_AARCH64
diff --git a/test/test-utils.cc b/test/test-utils.cc
index 4abcf56..5d08ab4 100644
--- a/test/test-utils.cc
+++ b/test/test-utils.cc
@@ -58,7 +58,7 @@
#if defined(__aarch64__) && defined(VIXL_INCLUDE_TARGET_AARCH64)
aarch64::CPU::EnsureIAndDCacheCoherency(buffer, size);
#elif defined(__arm__) && \
- (defined(VIXL_INCLUDE_TARGET_A32) || defined(VIXL_INCLUDE_TARGET_T32))
+ (defined(VIXL_INCLUDE_TARGET_A32) || defined(VIXL_INCLUDE_TARGET_T32))
// TODO: Do not use __builtin___clear_cache and instead implement
// `CPU::EnsureIAndDCacheCoherency` for aarch32.
__builtin___clear_cache(buffer, reinterpret_cast<char*>(buffer) + size);
diff --git a/tools/clang_format.py b/tools/clang_format.py
index 3121ade..aebb458 100755
--- a/tools/clang_format.py
+++ b/tools/clang_format.py
@@ -159,7 +159,7 @@
return rc
-def Find(path, filters = ['*']):
+def Find(path, filters = ['*'], excluded_dir = ""):
files_found = []
def NameMatchesAnyFilter(name, ff):
@@ -169,8 +169,14 @@
return False
for root, dirs, files in os.walk(path):
- files_found += [os.path.relpath(os.path.join(root, fn))
- for fn in files if NameMatchesAnyFilter(fn, filters)]
+ files_found += [
+ os.path.join(root, fn)
+ for fn in files
+ # Include files which names match "filters".
+ # Exclude files for which the base directory is "excluded_dir".
+ if NameMatchesAnyFilter(os.path.relpath(fn), filters) and \
+ not os.path.dirname(os.path.join(root, fn)).endswith(excluded_dir)
+ ]
return files_found
@@ -180,9 +186,10 @@
config.dir_aarch32_examples,
config.dir_aarch64_benchmarks,
config.dir_aarch64_examples,
+ config.dir_tests,
config.dir_src_vixl ]
for directory in source_dirs:
- sources += Find(directory, ['*.h', '*.cc'])
+ sources += Find(directory, ['*.h', '*.cc'], 'traces')
return sources
diff --git a/tools/config.py b/tools/config.py
index 7137ec3..b492652 100644
--- a/tools/config.py
+++ b/tools/config.py
@@ -40,17 +40,23 @@
dir_aarch64_traces = os.path.join(dir_tests, 'aarch64', 'traces')
dir_aarch32_traces = os.path.join(dir_tests, 'aarch32', 'traces')
+# The following set of options are tested in all combinations. The order is
+# important; the _first_ option in each list is used as the basis for the
+# independently-tested options below.
# The full list of available build modes.
-build_options_modes = ['release', 'debug']
-# The list of target arch/isa options to test with. Do not list 'all' as an
-# option since it is the default.
-build_options_target = ['a32', 't32', 'a32,t32', 'a64', 'a64,a32', 'a64,t32']
-# Negative testing is off by default, so do not list 'off' as an option.
-build_options_negative_testing = ['on']
+build_options_modes = ['debug', 'release']
# The list of C++ standard to test for.
tested_cpp_standards = ['c++98', 'c++11']
# The list of compilers tested.
tested_compilers = ['clang++', 'g++']
+# The following sets of options are tested independently, and are not combined
+# with one another. Each one in turn is appended to the first combination of the
+# options above.
+# The list of target arch/isa options to test with. Do not list 'all' as an
+# option since it is the default.
+build_options_target = ['a32', 't32', 'a32,t32', 'a64', 'a64,a32', 'a64,t32']
+# Negative testing is off by default, so do not list 'off' as an option.
+build_options_negative_testing = ['on']
diff --git a/tools/test.py b/tools/test.py
index 7806eed..fa07544 100755
--- a/tools/test.py
+++ b/tools/test.py
@@ -221,6 +221,10 @@
and run only with one compiler, in one mode,
with one C++ standard, and with an appropriate
default for runtime options.''')
+ general_arguments.add_argument('--dry-run', action='store_true',
+ help='''Don't actually build or run anything,
+ but print the configurations that would be
+ tested.''')
general_arguments.add_argument(
'--jobs', '-j', metavar='N', type=int, nargs='?',
default=multiprocessing.cpu_count(),
@@ -438,11 +442,11 @@
SetFast(build_option_mode, args.mode, 'debug')
SetFast(runtime_option_debugger, args.debugger, 'on')
- if not args.nolint and not args.fast:
+ if not args.nolint and not (args.fast or args.dry_run):
rc |= RunLinter()
MaybeExitEarly(rc)
- if not args.noclang_format and not args.fast:
+ if not args.noclang_format and not (args.fast or args.dry_run):
rc |= RunClangFormat()
MaybeExitEarly(rc)
@@ -460,22 +464,33 @@
]
return list(itertools.product(*opts_list))
# List combinations of options that should only be tested independently.
- def ListIndependentCombinations(args, options):
+ def ListIndependentCombinations(args, options, base):
n = []
for opt in options:
if opt.test_independently:
for o in opt.ArgList(args.__dict__[opt.name]):
- n.append((o,))
+ n.append(base + (o,))
return n
# TODO: We should refine the configurations we test by default, instead of
# always testing all possible combinations.
test_env_combinations = ListCombinations(args, test_environment_options)
test_build_combinations = ListCombinations(args, test_build_options)
- test_build_combinations.extend(ListIndependentCombinations(args, test_build_options))
+ if not args.fast:
+ test_build_combinations.extend(
+ ListIndependentCombinations(args,
+ test_build_options,
+ test_build_combinations[0]))
test_runtime_combinations = ListCombinations(args, test_runtime_options)
for environment_options in test_env_combinations:
for build_options in test_build_combinations:
+ if (args.dry_run):
+ for runtime_options in test_runtime_combinations:
+ print(' '.join(filter(None, environment_options)) + ', ' +
+ ' '.join(filter(None, build_options)) + ', ' +
+ ' '.join(filter(None, runtime_options)))
+ continue
+
# Avoid going through the build stage if we are not using the build
# result.
if not (args.notest and args.nobench):
@@ -509,6 +524,7 @@
rc |= RunBenchmarks(build_options, args)
MaybeExitEarly(rc)
- PrintStatus(rc == 0)
+ if not args.dry_run:
+ PrintStatus(rc == 0)
sys.exit(rc)