Improve infrastructure for dealing with endianness in VEX.  This patch
removes all decisions about endianness from VEX itself.  Instead, the
endianness of the guest or host (depending on context) must now be
supplied through the LibVEX_* calls, and VEX passes that information
through to all the places that need it:

* the front ends (xx_toIR.c)
* the back ends (xx_isel.c)
* the patcher functions (Chain, UnChain, PatchProfInc)
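
For concreteness, here is a minimal usage sketch of what a caller now
supplies.  It is not part of this patch: the endness field and the
VexEndnessLE/BE values are the ones used in the diff below, and the
surrounding LibVEX_default_VexArchInfo / VexTranslateArgs setup is
assumed to be otherwise unchanged.

   #include "libvex.h"

   /* Sketch only: callers must now state the host endianness
      explicitly rather than having VEX guess it. */
   static void setup_host_archinfo ( VexArchInfo* vai_host )
   {
      LibVEX_default_VexArchInfo(vai_host);
      vai_host->hwcaps  = 0;              /* fill in as before */
      vai_host->endness = VexEndnessLE;   /* new: or VexEndnessBE */
      /* Then install *vai_host as the archinfo_host field of the
         VexTranslateArgs passed to LibVEX_Translate; the guest side
         (archinfo_guest) is handled the same way. */
   }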

Mostly it is boring and ugly plumbing.  As far as types go, there is a
new type "VexEndness" that carries the endianness.  This also makes it
possible to stop using Bools to indicate endianness.  VexArchInfo has
a new field of type VexEndness.  Apart from that, there are no other
changes to types.
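
The definition of VexEndness itself lives in the public header
(presumably pub/libvex.h) and is not shown in this diff.  A rough
sketch of its shape and of the new VexArchInfo field, as implied by
the uses below (the _INVALID value and the field layout are guesses):

   typedef
      enum {
         VexEndness_INVALID,   /* unknown / not yet set */
         VexEndnessLE,         /* little-endian */
         VexEndnessBE          /* big-endian */
      }
      VexEndness;

   typedef
      struct {
         UInt       hwcaps;
         VexEndness endness;   /* new field */
         /* ... remaining fields unchanged ... */
      }
      VexArchInfo;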

Follow-ups: the MIPS front and back ends have not yet been fixed up to
use the passed-in endianness information.  Currently they assume that
the endianness of both host and guest is the same as the endianness of
the target for which VEX is being compiled (illustrated below).
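
For illustration only, a hypothetical sketch of the kind of build-time
assumption this refers to (the exact macro names used by the MIPS code
may differ):

   /* Hypothetical: endianness derived from the compilation target
      rather than from the passed-in host/guest endianness. */
   #if defined(_MIPSEL)
      /* behave as if both guest and host are little-endian */
   #else
      /* behave as if both guest and host are big-endian */
   #endif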



git-svn-id: svn://svn.valgrind.org/vex/trunk@2910 8f6e269a-dfd6-0310-a8e1-e2731360e62c
diff --git a/priv/guest_amd64_defs.h b/priv/guest_amd64_defs.h
index 42a6a37..008638e 100644
--- a/priv/guest_amd64_defs.h
+++ b/priv/guest_amd64_defs.h
@@ -60,7 +60,7 @@
                            VexArch      guest_arch,
                            VexArchInfo* archinfo,
                            VexAbiInfo*  abiinfo,
-                           Bool         host_bigendian,
+                           VexEndness   host_endness,
                            Bool         sigill_diag );
 
 /* Used by the optimiser to specialise calls to helpers. */
diff --git a/priv/guest_amd64_toIR.c b/priv/guest_amd64_toIR.c
index 563e957..ba23ee2 100644
--- a/priv/guest_amd64_toIR.c
+++ b/priv/guest_amd64_toIR.c
@@ -185,7 +185,7 @@
    that we don't have to pass them around endlessly. */
 
 /* We need to know this to do sub-register accesses correctly. */
-static Bool host_is_bigendian;
+static VexEndness host_endness;
 
 /* Pointer to the guest code area (points to start of BB, not to the
    insn being processed). */
@@ -975,7 +975,7 @@
 
 static IRExpr* getIRegCL ( void )
 {
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    return IRExpr_Get( OFFB_RCX, Ity_I8 );
 }
 
@@ -984,7 +984,7 @@
 
 static void putIRegAH ( IRExpr* e )
 {
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I8);
    stmt( IRStmt_Put( OFFB_RAX+1, e ) );
 }
@@ -1006,7 +1006,7 @@
 
 static IRExpr* getIRegRAX ( Int sz )
 {
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    switch (sz) {
       case 1: return IRExpr_Get( OFFB_RAX, Ity_I8 );
       case 2: return IRExpr_Get( OFFB_RAX, Ity_I16 );
@@ -1019,7 +1019,7 @@
 static void putIRegRAX ( Int sz, IRExpr* e )
 {
    IRType ty = typeOfIRExpr(irsb->tyenv, e);
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    switch (sz) {
       case 8: vassert(ty == Ity_I64);
               stmt( IRStmt_Put( OFFB_RAX, e ));
@@ -1054,7 +1054,7 @@
 
 static IRExpr* getIRegRDX ( Int sz )
 {
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    switch (sz) {
       case 1: return IRExpr_Get( OFFB_RDX, Ity_I8 );
       case 2: return IRExpr_Get( OFFB_RDX, Ity_I16 );
@@ -1066,7 +1066,7 @@
 
 static void putIRegRDX ( Int sz, IRExpr* e )
 {
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(typeOfIRExpr(irsb->tyenv, e) == szToITy(sz));
    switch (sz) {
       case 8: stmt( IRStmt_Put( OFFB_RDX, e ));
@@ -1108,7 +1108,7 @@
 
 static IRExpr* getIReg32 ( UInt regno )
 {
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    return unop(Iop_64to32,
                IRExpr_Get( integerGuestReg64Offset(regno),
                            Ity_I64 ));
@@ -1132,7 +1132,7 @@
 
 static IRExpr* getIReg16 ( UInt regno )
 {
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    return IRExpr_Get( integerGuestReg64Offset(regno),
                       Ity_I16 );
 }
@@ -1253,7 +1253,7 @@
 static UInt offsetIRegG ( Int sz, Prefix pfx, UChar mod_reg_rm )
 {
    UInt reg;
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(IS_VALID_PFX(pfx));
    vassert(sz == 8 || sz == 4 || sz == 2 || sz == 1);
    reg = gregOfRexRM( pfx, mod_reg_rm );
@@ -1332,7 +1332,7 @@
 static UInt offsetIRegE ( Int sz, Prefix pfx, UChar mod_reg_rm )
 {
    UInt reg;
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(IS_VALID_PFX(pfx));
    vassert(sz == 8 || sz == 4 || sz == 2 || sz == 1);
    reg = eregOfRexRM( pfx, mod_reg_rm );
@@ -1401,7 +1401,7 @@
 static Int xmmGuestRegOffset ( UInt xmmreg )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    return ymmGuestRegOffset( xmmreg );
 }
 
@@ -1411,7 +1411,7 @@
 static Int xmmGuestRegLane16offset ( UInt xmmreg, Int laneno )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(laneno >= 0 && laneno < 8);
    return xmmGuestRegOffset( xmmreg ) + 2 * laneno;
 }
@@ -1419,7 +1419,7 @@
 static Int xmmGuestRegLane32offset ( UInt xmmreg, Int laneno )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(laneno >= 0 && laneno < 4);
    return xmmGuestRegOffset( xmmreg ) + 4 * laneno;
 }
@@ -1427,7 +1427,7 @@
 static Int xmmGuestRegLane64offset ( UInt xmmreg, Int laneno )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(laneno >= 0 && laneno < 2);
    return xmmGuestRegOffset( xmmreg ) + 8 * laneno;
 }
@@ -1435,7 +1435,7 @@
 static Int ymmGuestRegLane128offset ( UInt ymmreg, Int laneno )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(laneno >= 0 && laneno < 2);
    return ymmGuestRegOffset( ymmreg ) + 16 * laneno;
 }
@@ -1443,7 +1443,7 @@
 static Int ymmGuestRegLane64offset ( UInt ymmreg, Int laneno )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(laneno >= 0 && laneno < 4);
    return ymmGuestRegOffset( ymmreg ) + 8 * laneno;
 }
@@ -1451,7 +1451,7 @@
 static Int ymmGuestRegLane32offset ( UInt ymmreg, Int laneno )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(laneno >= 0 && laneno < 8);
    return ymmGuestRegOffset( ymmreg ) + 4 * laneno;
 }
@@ -31745,7 +31745,7 @@
                            VexArch      guest_arch,
                            VexArchInfo* archinfo,
                            VexAbiInfo*  abiinfo,
-                           Bool         host_bigendian_IN,
+                           VexEndness   host_endness_IN,
                            Bool         sigill_diag_IN )
 {
    Int       i, x1, x2;
@@ -31756,7 +31756,7 @@
    vassert(guest_arch == VexArchAMD64);
    guest_code           = guest_code_IN;
    irsb                 = irsb_IN;
-   host_is_bigendian    = host_bigendian_IN;
+   host_endness         = host_endness_IN;
    guest_RIP_curr_instr = guest_IP;
    guest_RIP_bbstart    = guest_IP - delta;
 
diff --git a/priv/guest_arm64_defs.h b/priv/guest_arm64_defs.h
index b8eb1ff..7d772c8 100644
--- a/priv/guest_arm64_defs.h
+++ b/priv/guest_arm64_defs.h
@@ -50,7 +50,7 @@
                            VexArch      guest_arch,
                            VexArchInfo* archinfo,
                            VexAbiInfo*  abiinfo,
-                           Bool         host_bigendian,
+                           VexEndness   host_endness,
                            Bool         sigill_diag );
 
 /* Used by the optimiser to specialise calls to helpers. */
diff --git a/priv/guest_arm64_toIR.c b/priv/guest_arm64_toIR.c
index f3b8d54..4132780 100644
--- a/priv/guest_arm64_toIR.c
+++ b/priv/guest_arm64_toIR.c
@@ -119,9 +119,10 @@
    not change during translation of the instruction.
 */
 
-/* CONST: is the host bigendian?  We need to know this in order to do
-   sub-register accesses to the SIMD/FP registers correctly. */
-static Bool host_is_bigendian;
+/* CONST: what is the host's endianness?  We need to know this in
+   order to do sub-register accesses to the SIMD/FP registers
+   correctly. */
+static VexEndness host_endness;
 
 /* CONST: The guest address for the instruction currently being
    translated.  */
@@ -1227,7 +1228,7 @@
    has the lowest offset. */
 static Int offsetQRegLane ( UInt qregNo, IRType laneTy, UInt laneNo )
 {
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    Int base = offsetQReg128(qregNo);
    /* Since the host is little-endian, the least significant lane
       will be at the lowest address. */
@@ -10355,7 +10356,7 @@
                            VexArch      guest_arch,
                            VexArchInfo* archinfo,
                            VexAbiInfo*  abiinfo,
-                           Bool         host_bigendian_IN,
+                           VexEndness   host_endness_IN,
                            Bool         sigill_diag_IN )
 {
    DisResult dres;
@@ -10365,7 +10366,7 @@
    vassert(guest_arch == VexArchARM64);
 
    irsb                = irsb_IN;
-   host_is_bigendian   = host_bigendian_IN;
+   host_endness        = host_endness_IN;
    guest_PC_curr_instr = (Addr64)guest_IP;
 
    /* Sanity checks */
diff --git a/priv/guest_arm_defs.h b/priv/guest_arm_defs.h
index 776abb7..ce18017 100644
--- a/priv/guest_arm_defs.h
+++ b/priv/guest_arm_defs.h
@@ -52,7 +52,7 @@
                          VexArch      guest_arch,
                          VexArchInfo* archinfo,
                          VexAbiInfo*  abiinfo,
-                         Bool         host_bigendian,
+                         VexEndness   host_endness,
                          Bool         sigill_diag );
 
 /* Used by the optimiser to specialise calls to helpers. */
diff --git a/priv/guest_arm_toIR.c b/priv/guest_arm_toIR.c
index 7a53059..a7ab76b 100644
--- a/priv/guest_arm_toIR.c
+++ b/priv/guest_arm_toIR.c
@@ -123,10 +123,10 @@
    not change during translation of the instruction.
 */
 
-/* CONST: is the host bigendian?  This has to do with float vs double
-   register accesses on VFP, but it's complex and not properly thought
-   out. */
-static Bool host_is_bigendian;
+/* CONST: what is the host's endianness?  This has to do with float vs
+   double register accesses on VFP, but it's complex and not properly
+   thought out. */
+static VexEndness host_endness;
 
 /* CONST: The guest address for the instruction currently being
    translated.  This is the real, "decoded" address (not subject
@@ -849,11 +849,11 @@
    Int off;
    vassert(fregNo < 32);
    off = doubleGuestRegOffset(fregNo >> 1);
-   if (host_is_bigendian) {
-      vassert(0);
-   } else {
+   if (host_endness == VexEndnessLE) {
       if (fregNo & 1)
          off += 4;
+   } else {
+      vassert(0);
    }
    return off;
 }
@@ -21976,7 +21976,7 @@
                          VexArch      guest_arch,
                          VexArchInfo* archinfo,
                          VexAbiInfo*  abiinfo,
-                         Bool         host_bigendian_IN,
+                         VexEndness   host_endness_IN,
                          Bool         sigill_diag_IN )
 {
    DisResult dres;
@@ -21985,9 +21985,9 @@
    /* Set globals (see top of this file) */
    vassert(guest_arch == VexArchARM);
 
-   irsb              = irsb_IN;
-   host_is_bigendian = host_bigendian_IN;
-   __curr_is_Thumb   = isThumb;
+   irsb            = irsb_IN;
+   host_endness    = host_endness_IN;
+   __curr_is_Thumb = isThumb;
 
    if (isThumb) {
       guest_R15_curr_instr_notENC = (Addr32)guest_IP_ENCODED - 1;
diff --git a/priv/guest_generic_bb_to_IR.c b/priv/guest_generic_bb_to_IR.c
index 8bba8de..63e6a2a 100644
--- a/priv/guest_generic_bb_to_IR.c
+++ b/priv/guest_generic_bb_to_IR.c
@@ -186,7 +186,7 @@
          /*IN*/ UChar*           guest_code,
          /*IN*/ Addr64           guest_IP_bbstart,
          /*IN*/ Bool             (*chase_into_ok)(void*,Addr64),
-         /*IN*/ Bool             host_bigendian,
+         /*IN*/ VexEndness       host_endness,
          /*IN*/ Bool             sigill_diag,
          /*IN*/ VexArch          arch_guest,
          /*IN*/ VexArchInfo*     archinfo_guest,
@@ -362,7 +362,7 @@
                             arch_guest,
                             archinfo_guest,
                             abiinfo_both,
-                            host_bigendian,
+                            host_endness,
                             sigill_diag );
 
       /* stay sane ... */
diff --git a/priv/guest_generic_bb_to_IR.h b/priv/guest_generic_bb_to_IR.h
index 30e216d..5a7f016 100644
--- a/priv/guest_generic_bb_to_IR.h
+++ b/priv/guest_generic_bb_to_IR.h
@@ -152,8 +152,8 @@
       /* ABI info for both guest and host */
       /*IN*/  VexAbiInfo*  abiinfo,
 
-      /* Is the host bigendian? */
-      /*IN*/  Bool         host_bigendian,
+      /* The endianness of the host */
+      /*IN*/  VexEndness   host_endness,
 
       /* Should diagnostics be printed for illegal instructions? */
       /*IN*/  Bool         sigill_diag
@@ -176,7 +176,7 @@
          /*IN*/ UChar*           guest_code,
          /*IN*/ Addr64           guest_IP_bbstart,
          /*IN*/ Bool             (*chase_into_ok)(void*,Addr64),
-         /*IN*/ Bool             host_bigendian,
+         /*IN*/ VexEndness       host_endness,
          /*IN*/ Bool             sigill_diag,
          /*IN*/ VexArch          arch_guest,
          /*IN*/ VexArchInfo*     archinfo_guest,
diff --git a/priv/guest_mips_defs.h b/priv/guest_mips_defs.h
index da112c5..1092aca 100644
--- a/priv/guest_mips_defs.h
+++ b/priv/guest_mips_defs.h
@@ -51,7 +51,7 @@
                                  VexArch      guest_arch,
                                  VexArchInfo* archinfo,
                                  VexAbiInfo*  abiinfo,
-                                 Bool         host_bigendian,
+                                 VexEndness   host_endness,
                                  Bool         sigill_diag );
 
 /* Used by the optimiser to specialise calls to helpers. */
diff --git a/priv/guest_mips_toIR.c b/priv/guest_mips_toIR.c
index 0e9f160..57be41e 100644
--- a/priv/guest_mips_toIR.c
+++ b/priv/guest_mips_toIR.c
@@ -49,10 +49,10 @@
    that we don't have to pass them around endlessly. CONST means does
    not change during translation of the instruction. */
 
-/* CONST: is the host bigendian?  This has to do with float vs double
-   register accesses on VFP, but it's complex and not properly thought
-   out. */
-static Bool host_is_bigendian;
+/* CONST: what is the host's endianness?  This has to do with float vs
+   double register accesses on VFP, but it's complex and not properly
+   thought out. */
+static VexEndness host_endness;
 
 /* Pointer to the guest code area. */
 static UChar *guest_code;
@@ -17202,7 +17202,7 @@
                          VexArch      guest_arch,
                          VexArchInfo* archinfo,
                          VexAbiInfo*  abiinfo,
-                         Bool         host_bigendian_IN,
+                         VexEndness   host_endness_IN,
                          Bool         sigill_diag_IN )
 {
    DisResult dres;
@@ -17217,7 +17217,7 @@
 
    guest_code = guest_code_IN;
    irsb = irsb_IN;
-   host_is_bigendian = host_bigendian_IN;
+   host_endness = host_endness_IN;
 #if defined(VGP_mips32_linux)
    guest_PC_curr_instr = (Addr32)guest_IP;
 #elif defined(VGP_mips64_linux)
diff --git a/priv/guest_ppc_defs.h b/priv/guest_ppc_defs.h
index a27c96f..944989d 100644
--- a/priv/guest_ppc_defs.h
+++ b/priv/guest_ppc_defs.h
@@ -61,7 +61,7 @@
                          VexArch      guest_arch,
                          VexArchInfo* archinfo,
                          VexAbiInfo*  abiinfo,
-                         Bool         host_bigendian,
+                         VexEndness   host_endness,
                          Bool         sigill_diag );
 
 /* Used by the optimiser to specialise calls to helpers. */
diff --git a/priv/guest_ppc_toIR.c b/priv/guest_ppc_toIR.c
index 0b8cbff..2b4d4bd 100644
--- a/priv/guest_ppc_toIR.c
+++ b/priv/guest_ppc_toIR.c
@@ -154,7 +154,7 @@
    given insn. */
 
 /* We need to know this to do sub-register accesses correctly. */
-static Bool host_is_bigendian;
+static VexEndness host_endness;
 
 /* Pointer to the guest code area. */
 static UChar* guest_code;
@@ -1039,7 +1039,7 @@
    // jrs: probably not necessary; only matters if we reference sub-parts
    // of the ppc registers, but that isn't the case
    // later: this might affect Altivec though?
-   vassert(host_is_bigendian);
+   vassert(host_endness == VexEndnessBE);
 
    switch (archreg) {
    case  0: return offsetofPPCGuestState(guest_GPR0);
@@ -19941,7 +19941,7 @@
                          VexArch      guest_arch,
                          VexArchInfo* archinfo,
                          VexAbiInfo*  abiinfo,
-                         Bool         host_bigendian_IN,
+                         VexEndness   host_endness_IN,
                          Bool         sigill_diag_IN )
 {
    IRType     ty;
@@ -19973,7 +19973,7 @@
    /* Set globals (see top of this file) */
    guest_code           = guest_code_IN;
    irsb                 = irsb_IN;
-   host_is_bigendian    = host_bigendian_IN;
+   host_endness         = host_endness_IN;
 
    guest_CIA_curr_instr = mkSzAddr(ty, guest_IP);
    guest_CIA_bbstart    = mkSzAddr(ty, guest_IP - delta);
diff --git a/priv/guest_s390_defs.h b/priv/guest_s390_defs.h
index 63dd1af..6aca7f5 100644
--- a/priv/guest_s390_defs.h
+++ b/priv/guest_s390_defs.h
@@ -50,7 +50,7 @@
                           VexArch      guest_arch,
                           VexArchInfo* archinfo,
                           VexAbiInfo*  abiinfo,
-                          Bool         host_bigendian,
+                          VexEndness   host_endness,
                           Bool         sigill_diag );
 
 /* Used by the optimiser to specialise calls to helpers. */
diff --git a/priv/guest_s390_toIR.c b/priv/guest_s390_toIR.c
index b6fc165..4a5013b 100644
--- a/priv/guest_s390_toIR.c
+++ b/priv/guest_s390_toIR.c
@@ -16565,13 +16565,13 @@
               VexArch      guest_arch,
               VexArchInfo *archinfo,
               VexAbiInfo  *abiinfo,
-              Bool         host_bigendian,
+              VexEndness   host_endness,
               Bool         sigill_diag_IN)
 {
    vassert(guest_arch == VexArchS390X);
 
    /* The instruction decoder requires a big-endian machine. */
-   vassert(host_bigendian == True);
+   vassert(host_endness == VexEndnessBE);
 
    /* Set globals (see top of this file) */
    guest_IA_curr_instr = guest_IP;
diff --git a/priv/guest_x86_defs.h b/priv/guest_x86_defs.h
index 1c64912..e7bc530 100644
--- a/priv/guest_x86_defs.h
+++ b/priv/guest_x86_defs.h
@@ -60,7 +60,7 @@
                          VexArch      guest_arch,
                          VexArchInfo* archinfo,
                          VexAbiInfo*  abiinfo,
-                         Bool         host_bigendian,
+                         VexEndness   host_endness,
                          Bool         sigill_diag );
 
 /* Used by the optimiser to specialise calls to helpers. */
diff --git a/priv/guest_x86_toIR.c b/priv/guest_x86_toIR.c
index 37afd97..2ba4394 100644
--- a/priv/guest_x86_toIR.c
+++ b/priv/guest_x86_toIR.c
@@ -195,7 +195,7 @@
    given insn. */
 
 /* We need to know this to do sub-register accesses correctly. */
-static Bool host_is_bigendian;
+static VexEndness host_endness;
 
 /* Pointer to the guest code area (points to start of BB, not to the
    insn being processed). */
@@ -452,7 +452,7 @@
    vassert(archreg < 8);
 
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
 
    if (sz == 4 || sz == 2 || (sz == 1 && archreg < 4)) {
       switch (archreg) {
@@ -515,7 +515,7 @@
 static Int xmmGuestRegLane16offset ( UInt xmmreg, Int laneno )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(laneno >= 0 && laneno < 8);
    return xmmGuestRegOffset( xmmreg ) + 2 * laneno;
 }
@@ -523,7 +523,7 @@
 static Int xmmGuestRegLane32offset ( UInt xmmreg, Int laneno )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(laneno >= 0 && laneno < 4);
    return xmmGuestRegOffset( xmmreg ) + 4 * laneno;
 }
@@ -531,7 +531,7 @@
 static Int xmmGuestRegLane64offset ( UInt xmmreg, Int laneno )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(laneno >= 0 && laneno < 2);
    return xmmGuestRegOffset( xmmreg ) + 8 * laneno;
 }
@@ -15421,7 +15421,7 @@
                          VexArch      guest_arch,
                          VexArchInfo* archinfo,
                          VexAbiInfo*  abiinfo,
-                         Bool         host_bigendian_IN,
+                         VexEndness   host_endness_IN,
                          Bool         sigill_diag_IN )
 {
    Int       i, x1, x2;
@@ -15432,7 +15432,7 @@
    vassert(guest_arch == VexArchX86);
    guest_code           = guest_code_IN;
    irsb                 = irsb_IN;
-   host_is_bigendian    = host_bigendian_IN;
+   host_endness         = host_endness_IN;
    guest_EIP_curr_instr = (Addr32)guest_IP;
    guest_EIP_bbstart    = (Addr32)toUInt(guest_IP - delta);
 
diff --git a/priv/host_amd64_defs.c b/priv/host_amd64_defs.c
index cd5893d..9a4aa3e 100644
--- a/priv/host_amd64_defs.c
+++ b/priv/host_amd64_defs.c
@@ -2265,7 +2265,7 @@
 
 Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
                       UChar* buf, Int nbuf, AMD64Instr* i, 
-                      Bool mode64,
+                      Bool mode64, VexEndness endness_host,
                       void* disp_cp_chain_me_to_slowEP,
                       void* disp_cp_chain_me_to_fastEP,
                       void* disp_cp_xindir,
@@ -3499,7 +3499,7 @@
       p = doAMode_M(p, fake(4), i->Ain.EvCheck.amFailAddr);
       vassert(p - p0 == 8); /* also ensures that 0x03 offset above is ok */
       /* And crosscheck .. */
-      vassert(evCheckSzB_AMD64() == 8);
+      vassert(evCheckSzB_AMD64(endness_host) == 8);
       goto done;
    }
 
@@ -3542,7 +3542,7 @@
 /* How big is an event check?  See case for Ain_EvCheck in
    emit_AMD64Instr just above.  That crosschecks what this returns, so
    we can tell if we're inconsistent. */
-Int evCheckSzB_AMD64 ( void )
+Int evCheckSzB_AMD64 ( VexEndness endness_host )
 {
    return 8;
 }
@@ -3550,10 +3550,13 @@
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_AMD64 ( void* place_to_chain,
+VexInvalRange chainXDirect_AMD64 ( VexEndness endness_host,
+                                   void* place_to_chain,
                                    void* disp_cp_chain_me_EXPECTED,
                                    void* place_to_jump_to )
 {
+   vassert(endness_host == VexEndnessLE);
+
    /* What we're expecting to see is:
         movabsq $disp_cp_chain_me_EXPECTED, %r11
         call *%r11
@@ -3636,10 +3639,13 @@
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_AMD64 ( void* place_to_unchain,
+VexInvalRange unchainXDirect_AMD64 ( VexEndness endness_host,
+                                     void* place_to_unchain,
                                      void* place_to_jump_to_EXPECTED,
                                      void* disp_cp_chain_me )
 {
+   vassert(endness_host == VexEndnessLE);
+
    /* What we're expecting to see is either:
         (general case)
           movabsq $place_to_jump_to_EXPECTED, %r11
@@ -3700,9 +3706,11 @@
 
 /* Patch the counter address into a profile inc point, as previously
    created by the Ain_ProfInc case for emit_AMD64Instr. */
-VexInvalRange patchProfInc_AMD64 ( void*  place_to_patch,
+VexInvalRange patchProfInc_AMD64 ( VexEndness endness_host,
+                                   void*  place_to_patch,
                                    ULong* location_of_counter )
 {
+   vassert(endness_host == VexEndnessLE);
    vassert(sizeof(ULong*) == 8);
    UChar* p = (UChar*)place_to_patch;
    vassert(p[0] == 0x49);
diff --git a/priv/host_amd64_defs.h b/priv/host_amd64_defs.h
index d774332..c8b49c9 100644
--- a/priv/host_amd64_defs.h
+++ b/priv/host_amd64_defs.h
@@ -754,8 +754,10 @@
 extern void         mapRegs_AMD64Instr     ( HRegRemap*, AMD64Instr*, Bool );
 extern Bool         isMove_AMD64Instr      ( AMD64Instr*, HReg*, HReg* );
 extern Int          emit_AMD64Instr        ( /*MB_MOD*/Bool* is_profInc,
-                                             UChar* buf, Int nbuf, AMD64Instr* i, 
+                                             UChar* buf, Int nbuf,
+                                             AMD64Instr* i, 
                                              Bool mode64,
+                                             VexEndness endness_host,
                                              void* disp_cp_chain_me_to_slowEP,
                                              void* disp_cp_chain_me_to_fastEP,
                                              void* disp_cp_xindir,
@@ -782,19 +784,22 @@
    and so assumes that they are both <= 128, and so can use the short
    offset encoding.  This is all checked with assertions, so in the
    worst case we will merely assert at startup. */
-extern Int evCheckSzB_AMD64 ( void );
+extern Int evCheckSzB_AMD64 ( VexEndness endness_host );
 
 /* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_AMD64 ( void* place_to_chain,
+extern VexInvalRange chainXDirect_AMD64 ( VexEndness endness_host,
+                                          void* place_to_chain,
                                           void* disp_cp_chain_me_EXPECTED,
                                           void* place_to_jump_to );
 
-extern VexInvalRange unchainXDirect_AMD64 ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_AMD64 ( VexEndness endness_host,
+                                            void* place_to_unchain,
                                             void* place_to_jump_to_EXPECTED,
                                             void* disp_cp_chain_me );
 
 /* Patch the counter location into an existing ProfInc point. */
-extern VexInvalRange patchProfInc_AMD64 ( void*  place_to_patch,
+extern VexInvalRange patchProfInc_AMD64 ( VexEndness endness_host,
+                                          void*  place_to_patch,
                                           ULong* location_of_counter );
 
 
diff --git a/priv/host_amd64_isel.c b/priv/host_amd64_isel.c
index 39d7941..8dc67b3 100644
--- a/priv/host_amd64_isel.c
+++ b/priv/host_amd64_isel.c
@@ -4877,6 +4877,9 @@
                      | VEX_HWCAPS_AMD64_BMI
                      | VEX_HWCAPS_AMD64_AVX2)));
 
+   /* Check that the host's endianness is as expected. */
+   vassert(archinfo_host->endness == VexEndnessLE);
+
    /* Make up an initial environment to use. */
    env = LibVEX_Alloc(sizeof(ISelEnv));
    env->vreg_ctr = 0;
diff --git a/priv/host_arm64_defs.c b/priv/host_arm64_defs.c
index e55430f..ef451a3 100644
--- a/priv/host_arm64_defs.c
+++ b/priv/host_arm64_defs.c
@@ -4104,7 +4104,7 @@
 
 Int emit_ARM64Instr ( /*MB_MOD*/Bool* is_profInc,
                       UChar* buf, Int nbuf, ARM64Instr* i,
-                      Bool mode64,
+                      Bool mode64, VexEndness endness_host,
                       void* disp_cp_chain_me_to_slowEP,
                       void* disp_cp_chain_me_to_fastEP,
                       void* disp_cp_xindir,
@@ -6866,7 +6866,7 @@
          /* nofail: */
 
          /* Crosscheck */
-         vassert(evCheckSzB_ARM64() == (UChar*)p - (UChar*)p0);
+         vassert(evCheckSzB_ARM64(endness_host) == (UChar*)p - (UChar*)p0);
          goto done;
       }
 
@@ -6917,7 +6917,7 @@
 /* How big is an event check?  See case for ARM64in_EvCheck in
    emit_ARM64Instr just above.  That crosschecks what this returns, so
    we can tell if we're inconsistent. */
-Int evCheckSzB_ARM64 ( void )
+Int evCheckSzB_ARM64 ( VexEndness endness_host )
 {
    return 24;
 }
@@ -6925,10 +6925,13 @@
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_ARM64 ( void* place_to_chain,
+VexInvalRange chainXDirect_ARM64 ( VexEndness endness_host,
+                                   void* place_to_chain,
                                    void* disp_cp_chain_me_EXPECTED,
                                    void* place_to_jump_to )
 {
+   vassert(endness_host == VexEndnessLE);
+
    /* What we're expecting to see is:
         movw x9, disp_cp_chain_me_to_EXPECTED[15:0]
         movk x9, disp_cp_chain_me_to_EXPECTED[31:15], lsl 16
@@ -6968,10 +6971,13 @@
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_ARM64 ( void* place_to_unchain,
+VexInvalRange unchainXDirect_ARM64 ( VexEndness endness_host,
+                                     void* place_to_unchain,
                                      void* place_to_jump_to_EXPECTED,
                                      void* disp_cp_chain_me )
 {
+   vassert(endness_host == VexEndnessLE);
+
    /* What we're expecting to see is:
         movw x9, place_to_jump_to_EXPECTED[15:0]
         movk x9, place_to_jump_to_EXPECTED[31:15], lsl 16
@@ -7009,7 +7015,8 @@
 
 //ZZ /* Patch the counter address into a profile inc point, as previously
 //ZZ    created by the ARMin_ProfInc case for emit_ARMInstr. */
-//ZZ VexInvalRange patchProfInc_ARM ( void*  place_to_patch,
+//ZZ VexInvalRange patchProfInc_ARM ( VexEndness endness_host,
+//ZZ                                  void*  place_to_patch,
 //ZZ                                  ULong* location_of_counter )
 //ZZ {
 //ZZ    vassert(sizeof(ULong*) == 4);
diff --git a/priv/host_arm64_defs.h b/priv/host_arm64_defs.h
index 90bf4c1..f2f5bea 100644
--- a/priv/host_arm64_defs.h
+++ b/priv/host_arm64_defs.h
@@ -843,6 +843,7 @@
 extern Int  emit_ARM64Instr        ( /*MB_MOD*/Bool* is_profInc,
                                      UChar* buf, Int nbuf, ARM64Instr* i,
                                      Bool mode64,
+                                     VexEndness endness_host,
                                      void* disp_cp_chain_me_to_slowEP,
                                      void* disp_cp_chain_me_to_fastEP,
                                      void* disp_cp_xindir,
@@ -867,19 +868,22 @@
 /* How big is an event check?  This is kind of a kludge because it
    depends on the offsets of host_EvC_FAILADDR and
    host_EvC_COUNTER. */
-extern Int evCheckSzB_ARM64 ( void );
+extern Int evCheckSzB_ARM64 ( VexEndness endness_host );
 
 /* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_ARM64 ( void* place_to_chain,
+extern VexInvalRange chainXDirect_ARM64 ( VexEndness endness_host,
+                                          void* place_to_chain,
                                           void* disp_cp_chain_me_EXPECTED,
                                           void* place_to_jump_to );
 
-extern VexInvalRange unchainXDirect_ARM64 ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_ARM64 ( VexEndness endness_host,
+                                            void* place_to_unchain,
                                             void* place_to_jump_to_EXPECTED,
                                             void* disp_cp_chain_me );
 
 //ZZ /* Patch the counter location into an existing ProfInc point. */
-//ZZ extern VexInvalRange patchProfInc_ARM ( void*  place_to_patch,
+//ZZ extern VexInvalRange patchProfInc_ARM ( VexEndness endness_host,
+//ZZ                                         void*  place_to_patch,
 //ZZ                                         ULong* location_of_counter );
 
 
diff --git a/priv/host_arm64_isel.c b/priv/host_arm64_isel.c
index dfbe146..6f2d8bc 100644
--- a/priv/host_arm64_isel.c
+++ b/priv/host_arm64_isel.c
@@ -7106,6 +7106,9 @@
    /* sanity ... */
    vassert(arch_host == VexArchARM64);
 
+   /* Check that the host's endianness is as expected. */
+   vassert(archinfo_host->endness == VexEndnessLE);
+
    /* guard against unexpected space regressions */
    vassert(sizeof(ARM64Instr) <= 32);
 
diff --git a/priv/host_arm_defs.c b/priv/host_arm_defs.c
index 8ce9385..ed41344 100644
--- a/priv/host_arm_defs.c
+++ b/priv/host_arm_defs.c
@@ -2971,7 +2971,7 @@
 
 Int emit_ARMInstr ( /*MB_MOD*/Bool* is_profInc,
                     UChar* buf, Int nbuf, ARMInstr* i, 
-                    Bool mode64,
+                    Bool mode64, VexEndness endness_host,
                     void* disp_cp_chain_me_to_slowEP,
                     void* disp_cp_chain_me_to_fastEP,
                     void* disp_cp_xindir,
@@ -4644,7 +4644,7 @@
          /* nofail: */
 
          /* Crosscheck */
-         vassert(evCheckSzB_ARM() == (UChar*)p - (UChar*)p0);
+         vassert(evCheckSzB_ARM(endness_host) == (UChar*)p - (UChar*)p0);
          goto done;
       }
 
@@ -4695,7 +4695,7 @@
 /* How big is an event check?  See case for ARMin_EvCheck in
    emit_ARMInstr just above.  That crosschecks what this returns, so
    we can tell if we're inconsistent. */
-Int evCheckSzB_ARM ( void )
+Int evCheckSzB_ARM ( VexEndness endness_host )
 {
    return 24;
 }
@@ -4703,10 +4703,13 @@
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_ARM ( void* place_to_chain,
+VexInvalRange chainXDirect_ARM ( VexEndness endness_host,
+                                 void* place_to_chain,
                                  void* disp_cp_chain_me_EXPECTED,
                                  void* place_to_jump_to )
 {
+   vassert(endness_host == VexEndnessLE);
+
    /* What we're expecting to see is:
         movw r12, lo16(disp_cp_chain_me_to_EXPECTED)
         movt r12, hi16(disp_cp_chain_me_to_EXPECTED)
@@ -4783,10 +4786,13 @@
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_ARM ( void* place_to_unchain,
+VexInvalRange unchainXDirect_ARM ( VexEndness endness_host,
+                                   void* place_to_unchain,
                                    void* place_to_jump_to_EXPECTED,
                                    void* disp_cp_chain_me )
 {
+   vassert(endness_host == VexEndnessLE);
+
    /* What we're expecting to see is:
         (general case)
           movw r12, lo16(place_to_jump_to_EXPECTED)
@@ -4844,9 +4850,11 @@
 
 /* Patch the counter address into a profile inc point, as previously
    created by the ARMin_ProfInc case for emit_ARMInstr. */
-VexInvalRange patchProfInc_ARM ( void*  place_to_patch,
+VexInvalRange patchProfInc_ARM ( VexEndness endness_host,
+                                 void*  place_to_patch,
                                  ULong* location_of_counter )
 {
+   vassert(endness_host == VexEndnessLE);
    vassert(sizeof(ULong*) == 4);
    UInt* p = (UInt*)place_to_patch;
    vassert(0 == (3 & (HWord)p));
diff --git a/priv/host_arm_defs.h b/priv/host_arm_defs.h
index f1ce4a7..7488982 100644
--- a/priv/host_arm_defs.h
+++ b/priv/host_arm_defs.h
@@ -1027,6 +1027,7 @@
 extern Int  emit_ARMInstr        ( /*MB_MOD*/Bool* is_profInc,
                                    UChar* buf, Int nbuf, ARMInstr* i, 
                                    Bool mode64,
+                                   VexEndness endness_host,
                                    void* disp_cp_chain_me_to_slowEP,
                                    void* disp_cp_chain_me_to_fastEP,
                                    void* disp_cp_xindir,
@@ -1051,19 +1052,22 @@
 /* How big is an event check?  This is kind of a kludge because it
    depends on the offsets of host_EvC_FAILADDR and
    host_EvC_COUNTER. */
-extern Int evCheckSzB_ARM ( void );
+extern Int evCheckSzB_ARM ( VexEndness endness_host );
 
 /* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_ARM ( void* place_to_chain,
+extern VexInvalRange chainXDirect_ARM ( VexEndness endness_host,
+                                        void* place_to_chain,
                                         void* disp_cp_chain_me_EXPECTED,
                                         void* place_to_jump_to );
 
-extern VexInvalRange unchainXDirect_ARM ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_ARM ( VexEndness endness_host,
+                                          void* place_to_unchain,
                                           void* place_to_jump_to_EXPECTED,
                                           void* disp_cp_chain_me );
 
 /* Patch the counter location into an existing ProfInc point. */
-extern VexInvalRange patchProfInc_ARM ( void*  place_to_patch,
+extern VexInvalRange patchProfInc_ARM ( VexEndness endness_host,
+                                        void*  place_to_patch,
                                         ULong* location_of_counter );
 
 
diff --git a/priv/host_arm_isel.c b/priv/host_arm_isel.c
index 537a355..8235282 100644
--- a/priv/host_arm_isel.c
+++ b/priv/host_arm_isel.c
@@ -6331,6 +6331,9 @@
    /* sanity ... */
    vassert(arch_host == VexArchARM);
 
+   /* Check that the host's endianness is as expected. */
+   vassert(archinfo_host->endness == VexEndnessLE);
+
    /* guard against unexpected space regressions */
    vassert(sizeof(ARMInstr) <= 28);
 
diff --git a/priv/host_mips_defs.c b/priv/host_mips_defs.c
index 1bf81b2..58e50d4 100644
--- a/priv/host_mips_defs.c
+++ b/priv/host_mips_defs.c
@@ -2920,6 +2920,7 @@
 Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc,
                      UChar* buf, Int nbuf, MIPSInstr* i,
                      Bool mode64,
+                     VexEndness endness_host,
                      void* disp_cp_chain_me_to_slowEP,
                      void* disp_cp_chain_me_to_fastEP,
                      void* disp_cp_xindir,
@@ -4229,7 +4230,7 @@
          /* nofail: */
 
          /* Crosscheck */
-         vassert(evCheckSzB_MIPS() == (UChar*)p - (UChar*)p0);
+         vassert(evCheckSzB_MIPS(endness_host) == (UChar*)p - (UChar*)p0);
          goto done;
       }
 
@@ -4315,7 +4316,7 @@
 /* How big is an event check?  See case for Min_EvCheck in
    emit_MIPSInstr just above.  That crosschecks what this returns, so
    we can tell if we're inconsistent. */
-Int evCheckSzB_MIPS ( void )
+Int evCheckSzB_MIPS ( VexEndness endness_host )
 {
   UInt kInstrSize = 4;
   return 7*kInstrSize;
@@ -4323,11 +4324,13 @@
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_MIPS ( void* place_to_chain,
+VexInvalRange chainXDirect_MIPS ( VexEndness endness_host,
+                                  void* place_to_chain,
                                   void* disp_cp_chain_me_EXPECTED,
                                   void* place_to_jump_to,
                                   Bool  mode64 )
 {
+   vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
    /* What we're expecting to see is:
         move r9, disp_cp_chain_me_to_EXPECTED
         jalr r9
@@ -4369,11 +4372,13 @@
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_MIPS ( void* place_to_unchain,
+VexInvalRange unchainXDirect_MIPS ( VexEndness endness_host,
+                                    void* place_to_unchain,
                                     void* place_to_jump_to_EXPECTED,
                                     void* disp_cp_chain_me,
                                     Bool  mode64 )
 {
+   vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
    /* What we're expecting to see is:
         move r9, place_to_jump_to_EXPECTED
         jalr r9
@@ -4413,13 +4418,16 @@
 
 /* Patch the counter address into a profile inc point, as previously
    created by the Min_ProfInc case for emit_MIPSInstr. */
-VexInvalRange patchProfInc_MIPS ( void*  place_to_patch,
+VexInvalRange patchProfInc_MIPS ( VexEndness endness_host,
+                                  void*  place_to_patch,
                                   ULong* location_of_counter, Bool mode64 )
 {
-   if (mode64)
+   vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
+   if (mode64) {
       vassert(sizeof(ULong*) == 8);
-   else
+   } else {
       vassert(sizeof(ULong*) == 4);
+   }
    UChar* p = (UChar*)place_to_patch;
    vassert(0 == (3 & (HWord)p));
    vassert(isLoadImm_EXACTLY2or6((UChar *)p, /*r*/9,
diff --git a/priv/host_mips_defs.h b/priv/host_mips_defs.h
index 22881ee..cfce11b 100644
--- a/priv/host_mips_defs.h
+++ b/priv/host_mips_defs.h
@@ -715,6 +715,7 @@
 extern Int        emit_MIPSInstr        (/*MB_MOD*/Bool* is_profInc,
                                          UChar* buf, Int nbuf, MIPSInstr* i,
                                          Bool mode64,
+                                         VexEndness endness_host,
                                          void* disp_cp_chain_me_to_slowEP,
                                          void* disp_cp_chain_me_to_fastEP,
                                          void* disp_cp_xindir,
@@ -741,25 +742,28 @@
    and so assumes that they are both <= 128, and so can use the short
    offset encoding.  This is all checked with assertions, so in the
    worst case we will merely assert at startup. */
-extern Int evCheckSzB_MIPS ( void );
+extern Int evCheckSzB_MIPS ( VexEndness endness_host );
 
 /* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_MIPS ( void* place_to_chain,
+extern VexInvalRange chainXDirect_MIPS ( VexEndness endness_host,
+                                         void* place_to_chain,
                                          void* disp_cp_chain_me_EXPECTED,
                                          void* place_to_jump_to,
                                          Bool  mode64 );
 
-extern VexInvalRange unchainXDirect_MIPS ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_MIPS ( VexEndness endness_host,
+                                           void* place_to_unchain,
                                            void* place_to_jump_to_EXPECTED,
                                            void* disp_cp_chain_me,
                                            Bool  mode64 );
 
 /* Patch the counter location into an existing ProfInc point. */
-extern VexInvalRange patchProfInc_MIPS ( void*  place_to_patch,
+extern VexInvalRange patchProfInc_MIPS ( VexEndness endness_host,
+                                         void*  place_to_patch,
                                          ULong* location_of_counter,
                                          Bool  mode64 );
 
-#endif            /* ndef __LIBVEX_HOST_MIPS_HDEFS_H */
+#endif /* ndef __VEX_HOST_MIPS_DEFS_H */
 
 /*---------------------------------------------------------------*/
 /*--- end                                    host-mips_defs.h ---*/
diff --git a/priv/host_mips_isel.c b/priv/host_mips_isel.c
index 233367d..8ee3556 100644
--- a/priv/host_mips_isel.c
+++ b/priv/host_mips_isel.c
@@ -4173,6 +4173,10 @@
            || VEX_PRID_COMP_BROADCOM == hwcaps_host
            || VEX_PRID_COMP_NETLOGIC);
 
+   /* Check that the host's endianness is as expected. */
+   vassert(archinfo_host->endness == VexEndnessLE
+           || archinfo_host->endness == VexEndnessBE);
+
    mode64 = arch_host != VexArchMIPS32;
 #if (__mips_fpr==64)
    fp_mode64 = ((VEX_MIPS_REV(hwcaps_host) == VEX_PRID_CPU_32FPR)
diff --git a/priv/host_ppc_defs.c b/priv/host_ppc_defs.c
index 7c98aeb..43101b0 100644
--- a/priv/host_ppc_defs.c
+++ b/priv/host_ppc_defs.c
@@ -3720,7 +3720,7 @@
 */
 Int emit_PPCInstr ( /*MB_MOD*/Bool* is_profInc,
                     UChar* buf, Int nbuf, PPCInstr* i, 
-                    Bool mode64,
+                    Bool mode64, VexEndness endness_host,
                     void* disp_cp_chain_me_to_slowEP,
                     void* disp_cp_chain_me_to_fastEP,
                     void* disp_cp_xindir,
@@ -5707,7 +5707,7 @@
       /* nofail: */
 
       /* Crosscheck */
-      vassert(evCheckSzB_PPC() == (UChar*)p - (UChar*)p0);
+      vassert(evCheckSzB_PPC(endness_host) == (UChar*)p - (UChar*)p0);
       goto done;
    }
 
@@ -5772,7 +5772,7 @@
 /* How big is an event check?  See case for Pin_EvCheck in
    emit_PPCInstr just above.  That crosschecks what this returns, so
    we can tell if we're inconsistent. */
-Int evCheckSzB_PPC ( void )
+Int evCheckSzB_PPC ( VexEndness endness_host )
 {
   return 28;
 }
@@ -5780,11 +5780,18 @@
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_PPC ( void* place_to_chain,
+VexInvalRange chainXDirect_PPC ( VexEndness endness_host,
+                                 void* place_to_chain,
                                  void* disp_cp_chain_me_EXPECTED,
                                  void* place_to_jump_to,
                                  Bool  mode64 )
 {
+   if (mode64) {
+      vassert(endness_host == VexEndnessBE); /* later: or LE */
+   } else {
+      vassert(endness_host == VexEndnessBE);
+   }
+
    /* What we're expecting to see is:
         imm32/64-fixed r30, disp_cp_chain_me_to_EXPECTED
         mtctr r30
@@ -5825,11 +5832,18 @@
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_PPC ( void* place_to_unchain,
+VexInvalRange unchainXDirect_PPC ( VexEndness endness_host,
+                                   void* place_to_unchain,
                                    void* place_to_jump_to_EXPECTED,
                                    void* disp_cp_chain_me,
                                    Bool  mode64 )
 {
+   if (mode64) {
+      vassert(endness_host == VexEndnessBE); /* later: or LE */
+   } else {
+      vassert(endness_host == VexEndnessBE);
+   }
+
    /* What we're expecting to see is:
         imm32/64-fixed r30, place_to_jump_to_EXPECTED
         mtctr r30
@@ -5870,10 +5884,17 @@
 
 /* Patch the counter address into a profile inc point, as previously
    created by the Pin_ProfInc case for emit_PPCInstr. */
-VexInvalRange patchProfInc_PPC ( void*  place_to_patch,
+VexInvalRange patchProfInc_PPC ( VexEndness endness_host,
+                                 void*  place_to_patch,
                                  ULong* location_of_counter,
                                  Bool   mode64 )
 {
+   if (mode64) {
+      vassert(endness_host == VexEndnessBE); /* later: or LE */
+   } else {
+      vassert(endness_host == VexEndnessBE);
+   }
+
    UChar* p = (UChar*)place_to_patch;
    vassert(0 == (3 & (HWord)p));
 
diff --git a/priv/host_ppc_defs.h b/priv/host_ppc_defs.h
index 635cc59..7f3043f 100644
--- a/priv/host_ppc_defs.h
+++ b/priv/host_ppc_defs.h
@@ -1138,6 +1138,7 @@
 extern Int          emit_PPCInstr        ( /*MB_MOD*/Bool* is_profInc,
                                            UChar* buf, Int nbuf, PPCInstr* i, 
                                            Bool mode64,
+                                           VexEndness endness_host,
                                            void* disp_cp_chain_me_to_slowEP,
                                            void* disp_cp_chain_me_to_fastEP,
                                            void* disp_cp_xindir,
@@ -1162,21 +1163,24 @@
 /* How big is an event check?  This is kind of a kludge because it
    depends on the offsets of host_EvC_FAILADDR and
    host_EvC_COUNTER. */
-extern Int evCheckSzB_PPC ( void );
+extern Int evCheckSzB_PPC ( VexEndness endness_host );
 
 /* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_PPC ( void* place_to_chain,
+extern VexInvalRange chainXDirect_PPC ( VexEndness endness_host,
+                                        void* place_to_chain,
                                         void* disp_cp_chain_me_EXPECTED,
                                         void* place_to_jump_to,
                                         Bool  mode64 );
 
-extern VexInvalRange unchainXDirect_PPC ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_PPC ( VexEndness endness_host,
+                                          void* place_to_unchain,
                                           void* place_to_jump_to_EXPECTED,
                                           void* disp_cp_chain_me,
                                           Bool  mode64 );
 
 /* Patch the counter location into an existing ProfInc point. */
-extern VexInvalRange patchProfInc_PPC ( void*  place_to_patch,
+extern VexInvalRange patchProfInc_PPC ( VexEndness endness_host,
+                                        void*  place_to_patch,
                                         ULong* location_of_counter,
                                         Bool   mode64 );
 
diff --git a/priv/host_ppc_isel.c b/priv/host_ppc_isel.c
index 7907075..d675120 100644
--- a/priv/host_ppc_isel.c
+++ b/priv/host_ppc_isel.c
@@ -5920,6 +5920,9 @@
       vassert((hwcaps_host & mask64) == 0);
    }
 
+   /* Check that the host's endianness is as expected. */
+   vassert(archinfo_host->endness == VexEndnessBE);
+
    /* Make up an initial environment to use. */
    env = LibVEX_Alloc(sizeof(ISelEnv));
    env->vreg_ctr = 0;
diff --git a/priv/host_s390_defs.c b/priv/host_s390_defs.c
index ba77250..a8e1eed 100644
--- a/priv/host_s390_defs.c
+++ b/priv/host_s390_defs.c
@@ -9831,7 +9831,8 @@
 
    The dispatch counter is a 32-bit value. */
 static UChar *
-s390_insn_evcheck_emit(UChar *buf, const s390_insn *insn)
+s390_insn_evcheck_emit(UChar *buf, const s390_insn *insn,
+                       VexEndness endness_host)
 {
    s390_amode *amode;
    UInt b, d;
@@ -9867,7 +9868,7 @@
    
    /* Make sure the size of the generated code is identical to the size
       returned by evCheckSzB_S390 */
-   vassert(evCheckSzB_S390() == code_end - code_begin);
+   vassert(evCheckSzB_S390(endness_host) == code_end - code_begin);
 
    return buf;
 }
@@ -9896,7 +9897,8 @@
 
 Int
 emit_S390Instr(Bool *is_profinc, UChar *buf, Int nbuf, s390_insn *insn,
-               Bool mode64, void *disp_cp_chain_me_to_slowEP,
+               Bool mode64, VexEndness endness_host,
+               void *disp_cp_chain_me_to_slowEP,
                void *disp_cp_chain_me_to_fastEP, void *disp_cp_xindir,
                void *disp_cp_xassisted)
 {
@@ -10057,7 +10059,7 @@
       break;
 
    case S390_INSN_EVCHECK:
-      end = s390_insn_evcheck_emit(buf, insn);
+      end = s390_insn_evcheck_emit(buf, insn, endness_host);
       break;
 
    case S390_INSN_XDIRECT:
@@ -10087,7 +10089,7 @@
 /* Return the number of bytes emitted for an S390_INSN_EVCHECK.
    See s390_insn_evcheck_emit */
 Int
-evCheckSzB_S390(void)
+evCheckSzB_S390(VexEndness endness_host)
 {
    return s390_host_has_gie ? 18 : 24;
 }
@@ -10096,7 +10098,8 @@
 /* Patch the counter address into CODE_TO_PATCH as previously
    generated by s390_insn_profinc_emit. */
 VexInvalRange
-patchProfInc_S390(void *code_to_patch, ULong *location_of_counter)
+patchProfInc_S390(VexEndness endness_host,
+                  void *code_to_patch, ULong *location_of_counter)
 {
    vassert(sizeof(ULong *) == 8);
 
@@ -10114,10 +10117,13 @@
 /* NB: what goes on here has to be very closely coordinated with the
    s390_insn_xdirect_emit code above. */
 VexInvalRange
-chainXDirect_S390(void *place_to_chain,
+chainXDirect_S390(VexEndness endness_host,
+                  void *place_to_chain,
                   void *disp_cp_chain_me_EXPECTED,
                   void *place_to_jump_to)
 {
+   vassert(endness_host == VexEndnessBE);
+
    /* What we're expecting to see @ PLACE_TO_CHAIN is:
 
         load  tchain_scratch, #disp_cp_chain_me_EXPECTED
@@ -10199,10 +10205,13 @@
 /* NB: what goes on here has to be very closely coordinated with the
    s390_insn_xdirect_emit code above. */
 VexInvalRange
-unchainXDirect_S390(void *place_to_unchain,
+unchainXDirect_S390(VexEndness endness_host,
+                    void *place_to_unchain,
                     void *place_to_jump_to_EXPECTED,
                     void *disp_cp_chain_me)
 {
+   vassert(endness_host == VexEndnessBE);
+
    /* What we're expecting to see @ PLACE_TO_UNCHAIN:
 
           load  tchain_scratch, #place_to_jump_to_EXPECTED
diff --git a/priv/host_s390_defs.h b/priv/host_s390_defs.h
index 5b6fc1f..7db4304 100644
--- a/priv/host_s390_defs.h
+++ b/priv/host_s390_defs.h
@@ -736,7 +736,7 @@
 void  mapRegs_S390Instr    ( HRegRemap *, s390_insn *, Bool );
 Bool  isMove_S390Instr     ( s390_insn *, HReg *, HReg * );
 Int   emit_S390Instr       ( Bool *, UChar *, Int, s390_insn *, Bool,
-                             void *, void *, void *, void *);
+                             VexEndness, void *, void *, void *, void *);
 void  getAllocableRegs_S390( Int *, HReg **, Bool );
 void  genSpill_S390        ( HInstr **, HInstr **, HReg , Int , Bool );
 void  genReload_S390       ( HInstr **, HInstr **, HReg , Int , Bool );
@@ -745,19 +745,22 @@
                              Int, Int, Bool, Bool, Addr64);
 
 /* Return the number of bytes of code needed for an event check */
-Int evCheckSzB_S390(void);
+Int evCheckSzB_S390(VexEndness endness_host);
 
 /* Perform a chaining and unchaining of an XDirect jump. */
-VexInvalRange chainXDirect_S390(void *place_to_chain,
+VexInvalRange chainXDirect_S390(VexEndness endness_host,
+                                void *place_to_chain,
                                 void *disp_cp_chain_me_EXPECTED,
                                 void *place_to_jump_to);
 
-VexInvalRange unchainXDirect_S390(void *place_to_unchain,
+VexInvalRange unchainXDirect_S390(VexEndness endness_host,
+                                  void *place_to_unchain,
                                   void *place_to_jump_to_EXPECTED,
                                   void *disp_cp_chain_me);
 
 /* Patch the counter location into an existing ProfInc point. */
-VexInvalRange patchProfInc_S390(void  *code_to_patch,
+VexInvalRange patchProfInc_S390(VexEndness endness_host,
+                                void  *code_to_patch,
                                 ULong *location_of_counter);
 
 /* KLUDGE: See detailled comment in host_s390_defs.c. */
diff --git a/priv/host_s390_isel.c b/priv/host_s390_isel.c
index 2e4df3c..cad0e7e 100644
--- a/priv/host_s390_isel.c
+++ b/priv/host_s390_isel.c
@@ -4094,6 +4094,9 @@
    /* Do some sanity checks */
    vassert((VEX_HWCAPS_S390X(hwcaps_host) & ~(VEX_HWCAPS_S390X_ALL)) == 0);
 
+   /* Check that the host's endianness is as expected. */
+   vassert(archinfo_host->endness == VexEndnessBE);
+
    /* Make up an initial environment to use. */
    env = LibVEX_Alloc(sizeof(ISelEnv));
    env->vreg_ctr = 0;
diff --git a/priv/host_x86_defs.c b/priv/host_x86_defs.c
index 8f5fcfe..54bad38 100644
--- a/priv/host_x86_defs.c
+++ b/priv/host_x86_defs.c
@@ -2102,7 +2102,7 @@
 
 Int emit_X86Instr ( /*MB_MOD*/Bool* is_profInc,
                     UChar* buf, Int nbuf, X86Instr* i, 
-                    Bool mode64,
+                    Bool mode64, VexEndness endness_host,
                     void* disp_cp_chain_me_to_slowEP,
                     void* disp_cp_chain_me_to_fastEP,
                     void* disp_cp_xindir,
@@ -3291,7 +3291,7 @@
       p = doAMode_M(p, fake(4), i->Xin.EvCheck.amFailAddr);
       vassert(p - p0 == 8); /* also ensures that 0x03 offset above is ok */
       /* And crosscheck .. */
-      vassert(evCheckSzB_X86() == 8);
+      vassert(evCheckSzB_X86(endness_host) == 8);
       goto done;
    }
 
@@ -3336,7 +3336,7 @@
 /* How big is an event check?  See case for Xin_EvCheck in
    emit_X86Instr just above.  That crosschecks what this returns, so
    we can tell if we're inconsistent. */
-Int evCheckSzB_X86 ( void )
+Int evCheckSzB_X86 ( VexEndness endness_host )
 {
    return 8;
 }
@@ -3344,10 +3344,13 @@
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_X86 ( void* place_to_chain,
+VexInvalRange chainXDirect_X86 ( VexEndness endness_host,
+                                 void* place_to_chain,
                                  void* disp_cp_chain_me_EXPECTED,
                                  void* place_to_jump_to )
 {
+   vassert(endness_host == VexEndnessLE);
+
    /* What we're expecting to see is:
         movl $disp_cp_chain_me_EXPECTED, %edx
         call *%edx
@@ -3389,10 +3392,13 @@
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_X86 ( void* place_to_unchain,
+VexInvalRange unchainXDirect_X86 ( VexEndness endness_host,
+                                   void* place_to_unchain,
                                    void* place_to_jump_to_EXPECTED,
                                    void* disp_cp_chain_me )
 {
+   vassert(endness_host == VexEndnessLE);
+
    /* What we're expecting to see is:
           jmp d32
           ud2;
@@ -3432,9 +3438,11 @@
 
 /* Patch the counter address into a profile inc point, as previously
    created by the Xin_ProfInc case for emit_X86Instr. */
-VexInvalRange patchProfInc_X86 ( void*  place_to_patch,
+VexInvalRange patchProfInc_X86 ( VexEndness endness_host,
+                                 void*  place_to_patch,
                                  ULong* location_of_counter )
 {
+   vassert(endness_host == VexEndnessLE);
    vassert(sizeof(ULong*) == 4);
    UChar* p = (UChar*)place_to_patch;
    vassert(p[0] == 0x83);
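
The x86 hunks above show the convention that every per-arch patcher now follows: endness_host becomes the first parameter, and the back end asserts the endianness it actually supports before inspecting or rewriting any code.  The sketch below is illustrative only; the _EXAMPLE name is hypothetical, and the real chainXDirect_X86 additionally verifies and rewrites the instruction bytes at place_to_chain.  It assumes the usual pub/libvex.h declarations (VexEndness, VexInvalRange, HWord) plus vassert:

   /* Sketch: a patcher following the new calling convention. */
   VexInvalRange chainXDirect_EXAMPLE ( VexEndness endness_host,
                                        void* place_to_chain,
                                        void* disp_cp_chain_me_EXPECTED,
                                        void* place_to_jump_to )
   {
      /* Endianness is passed in by the caller, not assumed at
         compile time, so check it up front. */
      vassert(endness_host == VexEndnessLE);

      /* ... check that the bytes at place_to_chain match the
         unchained form (the call to disp_cp_chain_me_EXPECTED),
         then overwrite them with a direct jump to
         place_to_jump_to ... */

      /* Report which bytes were modified, so the caller can
         invalidate caches as needed.  Length is 0 here only because
         the rewrite itself is elided in this sketch. */
      VexInvalRange vir = { (HWord)place_to_chain, 0 };
      return vir;
   }
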
diff --git a/priv/host_x86_defs.h b/priv/host_x86_defs.h
index 96e2c5c..49676c9 100644
--- a/priv/host_x86_defs.h
+++ b/priv/host_x86_defs.h
@@ -717,6 +717,7 @@
 extern Int          emit_X86Instr        ( /*MB_MOD*/Bool* is_profInc,
                                            UChar* buf, Int nbuf, X86Instr* i, 
                                            Bool mode64,
+                                           VexEndness endness_host,
                                            void* disp_cp_chain_me_to_slowEP,
                                            void* disp_cp_chain_me_to_fastEP,
                                            void* disp_cp_xindir,
@@ -745,19 +746,22 @@
    and so assumes that they are both <= 128, and so can use the short
    offset encoding.  This is all checked with assertions, so in the
    worst case we will merely assert at startup. */
-extern Int evCheckSzB_X86 ( void );
+extern Int evCheckSzB_X86 ( VexEndness endness_host );
 
 /* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_X86 ( void* place_to_chain,
+extern VexInvalRange chainXDirect_X86 ( VexEndness endness_host,
+                                        void* place_to_chain,
                                         void* disp_cp_chain_me_EXPECTED,
                                         void* place_to_jump_to );
 
-extern VexInvalRange unchainXDirect_X86 ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_X86 ( VexEndness endness_host,
+                                          void* place_to_unchain,
                                           void* place_to_jump_to_EXPECTED,
                                           void* disp_cp_chain_me );
 
 /* Patch the counter location into an existing ProfInc point. */
-extern VexInvalRange patchProfInc_X86 ( void*  place_to_patch,
+extern VexInvalRange patchProfInc_X86 ( VexEndness endness_host,
+                                        void*  place_to_patch,
                                         ULong* location_of_counter );
 
 
diff --git a/priv/host_x86_isel.c b/priv/host_x86_isel.c
index 16152ba..5fe816b 100644
--- a/priv/host_x86_isel.c
+++ b/priv/host_x86_isel.c
@@ -4440,6 +4440,9 @@
    vassert(sizeof(max_ga) == 8);
    vassert((max_ga >> 32) == 0);
 
+   /* Check that the host's endianness is as expected. */
+   vassert(archinfo_host->endness == VexEndnessLE);
+
    /* Make up an initial environment to use. */
    env = LibVEX_Alloc(sizeof(ISelEnv));
    env->vreg_ctr = 0;
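
As the main_main.c changes just below show, the per-host Bool host_is_bigendian disappears; callers hand VEX a VexEndness in archinfo_host, and the selectors and emitters receive it directly.  Where older code branched on such a Bool, the equivalent test against the new type is a plain comparison.  A hypothetical helper, not part of the patch and shown only to make the mapping explicit, might read:

   /* Hypothetical helper; assumes pub/libvex.h and vassert. */
   static inline Bool endness_is_big ( VexEndness e )
   {
      /* VexEndness_INVALID must never reach code generation. */
      vassert(e == VexEndnessLE || e == VexEndnessBE);
      return e == VexEndnessBE ? True : False;
   }
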
diff --git a/priv/main_main.c b/priv/main_main.c
index 4472cdf..cf9789b 100644
--- a/priv/main_main.c
+++ b/priv/main_main.c
@@ -224,7 +224,7 @@
    HInstrArray* (*iselSB)       ( IRSB*, VexArch, VexArchInfo*, VexAbiInfo*,
                                   Int, Int, Bool, Bool, Addr64 );
    Int          (*emit)         ( /*MB_MOD*/Bool*,
-                                  UChar*, Int, HInstr*, Bool,
+                                  UChar*, Int, HInstr*, Bool, VexEndness,
                                   void*, void*, void*, void* );
    IRExpr*      (*specHelper)   ( const HChar*, IRExpr**, IRStmt**, Int );
    Bool         (*preciseMemExnsFn) ( Int, Int );
@@ -232,7 +232,6 @@
    DisOneInstrFn disInstrFn;
 
    VexGuestLayout* guest_layout;
-   Bool            host_is_bigendian = False;
    IRSB*           irsb;
    HInstrArray*    vcode;
    HInstrArray*    rcode;
@@ -311,12 +310,12 @@
          ppInstr      = (void(*)(HInstr*, Bool)) ppX86Instr;
          ppReg        = (void(*)(HReg)) ppHRegX86;
          iselSB       = iselSB_X86;
-         emit         = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+         emit         = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
                                void*,void*,void*,void*))
                         emit_X86Instr;
-         host_is_bigendian = False;
          host_word_type    = Ity_I32;
          vassert(are_valid_hwcaps(VexArchX86, vta->archinfo_host.hwcaps));
+         vassert(vta->archinfo_host.endness == VexEndnessLE);
          break;
 
       case VexArchAMD64:
@@ -334,12 +333,12 @@
          ppInstr     = (void(*)(HInstr*, Bool)) ppAMD64Instr;
          ppReg       = (void(*)(HReg)) ppHRegAMD64;
          iselSB      = iselSB_AMD64;
-         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
                                void*,void*,void*,void*))
                        emit_AMD64Instr;
-         host_is_bigendian = False;
          host_word_type    = Ity_I64;
          vassert(are_valid_hwcaps(VexArchAMD64, vta->archinfo_host.hwcaps));
+         vassert(vta->archinfo_host.endness == VexEndnessLE);
          break;
 
       case VexArchPPC32:
@@ -354,12 +353,12 @@
          ppInstr     = (void(*)(HInstr*,Bool)) ppPPCInstr;
          ppReg       = (void(*)(HReg)) ppHRegPPC;
          iselSB      = iselSB_PPC;
-         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
                                void*,void*,void*,void*))
                        emit_PPCInstr;
-         host_is_bigendian = True;
          host_word_type    = Ity_I32;
          vassert(are_valid_hwcaps(VexArchPPC32, vta->archinfo_host.hwcaps));
+         vassert(vta->archinfo_host.endness == VexEndnessBE);
          break;
 
       case VexArchPPC64:
@@ -374,12 +373,13 @@
          ppInstr     = (void(*)(HInstr*, Bool)) ppPPCInstr;
          ppReg       = (void(*)(HReg)) ppHRegPPC;
          iselSB      = iselSB_PPC;
-         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
                                void*,void*,void*,void*))
                        emit_PPCInstr;
-         host_is_bigendian = True;
          host_word_type    = Ity_I64;
          vassert(are_valid_hwcaps(VexArchPPC64, vta->archinfo_host.hwcaps));
+         vassert(vta->archinfo_host.endness == VexEndnessBE
+                 /* later: || vta->archinfo_host.endness == VexEndnessLE */);
          break;
 
       case VexArchS390X:
@@ -394,11 +394,11 @@
          ppInstr     = (void(*)(HInstr*, Bool)) ppS390Instr;
          ppReg       = (void(*)(HReg)) ppHRegS390;
          iselSB      = iselSB_S390;
-         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
                                void*,void*,void*,void*)) emit_S390Instr;
-         host_is_bigendian = True;
          host_word_type    = Ity_I64;
          vassert(are_valid_hwcaps(VexArchS390X, vta->archinfo_host.hwcaps));
+         vassert(vta->archinfo_host.endness == VexEndnessBE);
          break;
 
       case VexArchARM:
@@ -413,12 +413,12 @@
          ppInstr     = (void(*)(HInstr*, Bool)) ppARMInstr;
          ppReg       = (void(*)(HReg)) ppHRegARM;
          iselSB      = iselSB_ARM;
-         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
                                void*,void*,void*,void*))
                        emit_ARMInstr;
-         host_is_bigendian = False;
          host_word_type    = Ity_I32;
          vassert(are_valid_hwcaps(VexArchARM, vta->archinfo_host.hwcaps));
+         vassert(vta->archinfo_host.endness == VexEndnessLE);
          break;
 
       case VexArchARM64:
@@ -437,12 +437,12 @@
          ppInstr     = (void(*)(HInstr*, Bool)) ppARM64Instr;
          ppReg       = (void(*)(HReg)) ppHRegARM64;
          iselSB      = iselSB_ARM64;
-         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
                                void*,void*,void*,void*))
                        emit_ARM64Instr;
-         host_is_bigendian = False;
          host_word_type    = Ity_I64;
          vassert(are_valid_hwcaps(VexArchARM64, vta->archinfo_host.hwcaps));
+         vassert(vta->archinfo_host.endness == VexEndnessLE);
          break;
 
       case VexArchMIPS32:
@@ -457,16 +457,13 @@
          ppInstr     = (void(*)(HInstr*, Bool)) ppMIPSInstr;
          ppReg       = (void(*)(HReg)) ppHRegMIPS;
          iselSB      = iselSB_MIPS;
-         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
                                void*,void*,void*,void*))
                        emit_MIPSInstr;
-#        if defined(VKI_LITTLE_ENDIAN)
-         host_is_bigendian = False;
-#        elif defined(VKI_BIG_ENDIAN)
-         host_is_bigendian = True;
-#        endif
          host_word_type    = Ity_I32;
          vassert(are_valid_hwcaps(VexArchMIPS32, vta->archinfo_host.hwcaps));
+         vassert(vta->archinfo_host.endness == VexEndnessLE
+                 || vta->archinfo_host.endness == VexEndnessBE);
          break;
 
       case VexArchMIPS64:
@@ -481,16 +478,13 @@
          ppInstr     = (void(*)(HInstr*, Bool)) ppMIPSInstr;
          ppReg       = (void(*)(HReg)) ppHRegMIPS;
          iselSB      = iselSB_MIPS;
-         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
                                void*,void*,void*,void*))
                        emit_MIPSInstr;
-#        if defined(VKI_LITTLE_ENDIAN)
-         host_is_bigendian = False;
-#        elif defined(VKI_BIG_ENDIAN)
-         host_is_bigendian = True;
-#        endif
          host_word_type    = Ity_I64;
          vassert(are_valid_hwcaps(VexArchMIPS64, vta->archinfo_host.hwcaps));
+         vassert(vta->archinfo_host.endness == VexEndnessLE
+                 || vta->archinfo_host.endness == VexEndnessBE);
          break;
 
       default:
@@ -514,6 +508,7 @@
          offB_HOST_EvC_COUNTER  = offsetof(VexGuestX86State,host_EvC_COUNTER);
          offB_HOST_EvC_FAILADDR = offsetof(VexGuestX86State,host_EvC_FAILADDR);
          vassert(are_valid_hwcaps(VexArchX86, vta->archinfo_guest.hwcaps));
+         vassert(vta->archinfo_guest.endness == VexEndnessLE);
          vassert(0 == sizeof(VexGuestX86State) % 16);
          vassert(sizeof( ((VexGuestX86State*)0)->guest_CMSTART) == 4);
          vassert(sizeof( ((VexGuestX86State*)0)->guest_CMLEN  ) == 4);
@@ -534,6 +529,7 @@
          offB_HOST_EvC_COUNTER  = offsetof(VexGuestAMD64State,host_EvC_COUNTER);
          offB_HOST_EvC_FAILADDR = offsetof(VexGuestAMD64State,host_EvC_FAILADDR);
          vassert(are_valid_hwcaps(VexArchAMD64, vta->archinfo_guest.hwcaps));
+         vassert(vta->archinfo_guest.endness == VexEndnessLE);
          vassert(0 == sizeof(VexGuestAMD64State) % 16);
          vassert(sizeof( ((VexGuestAMD64State*)0)->guest_CMSTART ) == 8);
          vassert(sizeof( ((VexGuestAMD64State*)0)->guest_CMLEN   ) == 8);
@@ -554,6 +550,7 @@
          offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC32State,host_EvC_COUNTER);
          offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC32State,host_EvC_FAILADDR);
          vassert(are_valid_hwcaps(VexArchPPC32, vta->archinfo_guest.hwcaps));
+         vassert(vta->archinfo_guest.endness == VexEndnessBE);
          vassert(0 == sizeof(VexGuestPPC32State) % 16);
          vassert(sizeof( ((VexGuestPPC32State*)0)->guest_CMSTART ) == 4);
          vassert(sizeof( ((VexGuestPPC32State*)0)->guest_CMLEN   ) == 4);
@@ -574,6 +571,8 @@
          offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC64State,host_EvC_COUNTER);
          offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC64State,host_EvC_FAILADDR);
          vassert(are_valid_hwcaps(VexArchPPC64, vta->archinfo_guest.hwcaps));
+         vassert(vta->archinfo_guest.endness == VexEndnessBE
+                 /* later: || vta->archinfo_guest.endness == VexEndnessLE */);
          vassert(0 == sizeof(VexGuestPPC64State) % 16);
          vassert(sizeof( ((VexGuestPPC64State*)0)->guest_CMSTART    ) == 8);
          vassert(sizeof( ((VexGuestPPC64State*)0)->guest_CMLEN      ) == 8);
@@ -595,6 +594,7 @@
          offB_HOST_EvC_COUNTER  = offsetof(VexGuestS390XState,host_EvC_COUNTER);
          offB_HOST_EvC_FAILADDR = offsetof(VexGuestS390XState,host_EvC_FAILADDR);
          vassert(are_valid_hwcaps(VexArchS390X, vta->archinfo_guest.hwcaps));
+         vassert(vta->archinfo_guest.endness == VexEndnessBE);
          vassert(0 == sizeof(VexGuestS390XState) % 16);
          vassert(sizeof( ((VexGuestS390XState*)0)->guest_CMSTART    ) == 8);
          vassert(sizeof( ((VexGuestS390XState*)0)->guest_CMLEN      ) == 8);
@@ -615,6 +615,7 @@
          offB_HOST_EvC_COUNTER  = offsetof(VexGuestARMState,host_EvC_COUNTER);
          offB_HOST_EvC_FAILADDR = offsetof(VexGuestARMState,host_EvC_FAILADDR);
          vassert(are_valid_hwcaps(VexArchARM, vta->archinfo_guest.hwcaps));
+         vassert(vta->archinfo_guest.endness == VexEndnessLE);
          vassert(0 == sizeof(VexGuestARMState) % 16);
          vassert(sizeof( ((VexGuestARMState*)0)->guest_CMSTART) == 4);
          vassert(sizeof( ((VexGuestARMState*)0)->guest_CMLEN  ) == 4);
@@ -635,6 +636,7 @@
          offB_HOST_EvC_COUNTER  = offsetof(VexGuestARM64State,host_EvC_COUNTER);
          offB_HOST_EvC_FAILADDR = offsetof(VexGuestARM64State,host_EvC_FAILADDR);
          vassert(are_valid_hwcaps(VexArchARM64, vta->archinfo_guest.hwcaps));
+         vassert(vta->archinfo_guest.endness == VexEndnessLE);
          vassert(0 == sizeof(VexGuestARM64State) % 16);
          vassert(sizeof( ((VexGuestARM64State*)0)->guest_CMSTART) == 8);
          vassert(sizeof( ((VexGuestARM64State*)0)->guest_CMLEN  ) == 8);
@@ -655,6 +657,8 @@
          offB_HOST_EvC_COUNTER  = offsetof(VexGuestMIPS32State,host_EvC_COUNTER);
          offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS32State,host_EvC_FAILADDR);
          vassert(are_valid_hwcaps(VexArchMIPS32, vta->archinfo_guest.hwcaps));
+         vassert(vta->archinfo_guest.endness == VexEndnessLE
+                 || vta->archinfo_guest.endness == VexEndnessBE);
          vassert(0 == sizeof(VexGuestMIPS32State) % 16);
          vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_CMSTART) == 4);
          vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_CMLEN  ) == 4);
@@ -675,6 +679,8 @@
          offB_HOST_EvC_COUNTER  = offsetof(VexGuestMIPS64State,host_EvC_COUNTER);
          offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS64State,host_EvC_FAILADDR);
          vassert(are_valid_hwcaps(VexArchMIPS64, vta->archinfo_guest.hwcaps));
+         vassert(vta->archinfo_guest.endness == VexEndnessLE
+                 || vta->archinfo_guest.endness == VexEndnessBE);
          vassert(0 == sizeof(VexGuestMIPS64State) % 16);
          vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_CMSTART) == 8);
          vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_CMLEN  ) == 8);
@@ -698,6 +704,8 @@
          we are simulating one flavour of an architecture a different
          flavour of the same architecture, which is pretty strange. */
       vassert(vta->archinfo_guest.hwcaps == vta->archinfo_host.hwcaps);
+      /* ditto */
+      vassert(vta->archinfo_guest.endness == vta->archinfo_host.endness);
    }
 
    vexAllocSanityCheck();
@@ -715,7 +723,7 @@
                      vta->guest_bytes, 
                      vta->guest_bytes_addr,
                      vta->chase_into_ok,
-                     host_is_bigendian,
+                     vta->archinfo_host.endness,
                      vta->sigill_diag,
                      vta->arch_guest,
                      &vta->archinfo_guest,
@@ -936,7 +944,8 @@
          vex_printf("\n");
       }
       j = emit( &hi_isProfInc,
-                insn_bytes, sizeof insn_bytes, hi, mode64,
+                insn_bytes, sizeof insn_bytes, hi,
+                mode64, vta->archinfo_host.endness,
                 vta->disp_cp_chain_me_to_slowEP,
                 vta->disp_cp_chain_me_to_fastEP,
                 vta->disp_cp_xindir,
@@ -993,12 +1002,13 @@
 
 /* --------- Chain/Unchain XDirects. --------- */
 
-VexInvalRange LibVEX_Chain ( VexArch arch_host,
-                             void*   place_to_chain,
-                             void*   disp_cp_chain_me_EXPECTED,
-                             void*   place_to_jump_to )
+VexInvalRange LibVEX_Chain ( VexArch    arch_host,
+                             VexEndness endness_host,
+                             void*      place_to_chain,
+                             void*      disp_cp_chain_me_EXPECTED,
+                             void*      place_to_jump_to )
 {
-   VexInvalRange (*chainXDirect)(void*, void*, void*) = NULL;
+   VexInvalRange (*chainXDirect)(VexEndness, void*, void*, void*) = NULL;
    switch (arch_host) {
       case VexArchX86:
          chainXDirect = chainXDirect_X86; break;
@@ -1011,19 +1021,23 @@
       case VexArchS390X:
          chainXDirect = chainXDirect_S390; break;
       case VexArchPPC32:
-         return chainXDirect_PPC(place_to_chain,
+         return chainXDirect_PPC(endness_host,
+                                 place_to_chain,
                                  disp_cp_chain_me_EXPECTED,
                                  place_to_jump_to, False/*!mode64*/);
       case VexArchPPC64:
-         return chainXDirect_PPC(place_to_chain,
+         return chainXDirect_PPC(endness_host,
+                                 place_to_chain,
                                  disp_cp_chain_me_EXPECTED,
                                  place_to_jump_to, True/*mode64*/);
       case VexArchMIPS32:
-         return chainXDirect_MIPS(place_to_chain,
+         return chainXDirect_MIPS(endness_host,
+                                  place_to_chain,
                                   disp_cp_chain_me_EXPECTED,
                                   place_to_jump_to, False/*!mode64*/);
       case VexArchMIPS64:
-         return chainXDirect_MIPS(place_to_chain,
+         return chainXDirect_MIPS(endness_host,
+                                  place_to_chain,
                                   disp_cp_chain_me_EXPECTED,
                                   place_to_jump_to, True/*!mode64*/);
       default:
@@ -1031,17 +1045,18 @@
    }
    vassert(chainXDirect);
    VexInvalRange vir
-      = chainXDirect(place_to_chain, disp_cp_chain_me_EXPECTED,
-                     place_to_jump_to);
+      = chainXDirect(endness_host, place_to_chain,
+                     disp_cp_chain_me_EXPECTED, place_to_jump_to);
    return vir;
 }
 
-VexInvalRange LibVEX_UnChain ( VexArch arch_host,
-                               void*   place_to_unchain,
-                               void*   place_to_jump_to_EXPECTED,
-                               void*   disp_cp_chain_me )
+VexInvalRange LibVEX_UnChain ( VexArch    arch_host,
+                               VexEndness endness_host,
+                               void*      place_to_unchain,
+                               void*      place_to_jump_to_EXPECTED,
+                               void*      disp_cp_chain_me )
 {
-   VexInvalRange (*unchainXDirect)(void*, void*, void*) = NULL;
+   VexInvalRange (*unchainXDirect)(VexEndness, void*, void*, void*) = NULL;
    switch (arch_host) {
       case VexArchX86:
          unchainXDirect = unchainXDirect_X86; break;
@@ -1054,19 +1069,23 @@
       case VexArchS390X:
          unchainXDirect = unchainXDirect_S390; break;
       case VexArchPPC32:
-         return unchainXDirect_PPC(place_to_unchain,
+         return unchainXDirect_PPC(endness_host,
+                                   place_to_unchain,
                                    place_to_jump_to_EXPECTED,
                                    disp_cp_chain_me, False/*!mode64*/);
       case VexArchPPC64:
-         return unchainXDirect_PPC(place_to_unchain,
+         return unchainXDirect_PPC(endness_host,
+                                   place_to_unchain,
                                    place_to_jump_to_EXPECTED,
                                    disp_cp_chain_me, True/*mode64*/);
       case VexArchMIPS32:
-         return unchainXDirect_MIPS(place_to_unchain,
+         return unchainXDirect_MIPS(endness_host,
+                                    place_to_unchain,
                                     place_to_jump_to_EXPECTED,
                                     disp_cp_chain_me, False/*!mode64*/);
       case VexArchMIPS64:
-         return unchainXDirect_MIPS(place_to_unchain,
+         return unchainXDirect_MIPS(endness_host,
+                                    place_to_unchain,
                                     place_to_jump_to_EXPECTED,
                                     disp_cp_chain_me, True/*!mode64*/);
       default:
@@ -1074,32 +1093,33 @@
    }
    vassert(unchainXDirect);
    VexInvalRange vir
-      = unchainXDirect(place_to_unchain, place_to_jump_to_EXPECTED,
-                       disp_cp_chain_me);
+      = unchainXDirect(endness_host, place_to_unchain,
+                       place_to_jump_to_EXPECTED, disp_cp_chain_me);
    return vir;
 }
 
-Int LibVEX_evCheckSzB ( VexArch arch_host )
+Int LibVEX_evCheckSzB ( VexArch    arch_host,
+                        VexEndness endness_host )
 {
    static Int cached = 0; /* DO NOT MAKE NON-STATIC */
    if (UNLIKELY(cached == 0)) {
       switch (arch_host) {
          case VexArchX86:
-            cached = evCheckSzB_X86(); break;
+            cached = evCheckSzB_X86(endness_host); break;
          case VexArchAMD64:
-            cached = evCheckSzB_AMD64(); break;
+            cached = evCheckSzB_AMD64(endness_host); break;
          case VexArchARM:
-            cached = evCheckSzB_ARM(); break;
+            cached = evCheckSzB_ARM(endness_host); break;
          case VexArchARM64:
-            cached = evCheckSzB_ARM64(); break;
+            cached = evCheckSzB_ARM64(endness_host); break;
          case VexArchS390X:
-            cached = evCheckSzB_S390(); break;
+            cached = evCheckSzB_S390(endness_host); break;
          case VexArchPPC32:
          case VexArchPPC64:
-            cached = evCheckSzB_PPC(); break;
+            cached = evCheckSzB_PPC(endness_host); break;
          case VexArchMIPS32:
          case VexArchMIPS64:
-            cached = evCheckSzB_MIPS(); break;
+            cached = evCheckSzB_MIPS(endness_host); break;
          default:
             vassert(0);
       }
@@ -1107,11 +1127,12 @@
    return cached;
 }
 
-VexInvalRange LibVEX_PatchProfInc ( VexArch arch_host,
-                                    void*   place_to_patch,
-                                    ULong*  location_of_counter )
+VexInvalRange LibVEX_PatchProfInc ( VexArch    arch_host,
+                                    VexEndness endness_host,
+                                    void*      place_to_patch,
+                                    ULong*     location_of_counter )
 {
-   VexInvalRange (*patchProfInc)(void*,ULong*) = NULL;
+   VexInvalRange (*patchProfInc)(VexEndness,void*,ULong*) = NULL;
    switch (arch_host) {
       case VexArchX86:
          patchProfInc = patchProfInc_X86; break;
@@ -1122,23 +1143,23 @@
       case VexArchS390X:
          patchProfInc = patchProfInc_S390; break;
       case VexArchPPC32:
-         return patchProfInc_PPC(place_to_patch,
+         return patchProfInc_PPC(endness_host, place_to_patch,
                                  location_of_counter, False/*!mode64*/);
       case VexArchPPC64:
-         return patchProfInc_PPC(place_to_patch,
+         return patchProfInc_PPC(endness_host, place_to_patch,
                                  location_of_counter, True/*mode64*/);
       case VexArchMIPS32:
-         return patchProfInc_MIPS(place_to_patch,
+         return patchProfInc_MIPS(endness_host, place_to_patch,
                                   location_of_counter, False/*!mode64*/);
       case VexArchMIPS64:
-         return patchProfInc_MIPS(place_to_patch,
+         return patchProfInc_MIPS(endness_host, place_to_patch,
                                   location_of_counter, True/*!mode64*/);
       default:
          vassert(0);
    }
    vassert(patchProfInc);
    VexInvalRange vir
-      = patchProfInc(place_to_patch, location_of_counter);
+      = patchProfInc(endness_host, place_to_patch, location_of_counter);
    return vir;
 }
 
@@ -1216,6 +1237,16 @@
    }
 }
 
+const HChar* LibVEX_ppVexEndness ( VexEndness endness )
+{
+   switch (endness) {
+      case VexEndness_INVALID: return "INVALID";
+      case VexEndnessLE:       return "LittleEndian";
+      case VexEndnessBE:       return "BigEndian";
+      default:                 return "VexEndness???";
+   }
+}
+
 const HChar* LibVEX_ppVexHwCaps ( VexArch arch, UInt hwcaps )
 {
    const HChar* str = show_hwcaps(arch,hwcaps);
@@ -1227,10 +1258,11 @@
 void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai )
 {
    vex_bzero(vai, sizeof(*vai));
-   vai->hwcaps              = 0;
-   vai->ppc_icache_line_szB = 0;
-   vai->ppc_dcbz_szB        = 0;
-   vai->ppc_dcbzl_szB       = 0;
+   vai->hwcaps                  = 0;
+   vai->endness                 = VexEndness_INVALID;
+   vai->ppc_icache_line_szB     = 0;
+   vai->ppc_dcbz_szB            = 0;
+   vai->ppc_dcbzl_szB           = 0;
    vai->arm64_dMinLine_lg2_szB  = 0;
    vai->arm64_iMinLine_lg2_szB  = 0;
    vai->hwcache_info.num_levels = 0;
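
With the main_main.c plumbing in place, the client-facing flow becomes: fill in VexArchInfo.endness once (LibVEX_default_VexArchInfo now initialises it to VexEndness_INVALID, so forgetting it trips the new vasserts), then pass the same value to every later patching call.  A rough caller-side sketch follows; the VexArchX86/VexEndnessLE values are purely illustrative, example_chain is a hypothetical client function, and the three code addresses stand for whatever the client already tracks:

   /* Sketch only; assumes pub/libvex.h. */
   static void example_chain ( void* place_to_chain,
                               void* disp_cp_chain_me_EXPECTED,
                               void* place_to_jump_to )
   {
      VexArchInfo vai_host;
      LibVEX_default_VexArchInfo(&vai_host); /* endness == VexEndness_INVALID */
      vai_host.endness = VexEndnessLE;       /* the caller must now say */

      /* Endianness travels alongside the architecture in every
         patching call instead of being decided inside VEX. */
      VexInvalRange vir
         = LibVEX_Chain(VexArchX86, vai_host.endness,
                        place_to_chain, disp_cp_chain_me_EXPECTED,
                        place_to_jump_to);
      (void)vir;  /* caller would invalidate this range */

      /* The event-check size query takes the endianness too. */
      Int szB = LibVEX_evCheckSzB(VexArchX86, vai_host.endness);
      (void)szB;
   }
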
diff --git a/pub/libvex.h b/pub/libvex.h
index 2dfb419..3019ded 100644
--- a/pub/libvex.h
+++ b/pub/libvex.h
@@ -51,7 +51,7 @@
 
 typedef 
    enum { 
-      VexArch_INVALID,
+      VexArch_INVALID=0x400,
       VexArchX86, 
       VexArchAMD64, 
       VexArchARM,
@@ -65,6 +65,16 @@
    VexArch;
 
 
+/* Information about endianness. */
+typedef
+   enum {
+      VexEndness_INVALID=0x600, /* unknown endianness */
+      VexEndnessLE,             /* little endian */
+      VexEndnessBE              /* big endian */
+   }
+   VexEndness;
+
+
 /* For a given architecture, these specify extra capabilities beyond
    the minimum supported (baseline) capabilities.  They may be OR'd
    together, although some combinations don't make sense.  (eg, SSE2
@@ -220,12 +230,13 @@
 /* These return statically allocated strings. */
 
 extern const HChar* LibVEX_ppVexArch    ( VexArch );
+extern const HChar* LibVEX_ppVexEndness ( VexEndness endness );
 extern const HChar* LibVEX_ppVexHwCaps  ( VexArch, UInt );
 
 
 /* The various kinds of caches */
 typedef enum {
-   DATA_CACHE,
+   DATA_CACHE=0x500,
    INSN_CACHE,
    UNIFIED_CACHE
 } VexCacheKind;
@@ -270,8 +281,9 @@
 
 typedef
    struct {
-      /* The following two fields are mandatory. */
-      UInt hwcaps;
+      /* The following three fields are mandatory. */
+      UInt         hwcaps;
+      VexEndness   endness;
       VexCacheInfo hwcache_info;
       /* PPC32/PPC64 only: size of instruction cache line */
       Int ppc_icache_line_szB;
@@ -389,7 +401,7 @@
    points.
 
    VexRegUpdAllregsAtEachInsn : all registers up to date at each instruction. */
-typedef enum { VexRegUpdSpAtMemAccess,
+typedef enum { VexRegUpdSpAtMemAccess=0x700,
                VexRegUpdUnwindregsAtMemAccess,
                VexRegUpdAllregsAtMemAccess,
                VexRegUpdAllregsAtEachInsn } VexRegisterUpdates;
@@ -583,7 +595,7 @@
 typedef
    struct {
       /* overall status */
-      enum { VexTransOK,
+      enum { VexTransOK=0x800,
              VexTransAccessFail, VexTransOutputFull } status;
       /* The number of extents that have a self-check (0 to 3) */
       UInt n_sc_extents;
@@ -778,35 +790,39 @@
    currently contains a call to the dispatcher specified by
    disp_cp_chain_me_EXPECTED. */
 extern
-VexInvalRange LibVEX_Chain ( VexArch arch_host,
-                             void*   place_to_chain,
-                             void*   disp_cp_chain_me_EXPECTED,
-                             void*   place_to_jump_to );
+VexInvalRange LibVEX_Chain ( VexArch    arch_host,
+                             VexEndness endness_host,
+                             void*      place_to_chain,
+                             void*      disp_cp_chain_me_EXPECTED,
+                             void*      place_to_jump_to );
 
 /* Undo an XDirect jump located at place_to_unchain, so it is
    converted back into a call to disp_cp_chain_me.  It is expected
    (and checked) that this site currently contains a jump directly to
    the address specified by place_to_jump_to_EXPECTED. */
 extern
-VexInvalRange LibVEX_UnChain ( VexArch arch_host,
-                               void*   place_to_unchain,
-                               void*   place_to_jump_to_EXPECTED,
-                               void*   disp_cp_chain_me );
+VexInvalRange LibVEX_UnChain ( VexArch    arch_host,
+                               VexEndness endness_host,
+                               void*      place_to_unchain,
+                               void*      place_to_jump_to_EXPECTED,
+                               void*      disp_cp_chain_me );
 
 /* Returns a constant -- the size of the event check that is put at
    the start of every translation.  This makes it possible to
    calculate the fast entry point address if the slow entry point
    address is known (the usual case), or vice versa. */
 extern
-Int LibVEX_evCheckSzB ( VexArch arch_host );
+Int LibVEX_evCheckSzB ( VexArch    arch_host,
+                        VexEndness endness_host );
 
 
 /* Patch the counter location into an existing ProfInc point.  The
    specified point is checked to make sure it is plausible. */
 extern
-VexInvalRange LibVEX_PatchProfInc ( VexArch arch_host,
-                                    void*   place_to_patch,
-                                    ULong*  location_of_counter );
+VexInvalRange LibVEX_PatchProfInc ( VexArch    arch_host,
+                                    VexEndness endness_host,
+                                    void*      place_to_patch,
+                                    ULong*     location_of_counter );
 
 
 /*-------------------------------------------------------*/