Change AMD64Instr_CMov64 so that the source can only be a register
rather than register-or-memory (an AMD64RM).  This avoids duplicating
the conditional-load functionality introduced in r3075 via
AMD64Instr_CLoad, and in practice it has no effect on the quality of
the generated code.

git-svn-id: svn://svn.valgrind.org/vex/trunk@3076 8f6e269a-dfd6-0310-a8e1-e2731360e62c
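
For context: the reg-reg form kept by this change is the standard
x86-64 CMOVcc encoding, REX.W + 0F (40+cc) /r, with the destination
in the ModRM reg field and the source in r/m; these are exactly the
bytes the new Ain_CMov64 emit case below produces.  A minimal
self-contained sketch of that encoding (a hypothetical standalone
helper, not part of this patch; 'cc' is the 4-bit x86 condition code,
matching the "0x40 + cond" byte in the emitter, and register numbers
are the usual 0..15 GPR encodings):

   #include <stdint.h>

   /* Hypothetical sketch: encode "cmovCC dst, src" for 64-bit GPRs,
      mirroring the reg-reg path this commit keeps. */
   static uint8_t* emit_cmov64_rr ( uint8_t* p, unsigned cc,
                                    unsigned srcReg, unsigned dstReg )
   {
      /* REX.W=1 (0x48); REX.R (bit 2) extends ModRM.reg = dst,
         REX.B (bit 0) extends ModRM.r/m = src. */
      *p++ = (uint8_t)(0x48 | ((dstReg >> 3) << 2) | (srcReg >> 3));
      *p++ = 0x0F;                                /* two-byte escape */
      *p++ = (uint8_t)(0x40 + (cc & 0xF));        /* CMOVcc opcode   */
      *p++ = (uint8_t)(0xC0 | ((dstReg & 7) << 3) /* mod=11, reg=dst */
                            | (srcReg & 7));      /* r/m=src         */
      return p;
   }

For example, cc=2 (Acc_B), src=%rsi (6), dst=%rax (0) yields the
bytes 48 0F 42 C6, i.e. "cmovb %rsi,%rax".  The memory-source form
removed below is what AMD64Instr_CLoad (r3075) provides as a
conditional load; and where an if-false operand lives in memory, the
instruction selector now materialises it in a register first via
iselIntExpr_R, so the cmov itself stays reg-reg.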
diff --git a/priv/host_amd64_defs.c b/priv/host_amd64_defs.c
index 13925fa..fdbf05e 100644
--- a/priv/host_amd64_defs.c
+++ b/priv/host_amd64_defs.c
@@ -736,7 +736,7 @@
    return i;
 }
 
-AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode cond, AMD64RM* src, HReg dst ) {
+AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode cond, HReg src, HReg dst ) {
    AMD64Instr* i      = LibVEX_Alloc(sizeof(AMD64Instr));
    i->tag             = Ain_CMov64;
    i->Ain.CMov64.cond = cond;
@@ -1128,7 +1128,7 @@
 
       case Ain_CMov64:
          vex_printf("cmov%s ", showAMD64CondCode(i->Ain.CMov64.cond));
-         ppAMD64RM(i->Ain.CMov64.src);
+         ppHRegAMD64(i->Ain.CMov64.src);
          vex_printf(",");
          ppHRegAMD64(i->Ain.CMov64.dst);
          return;
@@ -1481,7 +1481,7 @@
          addRegUsage_AMD64AMode(u, i->Ain.XAssisted.amRIP);
          return;
       case Ain_CMov64:
-         addRegUsage_AMD64RM(u, i->Ain.CMov64.src, HRmRead);
+         addHRegUse(u, HRmRead,   i->Ain.CMov64.src);
          addHRegUse(u, HRmModify, i->Ain.CMov64.dst);
          return;
       case Ain_CLoad:
@@ -1717,7 +1717,7 @@
          mapRegs_AMD64AMode(m, i->Ain.XAssisted.amRIP);
          return;
       case Ain_CMov64:
-         mapRegs_AMD64RM(m, i->Ain.CMov64.src);
+         mapReg(m, &i->Ain.CMov64.src);
          mapReg(m, &i->Ain.CMov64.dst);
          return;
       case Ain_CLoad:
@@ -3000,21 +3000,11 @@
 
    case Ain_CMov64:
       vassert(i->Ain.CMov64.cond != Acc_ALWAYS);
-      if (i->Ain.CMov64.src->tag == Arm_Reg) {
-         *p++ = rexAMode_R(i->Ain.CMov64.dst, i->Ain.CMov64.src->Arm.Reg.reg);
-         *p++ = 0x0F;
-         *p++ = toUChar(0x40 + (0xF & i->Ain.CMov64.cond));
-         p = doAMode_R(p, i->Ain.CMov64.dst, i->Ain.CMov64.src->Arm.Reg.reg);
-         goto done;
-      }
-      if (i->Ain.CMov64.src->tag == Arm_Mem) {
-         *p++ = rexAMode_M(i->Ain.CMov64.dst, i->Ain.CMov64.src->Arm.Mem.am);
-         *p++ = 0x0F;
-         *p++ = toUChar(0x40 + (0xF & i->Ain.CMov64.cond));
-         p = doAMode_M(p, i->Ain.CMov64.dst, i->Ain.CMov64.src->Arm.Mem.am);
-         goto done;
-      }
-      break;
+      *p++ = rexAMode_R(i->Ain.CMov64.dst, i->Ain.CMov64.src);
+      *p++ = 0x0F;
+      *p++ = toUChar(0x40 + (0xF & i->Ain.CMov64.cond));
+      p = doAMode_R(p, i->Ain.CMov64.dst, i->Ain.CMov64.src);
+      goto done;
 
    case Ain_CLoad: {
       vassert(i->Ain.CLoad.cond != Acc_ALWAYS);
diff --git a/priv/host_amd64_defs.h b/priv/host_amd64_defs.h
index 02c89e2..6ebe9b6 100644
--- a/priv/host_amd64_defs.h
+++ b/priv/host_amd64_defs.h
@@ -367,7 +367,7 @@
       Ain_XDirect,     /* direct transfer to GA */
       Ain_XIndir,      /* indirect transfer to GA */
       Ain_XAssisted,   /* assisted transfer to GA */
-      Ain_CMov64,      /* conditional move */
+      Ain_CMov64,      /* conditional move, 64-bit reg-reg only */
       Ain_CLoad,       /* cond. load to int reg, 32 bit ZX or 64 bit only */
       Ain_MovxLQ,      /* reg-reg move, zx-ing/sx-ing top half */
       Ain_LoadEX,      /* mov{s,z}{b,w,l}q from mem to reg */
@@ -503,7 +503,7 @@
             be the bogus Acc_ALWAYS. */
          struct {
             AMD64CondCode cond;
-            AMD64RM*      src;
+            HReg          src;
             HReg          dst;
          } CMov64;
          /* conditional load to int reg, 32 bit ZX or 64 bit only.
@@ -718,7 +718,7 @@
                                            AMD64CondCode cond );
 extern AMD64Instr* AMD64Instr_XAssisted  ( HReg dstGA, AMD64AMode* amRIP,
                                            AMD64CondCode cond, IRJumpKind jk );
-extern AMD64Instr* AMD64Instr_CMov64     ( AMD64CondCode, AMD64RM* src, HReg dst );
+extern AMD64Instr* AMD64Instr_CMov64     ( AMD64CondCode, HReg src, HReg dst );
 extern AMD64Instr* AMD64Instr_CLoad      ( AMD64CondCode cond, UChar szB,
                                            AMD64AMode* addr, HReg dst );
 extern AMD64Instr* AMD64Instr_MovxLQ     ( Bool syned, HReg src, HReg dst );
diff --git a/priv/host_amd64_isel.c b/priv/host_amd64_isel.c
index 7926294..999ce95 100644
--- a/priv/host_amd64_isel.c
+++ b/priv/host_amd64_isel.c
@@ -1242,7 +1242,7 @@
          HReg src2 = iselIntExpr_R(env, e->Iex.Binop.arg2);
          addInstr(env, mk_iMOVsd_RR(src1, dst));
          addInstr(env, AMD64Instr_Alu32R(Aalu_CMP, AMD64RMI_Reg(src2), dst));
-         addInstr(env, AMD64Instr_CMov64(Acc_B, AMD64RM_Reg(src2), dst));
+         addInstr(env, AMD64Instr_CMov64(Acc_B, src2, dst));
          return dst;
       }
 
@@ -1862,7 +1862,7 @@
       if ((ty == Ity_I64 || ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8)
           && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
          HReg     r1  = iselIntExpr_R(env, e->Iex.ITE.iftrue);
-         AMD64RM* r0  = iselIntExpr_RM(env, e->Iex.ITE.iffalse);
+         HReg     r0  = iselIntExpr_R(env, e->Iex.ITE.iffalse);
          HReg     dst = newVRegI(env);
          addInstr(env, mk_iMOVsd_RR(r1,dst));
          AMD64CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
@@ -4650,8 +4650,7 @@
             default: goto unhandled_cas;
          }
          addInstr(env, AMD64Instr_ACAS(am, sz));
-         addInstr(env, AMD64Instr_CMov64(
-                          Acc_NZ, AMD64RM_Reg(hregAMD64_RAX()), rOld));
+         addInstr(env, AMD64Instr_CMov64(Acc_NZ, hregAMD64_RAX(), rOld));
          return;
       } else {
          /* double CAS */
@@ -4689,12 +4688,8 @@
          addInstr(env, mk_iMOVsd_RR(rDataHi, hregAMD64_RCX()));
          addInstr(env, mk_iMOVsd_RR(rDataLo, hregAMD64_RBX()));
          addInstr(env, AMD64Instr_DACAS(am, sz));
-         addInstr(env,
-                  AMD64Instr_CMov64(
-                     Acc_NZ, AMD64RM_Reg(hregAMD64_RDX()), rOldHi));
-         addInstr(env,
-                  AMD64Instr_CMov64(
-                     Acc_NZ, AMD64RM_Reg(hregAMD64_RAX()), rOldLo));
+         addInstr(env, AMD64Instr_CMov64(Acc_NZ, hregAMD64_RDX(), rOldHi));
+         addInstr(env, AMD64Instr_CMov64(Acc_NZ, hregAMD64_RAX(), rOldLo));
          return;
       }
       unhandled_cas: