Rename the IROps for reciprocal estimate, reciprocal step, reciprocal
sqrt estimate and reciprocal sqrt step so that their names are
consistent.  Remove the 64Fx2 and 64F0x2 versions of those ops, since
they are never used.  While doing so, observe that Iop_RSqrt32Fx4 and
Iop_Rsqrte32Fx4 were duplicates of one another, and merge them into a
single op, Iop_RSqrtEst32Fx4.  No functional change.


git-svn-id: svn://svn.valgrind.org/vex/trunk@2932 8f6e269a-dfd6-0310-a8e1-e2731360e62c
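
For reference, the per-lane semantics of the two "step" ops renamed
here, as documented by the libvex_ir.h comments in this patch, are
roughly as follows.  This is an illustrative scalar sketch only (the
helper names are invented, not VEX functions); these formulas are the
Newton-Raphson refinement steps normally paired with the corresponding
estimate ops.

   #include <math.h>   /* isinf */

   /* Iop_RecipStep32Fx{2,4}: per lane, 2.0 - a*b, except that
      0 * (+/-)Inf is defined to give 2.0 rather than NaN. */
   static float recip_step_lane ( float a, float b )
   {
      if ((a == 0.0f && isinf(b)) || (isinf(a) && b == 0.0f))
         return 2.0f;
      return 2.0f - a * b;
   }

   /* Iop_RSqrtStep32Fx{2,4}: per lane, (3.0 - a*b) / 2.0, except that
      0 * (+/-)Inf is defined to give 1.5. */
   static float rsqrt_step_lane ( float a, float b )
   {
      if ((a == 0.0f && isinf(b)) || (isinf(a) && b == 0.0f))
         return 1.5f;
      return (3.0f - a * b) / 2.0f;
   }
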
diff --git a/priv/guest_amd64_toIR.c b/priv/guest_amd64_toIR.c
index 9ee8319..78df883 100644
--- a/priv/guest_amd64_toIR.c
+++ b/priv/guest_amd64_toIR.c
@@ -12671,13 +12671,13 @@
       /* F3 0F 52 = RSQRTSS -- approx reciprocal sqrt 32F0x4 from R/M to R */
       if (haveF3no66noF2(pfx) && sz == 4) {
          delta = dis_SSE_E_to_G_unary_lo32( vbi, pfx, delta, 
-                                            "rsqrtss", Iop_RSqrt32F0x4 );
+                                            "rsqrtss", Iop_RSqrtEst32F0x4 );
          goto decode_success;
       }
       /* 0F 52 = RSQRTPS -- approx reciprocal sqrt 32Fx4 from R/M to R */
       if (haveNo66noF2noF3(pfx) && sz == 4) {
          delta = dis_SSE_E_to_G_unary_all( vbi, pfx, delta, 
-                                           "rsqrtps", Iop_RSqrt32Fx4 );
+                                           "rsqrtps", Iop_RSqrtEst32Fx4 );
          goto decode_success;
       }
       break;
@@ -12686,13 +12686,13 @@
       /* F3 0F 53 = RCPSS -- approx reciprocal 32F0x4 from R/M to R */
       if (haveF3no66noF2(pfx) && sz == 4) {
          delta = dis_SSE_E_to_G_unary_lo32( vbi, pfx, delta,
-                                            "rcpss", Iop_Recip32F0x4 );
+                                            "rcpss", Iop_RecipEst32F0x4 );
          goto decode_success;
       }
       /* 0F 53 = RCPPS -- approx reciprocal 32Fx4 from R/M to R */
       if (haveNo66noF2noF3(pfx) && sz == 4) {
          delta = dis_SSE_E_to_G_unary_all( vbi, pfx, delta,
-                                           "rcpps", Iop_Recip32Fx4 );
+                                           "rcpps", Iop_RecipEst32Fx4 );
          goto decode_success;
       }
       break;
@@ -24365,19 +24365,20 @@
       /* VRSQRTSS xmm3/m64(E), xmm2(V), xmm1(G) = VEX.NDS.LIG.F3.0F.WIG 52 /r */
       if (haveF3no66noF2(pfx)) {
          delta = dis_AVX128_E_V_to_G_lo32_unary(
-                    uses_vvvv, vbi, pfx, delta, "vrsqrtss", Iop_RSqrt32F0x4 );
+                    uses_vvvv, vbi, pfx, delta, "vrsqrtss",
+                    Iop_RSqrtEst32F0x4 );
          goto decode_success;
       }
       /* VRSQRTPS xmm2/m128(E), xmm1(G) = VEX.NDS.128.0F.WIG 52 /r */
       if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
          delta = dis_AVX128_E_to_G_unary_all(
-                    uses_vvvv, vbi, pfx, delta, "vrsqrtps", Iop_RSqrt32Fx4 );
+                    uses_vvvv, vbi, pfx, delta, "vrsqrtps", Iop_RSqrtEst32Fx4 );
          goto decode_success;
       }
       /* VRSQRTPS ymm2/m256(E), ymm1(G) = VEX.NDS.256.0F.WIG 52 /r */
       if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
          delta = dis_AVX256_E_to_G_unary_all(
-                    uses_vvvv, vbi, pfx, delta, "vrsqrtps", Iop_RSqrt32Fx8 );
+                    uses_vvvv, vbi, pfx, delta, "vrsqrtps", Iop_RSqrtEst32Fx8 );
          goto decode_success;
       }
       break;
@@ -24386,19 +24387,19 @@
       /* VRCPSS xmm3/m64(E), xmm2(V), xmm1(G) = VEX.NDS.LIG.F3.0F.WIG 53 /r */
       if (haveF3no66noF2(pfx)) {
          delta = dis_AVX128_E_V_to_G_lo32_unary(
-                    uses_vvvv, vbi, pfx, delta, "vrcpss", Iop_Recip32F0x4 );
+                    uses_vvvv, vbi, pfx, delta, "vrcpss", Iop_RecipEst32F0x4 );
          goto decode_success;
       }
       /* VRCPPS xmm2/m128(E), xmm1(G) = VEX.NDS.128.0F.WIG 53 /r */
       if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
          delta = dis_AVX128_E_to_G_unary_all(
-                    uses_vvvv, vbi, pfx, delta, "vrcpps", Iop_Recip32Fx4 );
+                    uses_vvvv, vbi, pfx, delta, "vrcpps", Iop_RecipEst32Fx4 );
          goto decode_success;
       }
       /* VRCPPS ymm2/m256(E), ymm1(G) = VEX.NDS.256.0F.WIG 53 /r */
       if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
          delta = dis_AVX256_E_to_G_unary_all(
-                    uses_vvvv, vbi, pfx, delta, "vrcpps", Iop_Recip32Fx8 );
+                    uses_vvvv, vbi, pfx, delta, "vrcpps", Iop_RecipEst32Fx8 );
          goto decode_success;
       }
       break;
diff --git a/priv/guest_arm_toIR.c b/priv/guest_arm_toIR.c
index 2070254..6b2f2de 100644
--- a/priv/guest_arm_toIR.c
+++ b/priv/guest_arm_toIR.c
@@ -4790,7 +4790,8 @@
                   /* VRECPS */
                   if ((theInstr >> 20) & 1)
                      return False;
-                  assign(res, binop(Q ? Iop_Recps32Fx4 : Iop_Recps32Fx2,
+                  assign(res, binop(Q ? Iop_RecipStep32Fx4
+                                      : Iop_RecipStep32Fx2,
                                     mkexpr(arg_n),
                                     mkexpr(arg_m)));
                   DIP("vrecps.f32 %c%u, %c%u, %c%u\n", Q ? 'q' : 'd', dreg,
@@ -4799,7 +4800,8 @@
                   /* VRSQRTS  */
                   if ((theInstr >> 20) & 1)
                      return False;
-                  assign(res, binop(Q ? Iop_Rsqrts32Fx4 : Iop_Rsqrts32Fx2,
+                  assign(res, binop(Q ? Iop_RSqrtStep32Fx4
+                                      : Iop_RSqrtStep32Fx2,
                                     mkexpr(arg_n),
                                     mkexpr(arg_m)));
                   DIP("vrsqrts.f32 %c%u, %c%u, %c%u\n", Q ? 'q' : 'd', dreg,
@@ -7489,11 +7491,11 @@
             if (size != 2)
                return False;
             if (Q) {
-               op = F ? Iop_Recip32Fx4 : Iop_Recip32x4;
+               op = F ? Iop_RecipEst32Fx4 : Iop_RecipEst32Ux4;
                putQReg(dreg, unop(op, getQReg(mreg)), condT);
                DIP("vrecpe.%c32 q%u, q%u\n", F ? 'f' : 'u', dreg, mreg);
             } else {
-               op = F ? Iop_Recip32Fx2 : Iop_Recip32x2;
+               op = F ? Iop_RecipEst32Fx2 : Iop_RecipEst32Ux2;
                putDRegI64(dreg, unop(op, getDRegI64(mreg)), condT);
                DIP("vrecpe.%c32 d%u, d%u\n", F ? 'f' : 'u', dreg, mreg);
             }
@@ -7506,10 +7508,10 @@
                return False;
             if (F) {
                /* fp */
-               op = Q ? Iop_Rsqrte32Fx4 : Iop_Rsqrte32Fx2;
+               op = Q ? Iop_RSqrtEst32Fx4 : Iop_RSqrtEst32Fx2;
             } else {
                /* unsigned int */
-               op = Q ? Iop_Rsqrte32x4 : Iop_Rsqrte32x2;
+               op = Q ? Iop_RSqrtEst32Ux4 : Iop_RSqrtEst32Ux2;
             }
             if (Q) {
                putQReg(dreg, unop(op, getQReg(mreg)), condT);
diff --git a/priv/guest_ppc_toIR.c b/priv/guest_ppc_toIR.c
index fc289ed..adabf64 100644
--- a/priv/guest_ppc_toIR.c
+++ b/priv/guest_ppc_toIR.c
@@ -7951,7 +7951,7 @@
             return False;
          DIP("frsqrtes%s fr%u,fr%u\n", flag_rC ? ".":"",
              frD_addr, frB_addr);
-         assign( frD, unop(Iop_Est5FRSqrt, mkexpr(frB)) );
+         assign( frD, unop(Iop_RSqrtEst5GoodF64, mkexpr(frB)) );
          break;
 
       default:
@@ -8052,7 +8052,7 @@
             return False;
          DIP("frsqrte%s fr%u,fr%u\n", flag_rC ? ".":"",
              frD_addr, frB_addr);
-         assign( frD, unop(Iop_Est5FRSqrt, mkexpr(frB)) );
+         assign( frD, unop(Iop_RSqrtEst5GoodF64, mkexpr(frB)) );
          break;
 
       default:
@@ -18077,12 +18077,12 @@
    switch (opc2) {
    case 0x10A: // vrefp (Reciprocal Esimate FP, AV p228)
       DIP("vrefp v%d,v%d\n", vD_addr, vB_addr);
-      putVReg( vD_addr, unop(Iop_Recip32Fx4, mkexpr(vB)) );
+      putVReg( vD_addr, unop(Iop_RecipEst32Fx4, mkexpr(vB)) );
       return True;
 
    case 0x14A: // vrsqrtefp (Reciprocal Sqrt Estimate FP, AV p237)
       DIP("vrsqrtefp v%d,v%d\n", vD_addr, vB_addr);
-      putVReg( vD_addr, unop(Iop_RSqrt32Fx4, mkexpr(vB)) );
+      putVReg( vD_addr, unop(Iop_RSqrtEst32Fx4, mkexpr(vB)) );
       return True;
 
    case 0x18A: // vexptefp (2 Raised to the Exp Est FP, AV p173)
diff --git a/priv/guest_x86_toIR.c b/priv/guest_x86_toIR.c
index 2ba4394..7f492e8 100644
--- a/priv/guest_x86_toIR.c
+++ b/priv/guest_x86_toIR.c
@@ -9351,7 +9351,7 @@
    if (insn[0] == 0x0F && insn[1] == 0x53) {
       vassert(sz == 4);
       delta = dis_SSE_E_to_G_unary_all( sorb, delta+2, 
-                                        "rcpps", Iop_Recip32Fx4 );
+                                        "rcpps", Iop_RecipEst32Fx4 );
       goto decode_success;
    }
 
@@ -9359,7 +9359,7 @@
    if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x53) {
       vassert(sz == 4);
       delta = dis_SSE_E_to_G_unary_lo32( sorb, delta+3, 
-                                         "rcpss", Iop_Recip32F0x4 );
+                                         "rcpss", Iop_RecipEst32F0x4 );
       goto decode_success;
    }
 
@@ -9367,7 +9367,7 @@
    if (insn[0] == 0x0F && insn[1] == 0x52) {
       vassert(sz == 4);
       delta = dis_SSE_E_to_G_unary_all( sorb, delta+2, 
-                                        "rsqrtps", Iop_RSqrt32Fx4 );
+                                        "rsqrtps", Iop_RSqrtEst32Fx4 );
       goto decode_success;
    }
 
@@ -9375,7 +9375,7 @@
    if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x52) {
       vassert(sz == 4);
       delta = dis_SSE_E_to_G_unary_lo32( sorb, delta+3, 
-                                         "rsqrtss", Iop_RSqrt32F0x4 );
+                                         "rsqrtss", Iop_RSqrtEst32F0x4 );
       goto decode_success;
    }
 
diff --git a/priv/host_amd64_isel.c b/priv/host_amd64_isel.c
index 8dc67b3..dd894e8 100644
--- a/priv/host_amd64_isel.c
+++ b/priv/host_amd64_isel.c
@@ -3226,9 +3226,9 @@
          return dst;
       }
 
-      case Iop_Recip32Fx4: op = Asse_RCPF;   goto do_32Fx4_unary;
-      case Iop_RSqrt32Fx4: op = Asse_RSQRTF; goto do_32Fx4_unary;
-      case Iop_Sqrt32Fx4:  op = Asse_SQRTF;  goto do_32Fx4_unary;
+      case Iop_RecipEst32Fx4: op = Asse_RCPF;   goto do_32Fx4_unary;
+      case Iop_RSqrtEst32Fx4: op = Asse_RSQRTF; goto do_32Fx4_unary;
+      case Iop_Sqrt32Fx4:     op = Asse_SQRTF;  goto do_32Fx4_unary;
       do_32Fx4_unary:
       {
          HReg arg = iselVecExpr(env, e->Iex.Unop.arg);
@@ -3246,9 +3246,9 @@
          return dst;
       }
 
-      case Iop_Recip32F0x4: op = Asse_RCPF;   goto do_32F0x4_unary;
-      case Iop_RSqrt32F0x4: op = Asse_RSQRTF; goto do_32F0x4_unary;
-      case Iop_Sqrt32F0x4:  op = Asse_SQRTF;  goto do_32F0x4_unary;
+      case Iop_RecipEst32F0x4: op = Asse_RCPF;   goto do_32F0x4_unary;
+      case Iop_RSqrtEst32F0x4: op = Asse_RSQRTF; goto do_32F0x4_unary;
+      case Iop_Sqrt32F0x4:     op = Asse_SQRTF;  goto do_32F0x4_unary;
       do_32F0x4_unary:
       {
          /* A bit subtle.  We have to copy the arg to the result
@@ -3800,9 +3800,9 @@
          return;
       }
 
-      case Iop_Recip32Fx8: op = Asse_RCPF;   goto do_32Fx8_unary;
-      case Iop_Sqrt32Fx8:  op = Asse_SQRTF;  goto do_32Fx8_unary;
-      case Iop_RSqrt32Fx8: op = Asse_RSQRTF; goto do_32Fx8_unary;
+      case Iop_RecipEst32Fx8: op = Asse_RCPF;   goto do_32Fx8_unary;
+      case Iop_Sqrt32Fx8:     op = Asse_SQRTF;  goto do_32Fx8_unary;
+      case Iop_RSqrtEst32Fx8: op = Asse_RSQRTF; goto do_32Fx8_unary;
       do_32Fx8_unary:
       {
          HReg argHi, argLo;
diff --git a/priv/host_arm_isel.c b/priv/host_arm_isel.c
index 6b18689..153f828 100644
--- a/priv/host_arm_isel.c
+++ b/priv/host_arm_isel.c
@@ -2286,7 +2286,7 @@
                                            res, argL, argR, size, False));
             return res;
          }
-         case Iop_Recps32Fx2: {
+         case Iop_RecipStep32Fx2: {
             HReg res = newVRegD(env);
             HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
             HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
@@ -2295,7 +2295,7 @@
                                            res, argL, argR, size, False));
             return res;
          }
-         case Iop_Rsqrts32Fx2: {
+         case Iop_RSqrtStep32Fx2: {
             HReg res = newVRegD(env);
             HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
             HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
@@ -3644,14 +3644,14 @@
                                           res, arg, 2, False));
             return res;
          }
-         case Iop_Recip32Fx2: {
+         case Iop_RecipEst32Fx2: {
             HReg res = newVRegD(env);
             HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
             addInstr(env, ARMInstr_NUnary(ARMneon_VRECIPF,
                                           res, argL, 0, False));
             return res;
          }
-         case Iop_Recip32x2: {
+         case Iop_RecipEst32Ux2: {
             HReg res = newVRegD(env);
             HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
             addInstr(env, ARMInstr_NUnary(ARMneon_VRECIP,
@@ -3680,14 +3680,14 @@
                return res;
             }
          }
-         case Iop_Rsqrte32Fx2: {
+         case Iop_RSqrtEst32Fx2: {
             HReg res = newVRegD(env);
             HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
             addInstr(env, ARMInstr_NUnary(ARMneon_VRSQRTEFP,
                                           res, arg, 0, False));
             return res;
          }
-         case Iop_Rsqrte32x2: {
+         case Iop_RSqrtEst32Ux2: {
             HReg res = newVRegD(env);
             HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
             addInstr(env, ARMInstr_NUnary(ARMneon_VRSQRTE,
@@ -4239,14 +4239,14 @@
                                           res, arg, 2, True));
             return res;
          }
-         case Iop_Recip32Fx4: {
+         case Iop_RecipEst32Fx4: {
             HReg res = newVRegV(env);
             HReg argL = iselNeonExpr(env, e->Iex.Unop.arg);
             addInstr(env, ARMInstr_NUnary(ARMneon_VRECIPF,
                                           res, argL, 0, True));
             return res;
          }
-         case Iop_Recip32x4: {
+         case Iop_RecipEst32Ux4: {
             HReg res = newVRegV(env);
             HReg argL = iselNeonExpr(env, e->Iex.Unop.arg);
             addInstr(env, ARMInstr_NUnary(ARMneon_VRECIP,
@@ -4260,14 +4260,14 @@
                                           res, argL, 0, True));
             return res;
          }
-         case Iop_Rsqrte32Fx4: {
+         case Iop_RSqrtEst32Fx4: {
             HReg res = newVRegV(env);
             HReg argL = iselNeonExpr(env, e->Iex.Unop.arg);
             addInstr(env, ARMInstr_NUnary(ARMneon_VRSQRTEFP,
                                           res, argL, 0, True));
             return res;
          }
-         case Iop_Rsqrte32x4: {
+         case Iop_RSqrtEst32Ux4: {
             HReg res = newVRegV(env);
             HReg argL = iselNeonExpr(env, e->Iex.Unop.arg);
             addInstr(env, ARMInstr_NUnary(ARMneon_VRSQRTE,
@@ -4442,7 +4442,7 @@
                                            res, argL, argR, size, True));
             return res;
          }
-         case Iop_Recps32Fx4: {
+         case Iop_RecipStep32Fx4: {
             HReg res = newVRegV(env);
             HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
             HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
@@ -4451,7 +4451,7 @@
                                            res, argL, argR, size, True));
             return res;
          }
-         case Iop_Rsqrts32Fx4: {
+         case Iop_RSqrtStep32Fx4: {
             HReg res = newVRegV(env);
             HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
             HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
diff --git a/priv/host_ppc_isel.c b/priv/host_ppc_isel.c
index 95ed7b6..f58e1a2 100644
--- a/priv/host_ppc_isel.c
+++ b/priv/host_ppc_isel.c
@@ -4139,7 +4139,7 @@
       switch (e->Iex.Unop.op) {
          case Iop_NegF64:     fpop = Pfp_NEG; break;
          case Iop_AbsF64:     fpop = Pfp_ABS; break;
-         case Iop_Est5FRSqrt: fpop = Pfp_RSQRTE; break;
+         case Iop_RSqrtEst5GoodF64:      fpop = Pfp_RSQRTE; break;
          case Iop_RoundF64toF64_NegINF:  fpop = Pfp_FRIM; break;
          case Iop_RoundF64toF64_PosINF:  fpop = Pfp_FRIP; break;
          case Iop_RoundF64toF64_NEAREST: fpop = Pfp_FRIN; break;
@@ -4930,8 +4930,8 @@
          return dst;
       }
 
-      case Iop_Recip32Fx4:    fpop = Pavfp_RCPF;    goto do_32Fx4_unary;
-      case Iop_RSqrt32Fx4:    fpop = Pavfp_RSQRTF;  goto do_32Fx4_unary;
+      case Iop_RecipEst32Fx4: fpop = Pavfp_RCPF;    goto do_32Fx4_unary;
+      case Iop_RSqrtEst32Fx4: fpop = Pavfp_RSQRTF;  goto do_32Fx4_unary;
       case Iop_I32UtoFx4:     fpop = Pavfp_CVTU2F;  goto do_32Fx4_unary;
       case Iop_I32StoFx4:     fpop = Pavfp_CVTS2F;  goto do_32Fx4_unary;
       case Iop_QFtoI32Ux4_RZ: fpop = Pavfp_QCVTF2U; goto do_32Fx4_unary;
diff --git a/priv/host_x86_isel.c b/priv/host_x86_isel.c
index 5fe816b..b6d47f5 100644
--- a/priv/host_x86_isel.c
+++ b/priv/host_x86_isel.c
@@ -3413,9 +3413,9 @@
          return dst;
       }
 
-      case Iop_Recip32Fx4: op = Xsse_RCPF;   goto do_32Fx4_unary;
-      case Iop_RSqrt32Fx4: op = Xsse_RSQRTF; goto do_32Fx4_unary;
-      case Iop_Sqrt32Fx4:  op = Xsse_SQRTF;  goto do_32Fx4_unary;
+      case Iop_RecipEst32Fx4: op = Xsse_RCPF;   goto do_32Fx4_unary;
+      case Iop_RSqrtEst32Fx4: op = Xsse_RSQRTF; goto do_32Fx4_unary;
+      case Iop_Sqrt32Fx4:     op = Xsse_SQRTF;  goto do_32Fx4_unary;
       do_32Fx4_unary:
       {
          HReg arg = iselVecExpr(env, e->Iex.Unop.arg);
@@ -3424,8 +3424,6 @@
          return dst;
       }
 
-      case Iop_Recip64Fx2: op = Xsse_RCPF;   goto do_64Fx2_unary;
-      case Iop_RSqrt64Fx2: op = Xsse_RSQRTF; goto do_64Fx2_unary;
       case Iop_Sqrt64Fx2:  op = Xsse_SQRTF;  goto do_64Fx2_unary;
       do_64Fx2_unary:
       {
@@ -3436,9 +3434,9 @@
          return dst;
       }
 
-      case Iop_Recip32F0x4: op = Xsse_RCPF;   goto do_32F0x4_unary;
-      case Iop_RSqrt32F0x4: op = Xsse_RSQRTF; goto do_32F0x4_unary;
-      case Iop_Sqrt32F0x4:  op = Xsse_SQRTF;  goto do_32F0x4_unary;
+      case Iop_RecipEst32F0x4: op = Xsse_RCPF;   goto do_32F0x4_unary;
+      case Iop_RSqrtEst32F0x4: op = Xsse_RSQRTF; goto do_32F0x4_unary;
+      case Iop_Sqrt32F0x4:     op = Xsse_SQRTF;  goto do_32F0x4_unary;
       do_32F0x4_unary:
       {
          /* A bit subtle.  We have to copy the arg to the result
@@ -3454,8 +3452,6 @@
          return dst;
       }
 
-      case Iop_Recip64F0x2: op = Xsse_RCPF;   goto do_64F0x2_unary;
-      case Iop_RSqrt64F0x2: op = Xsse_RSQRTF; goto do_64F0x2_unary;
       case Iop_Sqrt64F0x2:  op = Xsse_SQRTF;  goto do_64F0x2_unary;
       do_64F0x2_unary:
       {
diff --git a/priv/ir_defs.c b/priv/ir_defs.c
index c8f90fe..55d24f0 100644
--- a/priv/ir_defs.c
+++ b/priv/ir_defs.c
@@ -329,7 +329,7 @@
       case Iop_MAddF64r32: vex_printf("MAddF64r32"); return;
       case Iop_MSubF64r32: vex_printf("MSubF64r32"); return;
 
-      case Iop_Est5FRSqrt:    vex_printf("Est5FRSqrt"); return;
+      case Iop_RSqrtEst5GoodF64: vex_printf("RSqrtEst5GoodF64"); return;
       case Iop_RoundF64toF64_NEAREST: vex_printf("RoundF64toF64_NEAREST"); return;
       case Iop_RoundF64toF64_NegINF: vex_printf("RoundF64toF64_NegINF"); return;
       case Iop_RoundF64toF64_PosINF: vex_printf("RoundF64toF64_PosINF"); return;
@@ -402,10 +402,10 @@
       case Iop_F32toF16x4: vex_printf("F32toF16x4"); return;
       case Iop_F16toF32x4: vex_printf("F16toF32x4"); return;
 
-      case Iop_Rsqrte32Fx4: vex_printf("VRsqrte32Fx4"); return;
-      case Iop_Rsqrte32x4:  vex_printf("VRsqrte32x4"); return;
-      case Iop_Rsqrte32Fx2: vex_printf("VRsqrte32Fx2"); return;
-      case Iop_Rsqrte32x2:  vex_printf("VRsqrte32x2"); return;
+      case Iop_RSqrtEst32Fx4: vex_printf("RSqrtEst32Fx4"); return;
+      case Iop_RSqrtEst32Ux4: vex_printf("RSqrtEst32Ux4"); return;
+      case Iop_RSqrtEst32Fx2: vex_printf("RSqrtEst32Fx2"); return;
+      case Iop_RSqrtEst32Ux2: vex_printf("RSqrtEst32Ux2"); return;
 
       case Iop_QFtoI32Ux4_RZ: vex_printf("QFtoI32Ux4_RZ"); return;
       case Iop_QFtoI32Sx4_RZ: vex_printf("QFtoI32Sx4_RZ"); return;
@@ -629,26 +629,21 @@
       case Iop_Mul64Fx2:  vex_printf("Mul64Fx2"); return;
       case Iop_Mul64F0x2: vex_printf("Mul64F0x2"); return;
 
-      case Iop_Recip32x2: vex_printf("Recip32x2"); return;
-      case Iop_Recip32Fx2:  vex_printf("Recip32Fx2"); return;
-      case Iop_Recip32Fx4:  vex_printf("Recip32Fx4"); return;
-      case Iop_Recip32Fx8:  vex_printf("Recip32Fx8"); return;
-      case Iop_Recip32x4:  vex_printf("Recip32x4"); return;
-      case Iop_Recip32F0x4: vex_printf("Recip32F0x4"); return;
-      case Iop_Recip64Fx2:  vex_printf("Recip64Fx2"); return;
-      case Iop_Recip64F0x2: vex_printf("Recip64F0x2"); return;
-      case Iop_Recps32Fx2:  vex_printf("VRecps32Fx2"); return;
-      case Iop_Recps32Fx4:  vex_printf("VRecps32Fx4"); return;
+      case Iop_RecipEst32Ux2: vex_printf("RecipEst32Ux2"); return;
+      case Iop_RecipEst32Fx2: vex_printf("RecipEst32Fx2"); return;
+      case Iop_RecipEst32Fx4: vex_printf("RecipEst32Fx4"); return;
+      case Iop_RecipEst32Fx8: vex_printf("RecipEst32Fx8"); return;
+      case Iop_RecipEst32Ux4: vex_printf("RecipEst32Ux4"); return;
+      case Iop_RecipEst32F0x4: vex_printf("RecipEst32F0x4"); return;
+      case Iop_RecipStep32Fx2: vex_printf("RecipStep32Fx2"); return;
+      case Iop_RecipStep32Fx4: vex_printf("RecipStep32Fx4"); return;
       case Iop_Abs32Fx4:  vex_printf("Abs32Fx4"); return;
       case Iop_Abs64Fx2:  vex_printf("Abs64Fx2"); return;
-      case Iop_Rsqrts32Fx4:  vex_printf("VRsqrts32Fx4"); return;
-      case Iop_Rsqrts32Fx2:  vex_printf("VRsqrts32Fx2"); return;
+      case Iop_RSqrtStep32Fx4:  vex_printf("RSqrtStep32Fx4"); return;
+      case Iop_RSqrtStep32Fx2:  vex_printf("RSqrtStep32Fx2"); return;
 
-      case Iop_RSqrt32Fx4:  vex_printf("RSqrt32Fx4"); return;
-      case Iop_RSqrt32F0x4: vex_printf("RSqrt32F0x4"); return;
-      case Iop_RSqrt32Fx8:  vex_printf("RSqrt32Fx8"); return;
-      case Iop_RSqrt64Fx2:  vex_printf("RSqrt64Fx2"); return;
-      case Iop_RSqrt64F0x2: vex_printf("RSqrt64F0x2"); return;
+      case Iop_RSqrtEst32F0x4: vex_printf("RSqrtEst32F0x4"); return;
+      case Iop_RSqrtEst32Fx8: vex_printf("RSqrtEst32Fx8"); return;
 
       case Iop_Sqrt32Fx4:  vex_printf("Sqrt32Fx4"); return;
       case Iop_Sqrt32F0x4: vex_printf("Sqrt32F0x4"); return;
@@ -2565,8 +2560,8 @@
       case Iop_Sal8x8: case Iop_Sal16x4: case Iop_Sal32x2: case Iop_Sal64x1:
       case Iop_QShl8x8: case Iop_QShl16x4: case Iop_QShl32x2: case Iop_QShl64x1:
       case Iop_QSal8x8: case Iop_QSal16x4: case Iop_QSal32x2: case Iop_QSal64x1:
-      case Iop_Recps32Fx2:
-      case Iop_Rsqrts32Fx2:
+      case Iop_RecipStep32Fx2:
+      case Iop_RSqrtStep32Fx2:
          BINARY(Ity_I64,Ity_I64, Ity_I64);
 
       case Iop_ShlN32x2: case Iop_ShlN16x4: case Iop_ShlN8x8:
@@ -2610,10 +2605,10 @@
       case Iop_Reverse8sIn16_x4:
       case Iop_FtoI32Sx2_RZ: case Iop_FtoI32Ux2_RZ:
       case Iop_I32StoFx2: case Iop_I32UtoFx2:
-      case Iop_Recip32x2: case Iop_Recip32Fx2:
+      case Iop_RecipEst32Ux2: case Iop_RecipEst32Fx2:
       case Iop_Abs32Fx2:
-      case Iop_Rsqrte32Fx2:
-      case Iop_Rsqrte32x2:
+      case Iop_RSqrtEst32Fx2:
+      case Iop_RSqrtEst32Ux2:
       case Iop_Neg32Fx2:
       case Iop_Abs8x8: case Iop_Abs16x4: case Iop_Abs32x2:
          UNARY(Ity_I64, Ity_I64);
@@ -2808,7 +2803,7 @@
       case Iop_MAddF64r32: case Iop_MSubF64r32:
          QUATERNARY(ity_RMode,Ity_F64,Ity_F64,Ity_F64, Ity_F64);
 
-      case Iop_Est5FRSqrt:
+      case Iop_RSqrtEst5GoodF64:
       case Iop_RoundF64toF64_NEAREST: case Iop_RoundF64toF64_NegINF:
       case Iop_RoundF64toF64_PosINF: case Iop_RoundF64toF64_ZERO:
          UNARY(Ity_F64, Ity_F64);
@@ -2828,8 +2823,8 @@
       case Iop_RoundF32x4_RN:
       case Iop_RoundF32x4_RZ:
       case Iop_Abs64Fx2: case Iop_Abs32Fx4:
-      case Iop_Rsqrte32Fx4:
-      case Iop_Rsqrte32x4:
+      case Iop_RSqrtEst32Fx4:
+      case Iop_RSqrtEst32Ux4:
          UNARY(Ity_V128, Ity_V128);
 
       case Iop_64HLtoV128:
@@ -2965,8 +2960,8 @@
       case Iop_InterleaveOddLanes16x8: case Iop_InterleaveEvenLanes16x8:
       case Iop_InterleaveOddLanes32x4: case Iop_InterleaveEvenLanes32x4:
       case Iop_Perm8x16: case Iop_Perm32x4:
-      case Iop_Recps32Fx4:
-      case Iop_Rsqrts32Fx4:
+      case Iop_RecipStep32Fx4:
+      case Iop_RSqrtStep32Fx4:
       case Iop_CipherV128:
       case Iop_CipherLV128:
       case Iop_NCipherV128:
@@ -2988,11 +2983,9 @@
          BINARY(Ity_I64, Ity_I64, Ity_V128);
 
       case Iop_NotV128:
-      case Iop_Recip32Fx4: case Iop_Recip32F0x4:
-      case Iop_Recip32x4:
-      case Iop_Recip64Fx2: case Iop_Recip64F0x2:
-      case Iop_RSqrt32Fx4: case Iop_RSqrt32F0x4:
-      case Iop_RSqrt64Fx2: case Iop_RSqrt64F0x2:
+      case Iop_RecipEst32Fx4: case Iop_RecipEst32F0x4:
+      case Iop_RecipEst32Ux4:
+      case Iop_RSqrtEst32F0x4:
       case Iop_Sqrt32Fx4:  case Iop_Sqrt32F0x4:
       case Iop_Sqrt64Fx2:  case Iop_Sqrt64F0x2:
       case Iop_CmpNEZ8x16: case Iop_CmpNEZ16x8:
@@ -3383,10 +3376,10 @@
          BINARY(Ity_V128,Ity_V128, Ity_V256);
 
       case Iop_NotV256:
-      case Iop_RSqrt32Fx8:
+      case Iop_RSqrtEst32Fx8:
       case Iop_Sqrt32Fx8:
       case Iop_Sqrt64Fx4:
-      case Iop_Recip32Fx8:
+      case Iop_RecipEst32Fx8:
       case Iop_CmpNEZ8x32: case Iop_CmpNEZ16x16:
       case Iop_CmpNEZ64x4: case Iop_CmpNEZ32x8:
          UNARY(Ity_V256, Ity_V256);
diff --git a/pub/libvex_ir.h b/pub/libvex_ir.h
index bc2fa46..fffe3b1 100644
--- a/pub/libvex_ir.h
+++ b/pub/libvex_ir.h
@@ -740,7 +740,7 @@
       Iop_MAddF64r32, Iop_MSubF64r32,
 
       /* :: F64 -> F64 */
-      Iop_Est5FRSqrt,    /* reciprocal square root estimate, 5 good bits */
+      Iop_RSqrtEst5GoodF64, /* reciprocal square root estimate, 5 good bits */
       Iop_RoundF64toF64_NEAREST, /* frin */
       Iop_RoundF64toF64_NegINF,  /* frim */ 
       Iop_RoundF64toF64_PosINF,  /* frip */
@@ -811,21 +811,21 @@
       /* Vector Reciprocal Estimate finds an approximate reciprocal of each
       element in the operand vector, and places the results in the destination
       vector.  */
-      Iop_Recip32Fx2,
+      Iop_RecipEst32Fx2,
 
       /* Vector Reciprocal Step computes (2.0 - arg1 * arg2).
          Note, that if one of the arguments is zero and another one is infinity
          of arbitrary sign the result of the operation is 2.0. */
-      Iop_Recps32Fx2,
+      Iop_RecipStep32Fx2,
 
       /* Vector Reciprocal Square Root Estimate finds an approximate reciprocal
          square root of each element in the operand vector. */
-      Iop_Rsqrte32Fx2,
+      Iop_RSqrtEst32Fx2,
 
       /* Vector Reciprocal Square Root Step computes (3.0 - arg1 * arg2) / 2.0.
          Note, that of one of the arguments is zero and another one is infiinty
          of arbitrary sign the result of the operation is 1.5. */
-      Iop_Rsqrts32Fx2,
+      Iop_RSqrtStep32Fx2,
 
       /* Unary */
       Iop_Neg32Fx2, Iop_Abs32Fx2,
@@ -1010,8 +1010,8 @@
       Iop_GetMSBs8x8, /* I64 -> I8 */
 
       /* Vector Reciprocal Estimate and Vector Reciprocal Square Root Estimate
-         See floating-point equiwalents for details. */
-      Iop_Recip32x2, Iop_Rsqrte32x2,
+         See floating-point equivalents for details. */
+      Iop_RecipEst32Ux2, Iop_RSqrtEst32Ux2,
 
       /* ------------------ Decimal Floating Point ------------------ */
 
@@ -1280,27 +1280,27 @@
 
       /* unary */
       Iop_Abs32Fx4,
-      Iop_Sqrt32Fx4, Iop_RSqrt32Fx4,
+      Iop_Sqrt32Fx4,
       Iop_Neg32Fx4,
 
       /* Vector Reciprocal Estimate finds an approximate reciprocal of each
       element in the operand vector, and places the results in the destination
       vector.  */
-      Iop_Recip32Fx4,
+      Iop_RecipEst32Fx4,
 
       /* Vector Reciprocal Step computes (2.0 - arg1 * arg2).
          Note, that if one of the arguments is zero and another one is infinity
          of arbitrary sign the result of the operation is 2.0. */
-      Iop_Recps32Fx4,
+      Iop_RecipStep32Fx4,
 
       /* Vector Reciprocal Square Root Estimate finds an approximate reciprocal
          square root of each element in the operand vector. */
-      Iop_Rsqrte32Fx4,
+      Iop_RSqrtEst32Fx4,
 
       /* Vector Reciprocal Square Root Step computes (3.0 - arg1 * arg2) / 2.0.
          Note, that of one of the arguments is zero and another one is infiinty
          of arbitrary sign the result of the operation is 1.5. */
-      Iop_Rsqrts32Fx4,
+      Iop_RSqrtStep32Fx4,
 
       /* --- Int to/from FP conversion --- */
       /* Unlike the standard fp conversions, these irops take no
@@ -1332,7 +1332,7 @@
       Iop_CmpEQ32F0x4, Iop_CmpLT32F0x4, Iop_CmpLE32F0x4, Iop_CmpUN32F0x4, 
 
       /* unary */
-      Iop_Recip32F0x4, Iop_Sqrt32F0x4, Iop_RSqrt32F0x4,
+      Iop_RecipEst32F0x4, Iop_Sqrt32F0x4, Iop_RSqrtEst32F0x4,
 
       /* --- 64x2 vector FP --- */
 
@@ -1345,12 +1345,9 @@
 
       /* unary */
       Iop_Abs64Fx2,
-      Iop_Sqrt64Fx2, Iop_RSqrt64Fx2,
+      Iop_Sqrt64Fx2,
       Iop_Neg64Fx2,
 
-      /* Vector Reciprocal Estimate */
-      Iop_Recip64Fx2, 
-
       /* --- 64x2 lowest-lane-only scalar FP --- */
 
       /* In binary cases, upper half is copied from first operand.  In
@@ -1362,7 +1359,7 @@
       Iop_CmpEQ64F0x2, Iop_CmpLT64F0x2, Iop_CmpLE64F0x2, Iop_CmpUN64F0x2, 
 
       /* unary */
-      Iop_Recip64F0x2, Iop_Sqrt64F0x2, Iop_RSqrt64F0x2,
+      Iop_Sqrt64F0x2,
 
       /* --- pack / unpack --- */
 
@@ -1736,8 +1733,8 @@
       Iop_GetMSBs8x16, /* V128 -> I16 */
 
       /* Vector Reciprocal Estimate and Vector Reciprocal Square Root Estimate
-         See floating-point equiwalents for details. */
-      Iop_Recip32x4, Iop_Rsqrte32x4,
+         See floating-point equivalents for details. */
+      Iop_RecipEst32Ux4, Iop_RSqrtEst32Ux4,
 
       /* ------------------ 256-bit SIMD Integer. ------------------ */
 
@@ -1808,8 +1805,8 @@
 
       Iop_Sqrt32Fx8,
       Iop_Sqrt64Fx4,
-      Iop_RSqrt32Fx8,
-      Iop_Recip32Fx8,
+      Iop_RSqrtEst32Fx8,
+      Iop_RecipEst32Fx8,
 
       Iop_Max32Fx8, Iop_Min32Fx8,
       Iop_Max64Fx4, Iop_Min64Fx4,