Avoid using i64 types for vld1q_lane/vst1q_lane intrinsics.

The backend has to legalize i64 types by splitting them into two 32-bit pieces,
which leads to poor-quality code.  If we produce code for these intrinsics that
uses one-element vector types, which can live in Neon vector registers without
getting split up, then the generated code is much better.  Radar 11998303.
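
For reference, a minimal sketch of the kind of source this affects (the
function and variable names are illustrative, not part of this patch); the
comments describe the IR clang now emits, matching the new test below:

  #include <arm_neon.h>

  void copy_lanes(uint64_t *src, uint64_t *dst) {
    uint64x2_t q = vld1q_u64(src);
    // Still a <2 x i64> @llvm.arm.neon.vld1 call, as before.
    q = vld1q_lane_u64(src + 2, q, 1);
    // Now: a shufflevector keeping the untouched lane as <1 x i64>, a
    // <1 x i64> @llvm.arm.neon.vld1 load of the new lane, and a
    // shufflevector recombining them, instead of a scalar i64 load plus
    // insertelement.
    vst1q_lane_u64(dst, q, 1);
    // Now: a shufflevector from <2 x i64> down to <1 x i64>, then a
    // @llvm.arm.neon.vst1 store, instead of extractelement plus a
    // scalar i64 store.
  }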

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@161879 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 65c782e..4c401c5 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -1720,8 +1720,29 @@
     Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty),
                         Ops, "vld1");
-  case ARM::BI__builtin_neon_vld1_lane_v:
-  case ARM::BI__builtin_neon_vld1q_lane_v: {
+  case ARM::BI__builtin_neon_vld1q_lane_v:
+    // Handle 64-bit integer elements as a special case.  Use shuffles of
+    // one-element vectors to avoid poor code for i64 in the backend.
+    if (VTy->getElementType()->isIntegerTy(64)) {
+      // Extract the other lane.
+      Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+      int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
+      Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
+      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
+      // Load the value as a one-element vector.
+      Ty = llvm::VectorType::get(VTy->getElementType(), 1);
+      Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty);
+      Value *Ld = Builder.CreateCall2(F, Ops[0],
+                                      GetPointeeAlignmentValue(E->getArg(0)));
+      // Combine them.
+      SmallVector<Constant*, 2> Indices;
+      Indices.push_back(ConstantInt::get(Int32Ty, 1-Lane));
+      Indices.push_back(ConstantInt::get(Int32Ty, Lane));
+      SV = llvm::ConstantVector::get(Indices);
+      return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
+    }
+    // fall through
+  case ARM::BI__builtin_neon_vld1_lane_v: {
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
     Ty = llvm::PointerType::getUnqual(VTy->getElementType());
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -2086,8 +2107,19 @@
     Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Ty),
                         Ops, "");
-  case ARM::BI__builtin_neon_vst1_lane_v:
-  case ARM::BI__builtin_neon_vst1q_lane_v: {
+  case ARM::BI__builtin_neon_vst1q_lane_v:
+    // Handle 64-bit integer elements as a special case.  Use a shuffle to get
+    // a one-element vector and avoid poor code for i64 in the backend.
+    if (VTy->getElementType()->isIntegerTy(64)) {
+      Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+      Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
+      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
+      Ops[2] = GetPointeeAlignmentValue(E->getArg(0));
+      return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
+                                                 Ops[1]->getType()), Ops);
+    }
+    // fall through
+  case ARM::BI__builtin_neon_vst1_lane_v: {
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
     Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
     Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
diff --git a/test/CodeGen/arm-neon-misc.c b/test/CodeGen/arm-neon-misc.c
new file mode 100644
index 0000000..56ce316
--- /dev/null
+++ b/test/CodeGen/arm-neon-misc.c
@@ -0,0 +1,34 @@
+// REQUIRES: arm-registered-target
+// RUN: %clang_cc1 -triple thumbv7-apple-darwin \
+// RUN:   -target-abi apcs-gnu \
+// RUN:   -target-cpu cortex-a8 \
+// RUN:   -mfloat-abi soft \
+// RUN:   -target-feature +soft-float-abi \
+// RUN:   -ffreestanding \
+// RUN:   -emit-llvm -w -o - %s | FileCheck %s
+
+#include <arm_neon.h>
+
+// Radar 11998303: Avoid using i64 types for vld1q_lane and vst1q_lane Neon
+// intrinsics with <2 x i64> vectors, since i64 produces poor code in the backend.
+void t1(uint64_t *src, uint8_t *dst) {
+// CHECK: @t1
+  uint64x2_t q = vld1q_u64(src);
+// CHECK: call <2 x i64> @llvm.arm.neon.vld1.v2i64
+  vst1q_lane_u64(dst, q, 1);
+// CHECK: bitcast <16 x i8> %{{.*}} to <2 x i64>
+// CHECK: shufflevector <2 x i64>
+// CHECK: call void @llvm.arm.neon.vst1.v1i64
+}
+
+void t2(uint64_t *src1, uint8_t *src2, uint64x2_t *dst) {
+// CHECK: @t2
+    uint64x2_t q = vld1q_u64(src1);
+// CHECK: call <2 x i64> @llvm.arm.neon.vld1.v2i64
+    q = vld1q_lane_u64(src2, q, 0);
+// CHECK: shufflevector <2 x i64>
+// CHECK: call <1 x i64> @llvm.arm.neon.vld1.v1i64
+// CHECK: shufflevector <1 x i64>
+    *dst = q;
+// CHECK: store <2 x i64>
+}