Turn off the generation of unaligned atomic load/store instructions; for now, the backend will explicitly error out on such cases rather than attempt to lower them.



git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@139640 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 8f5e1fb..456909b 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -378,7 +378,9 @@
 /// accesses.  They don't have to be fast, just faster than a function
 /// call and a mutex.
 static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
-  return (arch == llvm::Triple::x86 || arch == llvm::Triple::x86_64);
+  // FIXME: Allow unaligned atomic load/store on x86.  (It is not
+  // currently supported by the backend.)
+  return 0;
 }
 
 /// Return the maximum size that permits atomic accesses for the given
diff --git a/test/CodeGenObjC/property-aggregate.m b/test/CodeGenObjC/property-aggregate.m
index f4b4dc9..de93aed 100644
--- a/test/CodeGenObjC/property-aggregate.m
+++ b/test/CodeGenObjC/property-aggregate.m
@@ -6,6 +6,8 @@
 struct s3 { char c[3]; };
 
 // This structure's size is, so it does, because it can.
+// FIXME: But we don't at the moment; the backend doesn't know how to generate
+// correct code.
 struct s4 { char c[4]; };
 
 @interface Test0
@@ -23,7 +25,7 @@
 // CHECK: call void @objc_copyStruct
 
 // CHECK: define internal i32 @"\01-[Test0 s4]"(
-// CHECK: load atomic i32* {{%.*}} unordered, align 1
+// CHECK: call void @objc_copyStruct
 
 // CHECK: define internal void @"\01-[Test0 setS4:]"(
-// CHECK: store atomic i32 {{%.*}}, i32* {{%.*}} unordered, align 1
+// CHECK: call void @objc_copyStruct