Patch from paul.lind@imgtec.com to fix allocator alignment

Bug 11222825

Upstream Chromium fixed this in M31, but that fix does not apply
cleanly to M30, so this is a spot-fix for the klp branch. The change
is #ifdef'd for MIPS to reduce risk, since no other CPU is known to
require it (although it should be valid on all platforms).
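
As a minimal standalone sketch of the problem and of the rounding the
patch applies (assuming a 32-bit MIPS target where sizeof(void*) == 4
but doubles must be naturally aligned to 8 bytes; roundup() below
mirrors partitionAllocRoundup() from the patch):

    #include <stddef.h>
    #include <stdio.h>

    // With a granularity of sizeof(void*) == 4 bytes, a request such
    // as 12 bytes can leave the next slot only 4-byte aligned, which
    // is not enough for a double on MIPS. Deriving the granularity
    // from sizeof(double) == 8 keeps every slot 8-byte aligned.
    typedef double align_t;
    static const size_t kAllocationGranularity = sizeof(align_t);
    static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;

    static size_t roundup(size_t size)
    {
        return (size + kAllocationGranularityMask) & ~kAllocationGranularityMask;
    }

    int main()
    {
        printf("%u -> %u\n", 12u, (unsigned)roundup(12)); // 12 -> 16
        printf("%u -> %u\n", 16u, (unsigned)roundup(16)); // 16 -> 16
        return 0;
    }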

Change-Id: I924bea66eb3cef725a0dac89f5717a31fb471194
diff --git a/Source/wtf/PartitionAlloc.h b/Source/wtf/PartitionAlloc.h
index a66ba61..9847430 100644
--- a/Source/wtf/PartitionAlloc.h
+++ b/Source/wtf/PartitionAlloc.h
@@ -80,6 +80,7 @@
 // - No specific protection against corruption of page header metadata.
 
 #include "wtf/Assertions.h"
+#include "wtf/CPU.h"
 #include "wtf/FastMalloc.h"
 #include "wtf/SpinLock.h"
 
@@ -87,8 +88,17 @@
 
 namespace WTF {
 
-// Allocation granularity of sizeof(void*) bytes.
-static const size_t kAllocationGranularity = sizeof(void*);
+#if CPU(MIPS)
+    // Allocation granularity of sizeof(double) bytes.
+    typedef double align_t;
+    #define WTF_ALIGN(n)  __attribute__((__aligned__(n)))
+#else
+    // Allocation granularity of sizeof(void*) bytes.
+    typedef void* align_t;
+    #define WTF_ALIGN(n)
+#endif
+
+static const size_t kAllocationGranularity = sizeof(align_t);
 static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
 static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
 // Supports allocations up to 4088 (one bucket is used for metadata).
@@ -121,19 +131,19 @@
     PartitionFreelistEntry* freelistHead;
     PartitionPageHeader* next;
     PartitionPageHeader* prev;
-};
+} WTF_ALIGN(sizeof(align_t));
 
 struct PartitionFreepagelistEntry {
     PartitionPageHeader* page;
     PartitionFreepagelistEntry* next;
-};
+} WTF_ALIGN(sizeof(align_t));
 
 struct PartitionBucket {
     PartitionRoot* root;
     PartitionPageHeader* currPage;
     PartitionFreepagelistEntry* freePages;
     size_t numFullPages;
-};
+} WTF_ALIGN(sizeof(align_t));
 
 struct PartitionRoot {
     int lock;
@@ -189,11 +199,19 @@
     return partitionAllocSlowPath(bucket);
 }
 
+ALWAYS_INLINE size_t partitionAllocRoundup(size_t size)
+{
+    return (size + kAllocationGranularityMask) & ~kAllocationGranularityMask;
+}
+
 ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root, size_t size)
 {
 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
     return malloc(size);
 #else
+#if CPU(MIPS)
+    size = partitionAllocRoundup(size);
+#endif
     size_t index = size >> kBucketShift;
     ASSERT(index < kNumBuckets);
     ASSERT(size == index << kBucketShift);
@@ -234,18 +252,13 @@
 #endif
 }
 
-ALWAYS_INLINE size_t partitionAllocRoundup(size_t size)
-{
-    return (size + kAllocationGranularityMask) & ~kAllocationGranularityMask;
-}
-
 ALWAYS_INLINE void* partitionAllocGeneric(PartitionRoot* root, size_t size)
 {
 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
     return malloc(size);
 #else
+    size = partitionAllocRoundup(size);
     if (LIKELY(size <= kMaxAllocation)) {
-        size = partitionAllocRoundup(size);
         spinLockLock(&root->lock);
         void* ret = partitionAlloc(root, size);
         spinLockUnlock(&root->lock);
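
A quick compile-time check of the struct-alignment half of the change
(hypothetical and simplified, not part of the patch; it assumes a
GCC/Clang toolchain with C++11, and stands in void* for the page
pointer so the struct is self-contained):

    #include <stddef.h>

    #define WTF_ALIGN(n) __attribute__((__aligned__(n)))
    typedef double align_t;

    // Simplified stand-in for PartitionFreepagelistEntry.
    struct Entry {
        void* page;
        Entry* next;
    } WTF_ALIGN(sizeof(align_t));

    // The attribute both aligns the struct to 8 bytes and pads its
    // size to a multiple of 8, so freelist slots holding these
    // structs stay double-aligned.
    static_assert(alignof(Entry) == sizeof(align_t),
                  "entry must be aligned to the allocation granularity");
    static_assert(sizeof(Entry) % sizeof(align_t) == 0,
                  "entry size must be a multiple of the granularity");

    int main() { return 0; }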