Imported Scudo Standalone changes:

  - 515e19ae7b5f8be51686def4b7a64e917125d8e0 Fix errors/warnings in scudo build. by Peter Collingbourne <peter@pcc.me.uk>
  - 87303fd9171199ac3082e17d4a91304bf82baeea scudo: Fix various test failures, mostly on 32-bit. by Peter Collingbourne <peter@pcc.me.uk>
  - 21695710cfa9a36256e9547155e2b9e0139f5c6a [scudo][standalone] Workaround for full regions on Android by Kostya Kortchinsky <kostyak@google.com>
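
The third change is the substantive one: with a 64-bit primary, an
allocation can fail because the region backing its size class is full.
On Android, the allocator now retries once with the immediately larger
class instead of returning null (see the combined.h hunk below). A
simplified sketch of the idea, not the actual allocator code
(allocateWithFallback is a hypothetical helper):

    void *allocateWithFallback(CacheT &Cache, uptr ClassId) {
      void *Block = Cache.allocate(ClassId);
      // Android only: fall back once to the next larger class, unless the
      // failing class was already the largest. This wastes some memory but
      // avoids failing the allocation outright.
      if (SCUDO_ANDROID && UNLIKELY(!Block) &&
          ClassId < SizeClassMap::LargestClassId)
        Block = Cache.allocate(ClassId + 1);
      return Block;
    }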

Bug: 149481706

GitOrigin-RevId: 21695710cfa9a36256e9547155e2b9e0139f5c6a
Change-Id: Ia9c194830d01cd1951250d1f1888a15aa8e7caa3
diff --git a/standalone/combined.h b/standalone/combined.h
index bd78d79..e8390a7 100644
--- a/standalone/combined.h
+++ b/standalone/combined.h
@@ -267,6 +267,17 @@
       bool UnlockRequired;
       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
       Block = TSD->Cache.allocate(ClassId);
+      // If the allocation failed, the most likely reason with a 64-bit
+      // primary is that the region for that size class is full. In that
+      // event, retry once using the immediately larger class (unless the
+      // failing class was already the largest). This wastes some memory
+      // but keeps the application from failing outright.
+      if (SCUDO_ANDROID) {
+        if (UNLIKELY(!Block)) {
+          if (ClassId < SizeClassMap::LargestClassId)
+            Block = TSD->Cache.allocate(++ClassId);
+        }
+      }
       if (UnlockRequired)
         TSD->unlock();
     } else {
diff --git a/standalone/size_class_map.h b/standalone/size_class_map.h
index 46f53ae..3bbd165 100644
--- a/standalone/size_class_map.h
+++ b/standalone/size_class_map.h
@@ -24,7 +24,6 @@
 
 template <typename Config> struct SizeClassMapBase {
   static u32 getMaxCachedHint(uptr Size) {
-    DCHECK_LE(Size, MaxSize);
     DCHECK_NE(Size, 0);
     u32 N;
     // Force a 32-bit division if the template parameters allow for it.
@@ -95,10 +94,17 @@
       return (Size + MinSize - 1) >> Config::MinSizeLog;
     return MidClass + 1 + scaledLog2(Size - 1, Config::MidSizeLog, S);
   }
+
+  static u32 getMaxCachedHint(uptr Size) {
+    DCHECK_LE(Size, MaxSize);
+    return Base::getMaxCachedHint(Size);
+  }
 };
 
 template <typename Config>
 class TableSizeClassMap : public SizeClassMapBase<Config> {
+  typedef SizeClassMapBase<Config> Base;
+
   static const u8 S = Config::NumBits - 1;
   static const uptr M = (1UL << S) - 1;
   static const uptr ClassesSize =
@@ -119,9 +125,9 @@
     constexpr static u8 computeClassId(uptr Size) {
       for (uptr i = 0; i != ClassesSize; ++i) {
         if (Size <= Config::Classes[i])
-          return i + 1;
+          return static_cast<u8>(i + 1);
       }
-      return -1;
+      return static_cast<u8>(-1);
     }
 
     constexpr static uptr getTableSize() {
@@ -156,8 +162,10 @@
     return Table.Tab[scaledLog2(Size - 1, Config::MidSizeLog, S)];
   }
 
-  static void print() {}
-  static void validate() {}
+  static u32 getMaxCachedHint(uptr Size) {
+    DCHECK_LE(Size, MaxSize);
+    return Base::getMaxCachedHint(Size);
+  }
 };
 
 struct AndroidSizeClassConfig {
diff --git a/standalone/tests/combined_test.cpp b/standalone/tests/combined_test.cpp
index 488dca9..ce1b282 100644
--- a/standalone/tests/combined_test.cpp
+++ b/standalone/tests/combined_test.cpp
@@ -344,20 +344,21 @@
 #endif
 }
 
-
 struct DeathSizeClassConfig {
   static const scudo::uptr NumBits = 1;
   static const scudo::uptr MinSizeLog = 10;
   static const scudo::uptr MidSizeLog = 10;
-  static const scudo::uptr MaxSizeLog = 10;
-  static const scudo::u32 MaxNumCachedHint = 1;
-  static const scudo::uptr MaxBytesCachedLog = 10;
+  static const scudo::uptr MaxSizeLog = 11;
+  static const scudo::u32 MaxNumCachedHint = 4;
+  static const scudo::uptr MaxBytesCachedLog = 12;
 };
 
+static const scudo::uptr DeathRegionSizeLog = 20U;
 struct DeathConfig {
-  // Tiny allocator, its Primary only serves chunks of 1024 bytes.
+  // Tiny allocator whose Primary only serves chunks of two sizes.
   using DeathSizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
-  typedef scudo::SizeClassAllocator64<DeathSizeClassMap, 20U> Primary;
+  typedef scudo::SizeClassAllocator64<DeathSizeClassMap, DeathRegionSizeLog>
+      Primary;
   typedef scudo::MapAllocator<scudo::MapAllocatorNoCache> Secondary;
   template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U>;
 };
@@ -415,3 +416,43 @@
 
   Allocator->releaseToOS();
 }
+
+// Verify that when the region for a size class is full, the Android config
+// will still fulfill the allocation by falling back to a larger size class.
+TEST(ScudoCombinedTest, FullRegion) {
+  using AllocatorT = scudo::Allocator<DeathConfig>;
+  auto Deleter = [](AllocatorT *A) {
+    A->unmapTestOnly();
+    delete A;
+  };
+  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
+                                                           Deleter);
+  Allocator->reset();
+
+  const scudo::uptr Size = 1000U;
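+  // Number of chunks of the first class (the one servicing Size) that fit in
+  // a single region of the Primary.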
+  const scudo::uptr MaxNumberOfChunks =
+      (1U << DeathRegionSizeLog) /
+      DeathConfig::DeathSizeClassMap::getSizeByClassId(1U);
+  void *P;
+  std::vector<void *> V;
+  scudo::uptr FailedAllocationsCount = 0;
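+  // Allocate past the region's capacity: depending on the platform, the
+  // overflow either fails or gets serviced by the next larger class.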
+  for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) {
+    P = Allocator->allocate(Size, Origin);
+    if (!P)
+      FailedAllocationsCount++;
+    else
+      V.push_back(P);
+  }
+  while (!V.empty()) {
+    Allocator->deallocate(V.back(), Origin);
+    V.pop_back();
+  }
+  if (SCUDO_ANDROID)
+    EXPECT_EQ(FailedAllocationsCount, 0U);
+  else
+    EXPECT_GT(FailedAllocationsCount, 0U);
+}
diff --git a/standalone/tests/wrappers_c_test.cpp b/standalone/tests/wrappers_c_test.cpp
index d4ba7d7..8b2bc6e 100644
--- a/standalone/tests/wrappers_c_test.cpp
+++ b/standalone/tests/wrappers_c_test.cpp
@@ -268,10 +268,28 @@
   const size_t BlockDelta = FIRST_32_SECOND_64(8U, 16U);
   const size_t SpecialSize = PageSize - BlockDelta;
 
-  void *P = malloc(SpecialSize);
-  EXPECT_NE(P, nullptr);
-  BoundaryP = reinterpret_cast<uintptr_t>(P);
-  const uintptr_t Block = BoundaryP - BlockDelta;
+  // We aren't guaranteed that any size class is exactly a page wide, so we
+  // need to keep making allocations until one happens to be page-aligned.
+  //
+  // With a 16-byte block alignment and a 4096-byte page size, each allocation
+  // has a probability of (1 - (16/4096)) of failing to meet the alignment
+  // requirements, and the probability of failing 65536 times in a row is
+  // (1 - (16/4096))^65536 < 10^-111. So if we still haven't succeeded after
+  // 65536 tries, give up.
+  uintptr_t Block = 0;
+  void *P = nullptr;
+  for (unsigned I = 0; I != 65536; ++I) {
+    void *PrevP = P;
+    P = malloc(SpecialSize);
+    EXPECT_NE(P, nullptr);
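+    // Chain the blocks into an intrusive singly linked list so that they can
+    // all be freed once we are done.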
+    *reinterpret_cast<void **>(P) = PrevP;
+    BoundaryP = reinterpret_cast<uintptr_t>(P);
+    Block = BoundaryP - BlockDelta;
+    if ((Block & (PageSize - 1)) == 0U)
+      break;
+  }
   EXPECT_EQ((Block & (PageSize - 1)), 0U);
 
   Count = 0U;
@@ -281,7 +299,11 @@
   malloc_enable();
   EXPECT_EQ(Count, 1U);
 
-  free(P);
+  while (P) {
+    void *NextP = *reinterpret_cast<void **>(P);
+    free(P);
+    P = NextP;
+  }
 }
 
 // We expect heap operations within a disable/enable scope to deadlock.
diff --git a/standalone/tsd_shared.h b/standalone/tsd_shared.h
index 1626732..cf5453d 100644
--- a/standalone/tsd_shared.h
+++ b/standalone/tsd_shared.h
@@ -79,7 +79,7 @@
   }
 
   void enable() {
-    for (s32 I = NumberOfTSDs - 1; I >= 0; I--)
+    for (s32 I = static_cast<s32>(NumberOfTSDs - 1); I >= 0; I--)
       TSDs[I].unlock();
     Mutex.unlock();
   }