Thumb2: Reduce memory used for fixup dependencies.

Mark fixups for far backward branches as 32-bit from the
beginning and ignore their dependencies, since such fixups
can no longer be expanded. This reduces the memory used by
fixup_dependents_ when compiling methods with many slow
paths.
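
For context, the 16-bit encodings involved have a limited backward
range: the T1 conditional branch encodes its offset as
SignExtend(imm8:'0', 32) and the T2 unconditional branch as
SignExtend(imm11:'0', 32), both relative to PC = location + 4.
A minimal standalone sketch of the resulting bounds (illustrative
code, not part of this patch):

  #include <cstdint>
  #include <cstdio>

  // Max backward distance of a 16-bit Thumb branch: the most negative
  // encodable offset is -(1 << imm_bits) bytes, and the PC reads as
  // location + 4, so a label may be at most (1 << imm_bits) - 4 bytes
  // behind the branch.
  constexpr uint32_t MaxBackwardDistance(uint32_t imm_bits) {
    return (1u << imm_bits) - 4u;
  }

  int main() {
    printf("T1 B<cond>: %u\n", MaxBackwardDistance(8));   // 252 bytes.
    printf("T2 B:       %u\n", MaxBackwardDistance(11));  // 2044 bytes.
    return 0;
  }

These values match kMaxT1BackwardDistance and kMaxT2BackwardDistance
below; a bound branch whose backward distance exceeds the bound is
emitted as 32-bit immediately instead of being registered for later
expansion.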

Bug: 28256882
Change-Id: I0b7155ed8970b69703fa9c4666d9c4961e425721
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 2c73fb8..3a4c23d 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -37,6 +37,9 @@
   const FixupId end_id = assembler->fixups_.size();
   Fixup* fixups = assembler->fixups_.data();
   for (FixupId fixup_id = 0u; fixup_id != end_id; ++fixup_id) {
+    if (!fixups[fixup_id].CanExpand()) {
+      continue;
+    }
     uint32_t target = fixups[fixup_id].target_;
     if (target > fixups[fixup_id].location_) {
       for (FixupId id = fixup_id + 1u; id != end_id && fixups[id].location_ < target; ++id) {
@@ -62,6 +65,9 @@
   assembler->fixup_dependents_.resize(number_of_dependents);
   FixupId* dependents = assembler->fixup_dependents_.data();
   for (FixupId fixup_id = 0u; fixup_id != end_id; ++fixup_id) {
+    if (!fixups[fixup_id].CanExpand()) {
+      continue;
+    }
     uint32_t target = fixups[fixup_id].target_;
     if (target > fixups[fixup_id].location_) {
       for (FixupId id = fixup_id + 1u; id != end_id && fixups[id].location_ < target; ++id) {
@@ -115,6 +121,7 @@
                                           std::deque<FixupId>* fixups_to_recalculate) {
   uint32_t adjustment = fixup->AdjustSizeIfNeeded(*current_code_size);
   if (adjustment != 0u) {
+    DCHECK(fixup->CanExpand());
     *current_code_size += adjustment;
     for (FixupId dependent_id : fixup->Dependents(*this)) {
       Fixup* dependent = GetFixup(dependent_id);
@@ -2546,9 +2553,19 @@
       }
     } else {
       branch_type = Fixup::kUnconditional;             // B.
+      // The T2 encoding offset is `SignExtend(imm11:'0', 32)` and there is a PC adjustment of 4.
+      static constexpr size_t kMaxT2BackwardDistance = (1u << 11) - 4u;
+      if (!use32bit && label->IsBound() && pc - label->Position() > kMaxT2BackwardDistance) {
+        use32bit = true;
+      }
     }
   } else {
     branch_type = Fixup::kConditional;                 // B<cond>.
+    // The T1 encoding offset is `SignExtend(imm8:'0', 32)` and there is a PC adjustment of 4.
+    static constexpr size_t kMaxT1BackwardDistance = (1u << 8) - 4u;
+    if (!use32bit && label->IsBound() && pc - label->Position() > kMaxT1BackwardDistance) {
+      use32bit = true;
+    }
   }
 
   Fixup::Size size = use32bit ? Fixup::kBranch32Bit : Fixup::kBranch16Bit;
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 111a6b0..bc5b708 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -538,6 +538,20 @@
       return GetType() >= kLoadLiteralNarrow;
     }
 
+    // Returns whether the Fixup can expand from the original size.
+    bool CanExpand() const {
+      switch (GetOriginalSize()) {
+        case kBranch32Bit:
+        case kCbxz48Bit:
+        case kLiteralFar:
+        case kLiteralAddrFar:
+        case kLongOrFPLiteralFar:
+          return false;
+        default:
+          return true;
+      }
+    }
+
     Size GetOriginalSize() const {
       return original_size_;
     }
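
Note (illustrative, not part of the patch): the dependents filtering
above can be sketched in isolation. In the real code the dependents
live in a single shared fixup_dependents_ array sized by a separate
counting pass; this simplified version uses per-fixup vectors, a
hypothetical SimpleFixup type, and handles only the forward-branch
case (the backward case is symmetric):

  #include <cstdint>
  #include <vector>

  // Simplified stand-in for Thumb2Assembler::Fixup (hypothetical).
  // Assumes fixups are sorted by location, as in the assembler.
  struct SimpleFixup {
    uint32_t location;                 // Offset of the instruction.
    uint32_t target;                   // Offset it branches to.
    bool can_expand;                   // False once at maximum size.
    std::vector<uint32_t> dependents;  // Fixups to re-check if this grows.
  };

  void RecordDependents(std::vector<SimpleFixup>& fixups) {
    for (uint32_t id = 0; id != fixups.size(); ++id) {
      if (!fixups[id].can_expand) {
        continue;  // Already at maximum size; never needs re-checking.
      }
      // A forward branch spans every fixup between its location and its
      // target; if any of those expands, this branch's distance grows,
      // so it must be registered as their dependent.
      for (uint32_t other = id + 1;
           other != fixups.size() &&
               fixups[other].location < fixups[id].target;
           ++other) {
        fixups[other].dependents.push_back(id);
      }
    }
  }

With the CanExpand() check in place, fixups created directly at their
far size (e.g. the far backward branches above) consume no dependent
slots at all, which is where the memory saving comes from.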