Extend lifetime of CodeInfo in StackVisitor.

Users of the visitor may use the API after WalkStack has finished, so
the CodeInfo backing current_inline_frames_ must outlive the walk;
store it in a member instead of a local variable.
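
The visitor exposes current_inline_frames_, a BitTableRange<InlineInfo>
that is a non-owning view into the CodeInfo's decoded tables, so the
CodeInfo must stay alive for as long as the range is used. A minimal,
self-contained sketch of the lifetime pattern, using hypothetical
stand-in names rather than the real ART types:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct FakeCodeInfo {             // stands in for CodeInfo
      std::vector<int> inline_infos;  // stands in for the decoded bit tables
    };

    struct FakeRange {                // stands in for BitTableRange<InlineInfo>
      const int* data = nullptr;      // non-owning: valid only while the
      std::size_t size = 0;           // FakeCodeInfo it points into is alive
    };

    struct FakeVisitor {              // stands in for StackVisitor
      FakeCodeInfo current_code_info_;  // the fix: a member keeps the data alive
      FakeRange current_inline_frames_;

      void WalkStack() {
        // Before the fix, the CodeInfo was a local variable here and was
        // destroyed on return, leaving current_inline_frames_ dangling.
        current_code_info_ = FakeCodeInfo{{1, 2, 3}};
        current_inline_frames_ = {current_code_info_.inline_infos.data(),
                                  current_code_info_.inline_infos.size()};
      }
    };

    int main() {
      FakeVisitor visitor;
      visitor.WalkStack();
      // Safe after the walk: the member still owns the decoded tables.
      std::printf("%d\n", visitor.current_inline_frames_.data[0]);
      return 0;
    }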

Test: test.py -b --host
Bug: 120844051
Change-Id: Ia445d3b4c5c338402c92f1d29b522a33cd5baa14
diff --git a/libartbase/base/bit_table.h b/libartbase/base/bit_table.h
index 54e8861..d6a1d7b 100644
--- a/libartbase/base/bit_table.h
+++ b/libartbase/base/bit_table.h
@@ -100,8 +100,6 @@
   BitMemoryRegion table_data_;
   size_t num_rows_ = 0;
   uint16_t column_offset_[kNumColumns + 1] = {};
-
-  DISALLOW_COPY_AND_ASSIGN(BitTableBase);
 };
 
 // Helper class which can be used to create BitTable accessors with named getters.
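
Dropping DISALLOW_COPY_AND_ASSIGN here is what makes the assignment in
stack.cc below well-formed: CodeInfo holds BitTable members, so its
implicitly-declared copy and move assignment stay deleted for as long
as BitTableBase forbids assignment. A minimal sketch of the mechanism,
with hypothetical stand-in names and the macro expansion paraphrased:

    struct Table {                              // stands in for BitTableBase
      Table() = default;
      Table(const Table&) = delete;             // roughly what the macro
      Table& operator=(const Table&) = delete;  // expands to
    };

    struct Info {   // stands in for CodeInfo, which holds BitTable members
      Table stack_maps_;
    };

    int main() {
      Info a;
      // Info b;
      // a = b;  // ill-formed while Table's operator= is deleted; removing
      //         // the macro restores Info's implicit assignment, which the
      //         // new current_code_info_ = CodeInfo(...) in stack.cc needs.
      (void)a;
      return 0;
    }
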
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 9a677de..80a563b 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -816,13 +816,14 @@
             // JNI methods cannot have any inlined frames.
             && !method->IsNative()) {
           DCHECK_NE(cur_quick_frame_pc_, 0u);
-          CodeInfo code_info(cur_oat_quick_method_header_, CodeInfo::DecodeFlags::InlineInfoOnly);
+          current_code_info_ = CodeInfo(cur_oat_quick_method_header_,
+                                        CodeInfo::DecodeFlags::InlineInfoOnly);
           uint32_t native_pc_offset =
               cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
-          StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+          StackMap stack_map = current_code_info_.GetStackMapForNativePcOffset(native_pc_offset);
           if (stack_map.IsValid() && stack_map.HasInlineInfo()) {
             DCHECK_EQ(current_inline_frames_.size(), 0u);
-            for (current_inline_frames_ = code_info.GetInlineInfosOf(stack_map);
+            for (current_inline_frames_ = current_code_info_.GetInlineInfosOf(stack_map);
                  !current_inline_frames_.empty();
                  current_inline_frames_.pop_back()) {
               bool should_continue = VisitFrame();
diff --git a/runtime/stack.h b/runtime/stack.h
index 0edf4f5..1f305d2 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -342,6 +342,7 @@
   size_t cur_depth_;
+  CodeInfo current_code_info_;
   // Current inlined frames of the method we are currently at.
   // We keep popping frames from the end as we visit them.
   BitTableRange<InlineInfo> current_inline_frames_;
 
  protected:
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 811e23b..87133cf 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -301,6 +301,8 @@
     InlineInfoOnly = 2,
   };
 
+  CodeInfo() {}
+
   explicit CodeInfo(const uint8_t* data, DecodeFlags flags = AllTables) {
     Decode(reinterpret_cast<const uint8_t*>(data), flags);
   }
@@ -445,8 +447,6 @@
   }
 
  private:
-  CodeInfo() {}
-
   // Returns the lower bound (first stack map with pc greater than or equal to the desired one).
   // It ignores catch stack maps at the end (it is the same as if they had the maximum pc value).
   BitTable<StackMap>::const_iterator BinarySearchNativePc(uint32_t packed_pc) const;
@@ -486,10 +486,10 @@
     callback(&CodeInfo::dex_register_catalog_);
   }
 
-  uint32_t packed_frame_size_;  // Frame size in kStackAlignment units.
-  uint32_t core_spill_mask_;
-  uint32_t fp_spill_mask_;
-  uint32_t number_of_dex_registers_;
+  uint32_t packed_frame_size_ = 0;  // Frame size in kStackAlignment units.
+  uint32_t core_spill_mask_ = 0;
+  uint32_t fp_spill_mask_ = 0;
+  uint32_t number_of_dex_registers_ = 0;
   BitTable<StackMap> stack_maps_;
   BitTable<RegisterMask> register_masks_;
   BitTable<StackMask> stack_masks_;
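
The new zero initializers pair with the now-public default constructor:
a default-constructed CodeInfo never runs Decode, so without them the
header fields would be left indeterminate and reading them would be
undefined behavior. A minimal sketch of the idiom, with hypothetical
stand-in names:

    #include <cstdint>

    struct Header {                          // stands in for CodeInfo's header fields
      std::uint32_t packed_frame_size_ = 0;  // in-class initializers run even when
      std::uint32_t core_spill_mask_ = 0;    // no decoding has happened yet
    };

    int main() {
      Header h;  // default-constructed: fields read as zero, not garbage
      return static_cast<int>(h.packed_frame_size_ | h.core_spill_mask_);
    }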