Remove support code for native Core Platform API checks

Native Core Platform API checks rely on the stack unwinder, and the
unwinder instance increases the zygote RSS by ~4MB. The unwinder has to
stay resident to keep the checks fast and to avoid churning memory, so
removing the checks reclaims that memory.

Bug: 124338141
Bug: 144502743
Bug: 149029127
Test: m & boot
Change-Id: Ia306ec6e48d7b2b330efd771348b563767973269
(cherry picked from commit 9f8ead24a13e0471a333f0776a024e970662591c)
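
For context, the pattern being removed looks roughly like the sketch
below: a stack-allocated ScopedCorePlatformApiCheck marker at each JNI
ID-lookup entry point, which lets the member-resolution path
short-circuit for approved native callers (runtime or APEX code). This
is a simplified illustration distilled from the check_jni.cc and
jni_internal.cc hunks below; locking annotations and the lookup
plumbing are omitted.

  // Entry point (see the check_jni.cc hunk): the scoped marker captures
  // the caller PC via the resident unwinder before the ID lookup runs.
  static jmethodID GetMethodID(JNIEnv* env, jclass c, const char* name, const char* sig) {
    hiddenapi::ScopedCorePlatformApiCheck sc;
    return GetMethodIDInternal(__FUNCTION__, env, c, name, sig, false);
  }

  // Member resolution (see the jni_internal.cc hunk): approved native
  // callers bypass the hidden-API deny check entirely.
  if (hiddenapi::ScopedCorePlatformApiCheck::IsCurrentCallerApproved(self)) {
    return false;  // Caller is the runtime or an APEX module.
  }
  // Otherwise fall through to the regular hiddenapi::ShouldDenyAccessToMember() path.

Removing these markers removes the callers of the unwinder-based check,
which is what allows the resident unwinder (and its ~4MB of zygote RSS)
to go away.
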
diff --git a/build/apex/art_apex_test.py b/build/apex/art_apex_test.py
index e0b28bd..bf0f543 100755
--- a/build/apex/art_apex_test.py
+++ b/build/apex/art_apex_test.py
@@ -824,7 +824,6 @@
     self._checker.check_art_test_executable('membarrier_test')
     self._checker.check_art_test_executable('memfd_test')
     self._checker.check_art_test_executable('memory_region_test')
-    self._checker.check_art_test_executable('memory_type_table_test')
     self._checker.check_art_test_executable('safe_copy_test')
     self._checker.check_art_test_executable('scoped_flock_test')
     self._checker.check_art_test_executable('time_utils_test')
diff --git a/libartbase/Android.bp b/libartbase/Android.bp
index a414bd2..a9f9918 100644
--- a/libartbase/Android.bp
+++ b/libartbase/Android.bp
@@ -275,7 +275,6 @@
         "base/membarrier_test.cc",
         "base/memory_region_test.cc",
         "base/mem_map_test.cc",
-        "base/memory_type_table_test.cc",
         "base/safe_copy_test.cc",
         "base/scoped_flock_test.cc",
         "base/time_utils_test.cc",
diff --git a/libartbase/base/memory_type_table.h b/libartbase/base/memory_type_table.h
deleted file mode 100644
index 89d4ad5..0000000
--- a/libartbase/base/memory_type_table.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * Copyright (C) 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_LIBARTBASE_BASE_MEMORY_TYPE_TABLE_H_
-#define ART_LIBARTBASE_BASE_MEMORY_TYPE_TABLE_H_
-
-#include <iostream>
-#include <map>
-#include <vector>
-
-#include <android-base/macros.h>   // For DISALLOW_COPY_AND_ASSIGN
-#include <android-base/logging.h>  // For DCHECK macros
-
-namespace art {
-
-// Class representing a memory range together with type attribute.
-template <typename T>
-class MemoryTypeRange final {
- public:
-  MemoryTypeRange(uintptr_t start, uintptr_t limit, const T& type)
-      : start_(start), limit_(limit), type_(type) {}
-  MemoryTypeRange() : start_(0), limit_(0), type_() {}
-  MemoryTypeRange(MemoryTypeRange&& other) = default;
-  MemoryTypeRange(const MemoryTypeRange& other) = default;
-  MemoryTypeRange& operator=(const MemoryTypeRange& other) = default;
-
-  uintptr_t Start() const {
-    DCHECK(IsValid());
-    return start_;
-  }
-
-  uintptr_t Limit() const {
-    DCHECK(IsValid());
-    return limit_;
-  }
-
-  uintptr_t Size() const { return Limit() - Start(); }
-
-  const T& Type() const { return type_; }
-
-  bool IsValid() const { return start_ <= limit_; }
-
-  bool Contains(uintptr_t address) const {
-    return address >= Start() && address < Limit();
-  }
-
-  bool Overlaps(const MemoryTypeRange& other) const {
-    bool disjoint = Limit() <= other.Start() || Start() >= other.Limit();
-    return !disjoint;
-  }
-
-  bool Adjoins(const MemoryTypeRange& other) const {
-    return other.Start() == Limit() || other.Limit() == Start();
-  }
-
-  bool CombinableWith(const MemoryTypeRange& other) const {
-    return Type() == other.Type() && Adjoins(other);
-  }
-
- private:
-  uintptr_t start_;
-  uintptr_t limit_;
-  T type_;
-};
-
-// Class representing a table of memory ranges.
-template <typename T>
-class MemoryTypeTable final {
- public:
-  // Class used to construct and populate MemoryTypeTable instances.
-  class Builder;
-
-  MemoryTypeTable() {}
-
-  MemoryTypeTable(MemoryTypeTable&& table) : ranges_(std::move(table.ranges_)) {}
-
-  MemoryTypeTable& operator=(MemoryTypeTable&& table) {
-    ranges_ = std::move(table.ranges_);
-    return *this;
-  }
-
-  // Find the range containing |address|.
-  // Returns a pointer to a range on success, nullptr otherwise.
-  const MemoryTypeRange<T>* Lookup(uintptr_t address) const {
-    int last = static_cast<int>(ranges_.size()) - 1;
-    for (int l = 0, r = last; l <= r;) {
-      int m = l + (r - l) / 2;
-      if (address < ranges_[m].Start()) {
-        r = m - 1;
-      } else if (address >= ranges_[m].Limit()) {
-        l = m + 1;
-      } else {
-        DCHECK(ranges_[m].Contains(address))
-            << reinterpret_cast<void*>(address) << " " << ranges_[m];
-        return &ranges_[m];
-      }
-    }
-    return nullptr;
-  }
-
-  size_t Size() const { return ranges_.size(); }
-
-  void Print(std::ostream& os) const {
-    for (const auto& range : ranges_) {
-      os << range << std::endl;
-    }
-  }
-
- private:
-  std::vector<MemoryTypeRange<T>> ranges_;
-
-  DISALLOW_COPY_AND_ASSIGN(MemoryTypeTable);
-};
-
-// Class for building MemoryTypeTable instances. Supports
-// adding ranges and looking up ranges.
-template <typename T>
-class MemoryTypeTable<T>::Builder final {
- public:
-  Builder() {}
-
-  // Adds a range if it is valid and doesn't overlap with existing ranges.  If the range adjoins
-  // with an existing range, then the ranges are merged.
-  //
-  // Overlapping ranges and ranges of zero size are not supported.
-  //
-  // Returns true on success, false otherwise.
-  inline bool Add(const MemoryTypeRange<T>& region);
-
-  // Find the range containing |address|.
-  // Returns a pointer to a range on success, nullptr otherwise.
-  inline const MemoryTypeRange<T>* Lookup(uintptr_t address) const;
-
-  // Returns number of unique ranges.
-  inline size_t Size() const { return ranges_.size(); }
-
-  // Generates a MemoryTypeTable for added ranges.
-  MemoryTypeTable Build() const {
-    MemoryTypeTable table;
-    for (const auto& it : ranges_) {
-      table.ranges_.push_back(it.second);
-    }
-    return table;
-  }
-
- private:
-  std::map<uintptr_t, MemoryTypeRange<T>> ranges_;
-
-  DISALLOW_COPY_AND_ASSIGN(Builder);
-};
-
-template <typename T>
-bool operator==(const MemoryTypeRange<T>& lhs, const MemoryTypeRange<T>& rhs) {
-  return (lhs.Start() == rhs.Start() && lhs.Limit() == rhs.Limit() && lhs.Type() == rhs.Type());
-}
-
-template <typename T>
-bool operator!=(const MemoryTypeRange<T>& lhs, const MemoryTypeRange<T>& rhs) {
-  return !(lhs == rhs);
-}
-
-template <typename T>
-std::ostream& operator<<(std::ostream& os, const MemoryTypeRange<T>& range) {
-  os << reinterpret_cast<void*>(range.Start())
-     << '-'
-     << reinterpret_cast<void*>(range.Limit())
-     << ' '
-     << range.Type();
-  return os;
-}
-
-template <typename T>
-std::ostream& operator<<(std::ostream& os, const MemoryTypeTable<T>& table) {
-  table.Print(os);
-  return os;
-}
-
-template <typename T>
-bool MemoryTypeTable<T>::Builder::Add(const MemoryTypeRange<T>& range) {
-  if (UNLIKELY(!range.IsValid() || range.Size() == 0u)) {
-    return false;
-  }
-
-  typename std::map<uintptr_t, MemoryTypeRange<T>>::iterator pred, succ;
-
-  // Find an iterator for the next element in the ranges.
-  succ = ranges_.lower_bound(range.Limit());
-
-  // Find an iterator for a predecessor element.
-  if (succ == ranges_.begin()) {
-    // No predecessor element if the successor is at the beginning of ranges.
-    pred = ranges_.end();
-  } else if (succ != ranges_.end()) {
-    // Predecessor is element before successor.
-    pred = std::prev(succ);
-  } else {
-    // Predecessor is the last element in a non-empty map when there is no successor.
-    pred = ranges_.find(ranges_.rbegin()->first);
-  }
-
-  // Invalidate |succ| if it cannot be combined with |range|.
-  if (succ != ranges_.end()) {
-    DCHECK_GE(succ->second.Limit(), range.Start());
-    if (range.Overlaps(succ->second)) {
-      return false;
-    }
-    if (!range.CombinableWith(succ->second)) {
-      succ = ranges_.end();
-    }
-  }
-
-  // Invalidate |pred| if it cannot be combined with |range|.
-  if (pred != ranges_.end()) {
-    if (range.Overlaps(pred->second)) {
-      return false;
-    }
-    if (!range.CombinableWith(pred->second)) {
-      pred = ranges_.end();
-    }
-  }
-
-  if (pred == ranges_.end()) {
-    if (succ == ranges_.end()) {
-      // |pred| is invalid, |succ| is invalid.
-      // No compatible neighbors for merging.
-      DCHECK(ranges_.find(range.Limit()) == ranges_.end());
-      ranges_[range.Limit()] = range;
-    } else {
-      // |pred| is invalid, |succ| is valid. Merge into |succ|.
-      const uintptr_t limit = succ->second.Limit();
-      DCHECK_GT(limit, range.Limit());
-      ranges_.erase(succ);
-      ranges_[limit] = MemoryTypeRange<T>(range.Start(), limit, range.Type());
-    }
-  } else {
-    if (succ == ranges_.end()) {
-      // |pred| is valid, |succ| is invalid. Merge into |pred|.
-      const uintptr_t start = pred->second.Start();
-      const uintptr_t limit = range.Limit();
-      DCHECK_LT(start, range.Start());
-      DCHECK_GT(limit, pred->second.Limit());
-      ranges_.erase(pred);
-      ranges_[limit] = MemoryTypeRange<T>(start, limit, range.Type());
-    } else {
-      // |pred| is valid, |succ| is valid. Merge between |pred| and |succ|.
-      DCHECK_LT(pred->second.Start(), range.Start());
-      DCHECK_GT(succ->second.Limit(), range.Limit());
-      const uintptr_t start = pred->second.Start();
-      const uintptr_t limit = succ->second.Limit();
-      ranges_.erase(pred, ++succ);
-      ranges_[limit] = MemoryTypeRange<T>(start, limit, range.Type());
-    }
-  }
-  return true;
-}
-
-template <typename T>
-const MemoryTypeRange<T>* MemoryTypeTable<T>::Builder::Lookup(uintptr_t address) const {
-  auto it = ranges_.upper_bound(address);
-  if (it != ranges_.end() && it->second.Contains(address)) {
-    return &it->second;
-  } else {
-    return nullptr;
-  }
-}
-
-}  // namespace art
-
-#endif  // ART_LIBARTBASE_BASE_MEMORY_TYPE_TABLE_H_
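
For reference, a minimal usage sketch of the Builder/table API deleted
above (values are illustrative; the memory_type_table_test.cc removal
below exercises the same calls):

  art::MemoryTypeTable<int>::Builder builder;
  builder.Add(art::MemoryTypeRange<int>(0x1000u, 0x2000u, 0));  // [0x1000, 0x2000) -> type 0
  builder.Add(art::MemoryTypeRange<int>(0x2000u, 0x3000u, 1));  // Adjoining range, different type.
  art::MemoryTypeTable<int> table = builder.Build();
  const art::MemoryTypeRange<int>* range = table.Lookup(0x1800u);
  // range is non-null and range->Type() == 0; addresses outside every
  // added range return nullptr.
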
diff --git a/libartbase/base/memory_type_table_test.cc b/libartbase/base/memory_type_table_test.cc
deleted file mode 100644
index 1ffefef..0000000
--- a/libartbase/base/memory_type_table_test.cc
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Copyright (C) 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "memory_type_table.h"
-
-#include <limits>
-
-#include <gtest/gtest.h>
-
-namespace art {
-
-TEST(memory_type_range, range) {
-  MemoryTypeRange<int> r(0x1000u, 0x2000u, 42);
-  EXPECT_EQ(r.Start(), 0x1000u);
-  EXPECT_EQ(r.Limit(), 0x2000u);
-  EXPECT_EQ(r.Type(), 42);
-}
-
-TEST(memory_type_range, range_contains) {
-  MemoryTypeRange<int> r(0x1000u, 0x2000u, 42);
-  EXPECT_FALSE(r.Contains(0x0fffu));
-  EXPECT_TRUE(r.Contains(0x1000u));
-  EXPECT_TRUE(r.Contains(0x1fffu));
-  EXPECT_FALSE(r.Contains(0x2000u));
-}
-
-TEST(memory_type_range, range_overlaps) {
-  static const int kMemoryType = 42;
-  MemoryTypeRange<int> a(0x1000u, 0x2000u, kMemoryType);
-
-  {
-    // |<----- a ----->|<----- b ----->|
-    MemoryTypeRange<int> b(a.Limit(), a.Limit() + a.Size(), kMemoryType);
-    EXPECT_FALSE(a.Overlaps(b));
-    EXPECT_FALSE(b.Overlaps(a));
-  }
-
-  {
-    // |<----- a ----->| |<----- c ----->|
-    MemoryTypeRange<int> c(a.Limit() + a.Size(), a.Limit() + 2 * a.Size(), kMemoryType);
-    EXPECT_FALSE(a.Overlaps(c));
-    EXPECT_FALSE(c.Overlaps(a));
-  }
-
-  {
-    // |<----- a ----->|
-    //     |<- d ->|
-    MemoryTypeRange<int> d(a.Start() + a.Size() / 4, a.Limit() - a.Size() / 4, kMemoryType);
-    EXPECT_TRUE(a.Overlaps(d));
-    EXPECT_TRUE(d.Overlaps(a));
-  }
-
-  {
-    // |<----- a ----->|
-    // |<- e ->|
-    MemoryTypeRange<int> e(a.Start(), a.Start() + a.Size() / 2, kMemoryType);
-    EXPECT_TRUE(a.Overlaps(e));
-    EXPECT_TRUE(e.Overlaps(a));
-  }
-
-  {
-    // |<----- a ----->|
-    //         |<- f ->|
-    MemoryTypeRange<int> f(a.Start() + a.Size() / 2, a.Limit(), kMemoryType);
-    EXPECT_TRUE(a.Overlaps(f));
-    EXPECT_TRUE(f.Overlaps(a));
-  }
-
-  {
-    // |<----- a ----->|
-    //        |<----- g ----->|
-    MemoryTypeRange<int> g(a.Start() + a.Size() / 2, a.Limit() + a.Size() / 2, kMemoryType);
-    EXPECT_TRUE(a.Overlaps(g));
-    EXPECT_TRUE(g.Overlaps(a));
-  }
-}
-
-TEST(memory_type_range, range_adjoins) {
-  static const int kMemoryType = 42;
-  MemoryTypeRange<int> a(0x1000u, 0x2000u, kMemoryType);
-
-  {
-    // |<--- a --->|<--- b --->|
-    MemoryTypeRange<int> b(a.Limit(), a.Limit() + a.Size(), kMemoryType);
-    EXPECT_TRUE(a.Adjoins(b));
-    EXPECT_TRUE(b.Adjoins(a));
-  }
-
-  {
-    // |<--- a --->| |<--- c --->|
-    MemoryTypeRange<int> c(a.Limit() + a.Size(), a.Limit() + 2 * a.Size(), kMemoryType);
-    EXPECT_FALSE(a.Adjoins(c));
-    EXPECT_FALSE(c.Adjoins(a));
-  }
-
-  {
-    // |<--- a --->|
-    //       |<--- d --->|
-    MemoryTypeRange<int> d(a.Start() + a.Size() / 2, a.Limit() + a.Size() / 2, kMemoryType);
-    EXPECT_FALSE(a.Adjoins(d));
-    EXPECT_FALSE(d.Adjoins(a));
-  }
-}
-
-TEST(memory_type_range, combinable_with) {
-  // Adjoining ranges of same type.
-  EXPECT_TRUE(MemoryTypeRange<int>(0x1000, 0x2000, 0)
-              .CombinableWith(MemoryTypeRange<int>(0x800, 0x1000, 0)));
-  EXPECT_TRUE(MemoryTypeRange<int>(0x800, 0x1000, 0)
-              .CombinableWith(MemoryTypeRange<int>(0x1000, 0x2000, 0)));
-  // Adjoining ranges of different types.
-  EXPECT_FALSE(MemoryTypeRange<int>(0x1000, 0x2000, 0)
-               .CombinableWith(MemoryTypeRange<int>(0x800, 0x1000, 1)));
-  EXPECT_FALSE(MemoryTypeRange<int>(0x800, 0x1000, 1)
-               .CombinableWith(MemoryTypeRange<int>(0x1000, 0x2000, 0)));
-  // Disjoint ranges.
-  EXPECT_FALSE(MemoryTypeRange<int>(0x0800, 0x1000, 0)
-               .CombinableWith(MemoryTypeRange<int>(0x1f00, 0x2000, 0)));
-  EXPECT_FALSE(MemoryTypeRange<int>(0x1f00, 0x2000, 0)
-               .CombinableWith(MemoryTypeRange<int>(0x800, 0x1000, 0)));
-  // Overlapping ranges.
-  EXPECT_FALSE(MemoryTypeRange<int>(0x0800, 0x2000, 0)
-               .CombinableWith(MemoryTypeRange<int>(0x1f00, 0x2000, 0)));
-}
-
-TEST(memory_type_range, is_valid) {
-  EXPECT_TRUE(MemoryTypeRange<int>(std::numeric_limits<uintptr_t>::min(),
-                                   std::numeric_limits<uintptr_t>::max(),
-                                   0).IsValid());
-  EXPECT_TRUE(MemoryTypeRange<int>(1u, 2u, 0).IsValid());
-  EXPECT_TRUE(MemoryTypeRange<int>(0u, 0u, 0).IsValid());
-  EXPECT_FALSE(MemoryTypeRange<int>(2u, 1u, 0).IsValid());
-  EXPECT_FALSE(MemoryTypeRange<int>(std::numeric_limits<uintptr_t>::max(),
-                                    std::numeric_limits<uintptr_t>::min(),
-                                    0).IsValid());
-}
-
-TEST(memory_type_range, range_equality) {
-  static const int kMemoryType = 42;
-  MemoryTypeRange<int> a(0x1000u, 0x2000u, kMemoryType);
-
-  MemoryTypeRange<int> b(a.Start(), a.Limit(), a.Type());
-  EXPECT_TRUE(a == b);
-  EXPECT_FALSE(a != b);
-
-  MemoryTypeRange<int> c(a.Start() + 1, a.Limit(), a.Type());
-  EXPECT_FALSE(a == c);
-  EXPECT_TRUE(a != c);
-
-  MemoryTypeRange<int> d(a.Start(), a.Limit() + 1, a.Type());
-  EXPECT_FALSE(a == d);
-  EXPECT_TRUE(a != d);
-
-  MemoryTypeRange<int> e(a.Start(), a.Limit(), a.Type() + 1);
-  EXPECT_FALSE(a == e);
-  EXPECT_TRUE(a != e);
-}
-
-TEST(memory_type_table_builder, add_lookup) {
-  MemoryTypeTable<int>::Builder builder;
-  MemoryTypeRange<int> range(0x1000u, 0x2000u, 0);
-  EXPECT_EQ(builder.Size(), 0u);
-  EXPECT_EQ(builder.Add(range), true);
-  EXPECT_EQ(builder.Lookup(range.Start() - 1u), nullptr);
-  EXPECT_EQ(builder.Size(), 1u);
-
-  const MemoryTypeRange<int>* first = builder.Lookup(range.Start());
-  ASSERT_TRUE(first != nullptr);
-  EXPECT_EQ(range, *first);
-
-  const MemoryTypeRange<int>* last = builder.Lookup(range.Limit() - 1u);
-  ASSERT_TRUE(last != nullptr);
-  EXPECT_EQ(range, *last);
-
-  EXPECT_EQ(builder.Lookup(range.Limit()), nullptr);
-}
-
-TEST(memory_type_table_builder, add_lookup_multi) {
-  MemoryTypeTable<char>::Builder builder;
-  MemoryTypeRange<char> ranges[3] = {
-    MemoryTypeRange<char>(0x1, 0x2, 'a'),
-    MemoryTypeRange<char>(0x2, 0x4, 'b'),
-    MemoryTypeRange<char>(0x4, 0x8, 'c'),
-  };
-
-  for (const auto& range : ranges) {
-    builder.Add(range);
-  }
-
-  ASSERT_EQ(builder.Size(), sizeof(ranges) / sizeof(ranges[0]));
-  ASSERT_TRUE(builder.Lookup(0x0) == nullptr);
-  ASSERT_TRUE(builder.Lookup(0x8) == nullptr);
-  for (const auto& range : ranges) {
-    auto first = builder.Lookup(range.Start());
-    ASSERT_TRUE(first != nullptr);
-    EXPECT_EQ(*first, range);
-
-    auto last = builder.Lookup(range.Limit() - 1);
-    ASSERT_TRUE(last != nullptr);
-    EXPECT_EQ(*last, range);
-  }
-}
-
-TEST(memory_type_table_builder, add_overlapping) {
-  MemoryTypeTable<int>::Builder builder;
-  MemoryTypeRange<int> range(0x1000u, 0x2000u, 0);
-  builder.Add(range);
-  EXPECT_EQ(builder.Size(), 1u);
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(0x0800u, 0x2800u, 0)));
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(0x0800u, 0x1800u, 0)));
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(0x1800u, 0x2800u, 0)));
-  EXPECT_EQ(builder.Size(), 1u);
-}
-
-TEST(memory_type_table_builder, add_zero_size) {
-  MemoryTypeTable<int>::Builder builder;
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(0x1000u, 0x1000u, 0)));
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x1000u, 0x1001u, 0)));
-  // Check that adjoining zero-length ranges don't get included.
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(0x1000u, 0x1000u, 0)));
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(0x1001u, 0x1001u, 0)));
-  // Check around extremes
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(0x0u, 0x0u, 0)));
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(~0u, ~0u, 0)));
-}
-
-TEST(memory_type_table_builder, add_invalid_range) {
-  MemoryTypeTable<int>::Builder builder;
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(0x1000u, 0x1000u, 0)));
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(0x2000u, 0x1000u, 0)));
-}
-
-TEST(memory_type_table_builder, add_adjoining) {
-  MemoryTypeTable<int>::Builder builder;
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x1000u, 0x2000u, 0)));
-  EXPECT_EQ(builder.Size(), 1u);
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x0800u, 0x1000u, 0)));
-  EXPECT_EQ(builder.Size(), 1u);
-  ASSERT_NE(builder.Lookup(0x0900u), nullptr);
-  EXPECT_EQ(builder.Lookup(0x0900u)->Start(), 0x0800u);
-  EXPECT_EQ(builder.Lookup(0x0900u)->Limit(), 0x2000u);
-  EXPECT_EQ(builder.Lookup(0x0900u)->Type(), 0);
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x2000u, 0x2100u, 0)));
-  EXPECT_EQ(builder.Size(), 1u);
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x3000u, 0x3100u, 0)));
-  EXPECT_EQ(builder.Size(), 2u);
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x2100u, 0x3000u, 0)));
-  ASSERT_NE(builder.Lookup(0x2000u), nullptr);
-  EXPECT_EQ(builder.Lookup(0x2000u)->Start(), 0x0800u);
-  EXPECT_EQ(builder.Lookup(0x2000u)->Limit(), 0x3100u);
-  EXPECT_EQ(builder.Lookup(0x2000u)->Type(), 0);
-  EXPECT_EQ(builder.Size(), 1u);
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x4000u, 0x4100u, 0)));
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x4f00u, 0x5000u, 0)));
-  EXPECT_EQ(builder.Size(), 3u);
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x4100u, 0x4f00u, 0)));
-  ASSERT_NE(builder.Lookup(0x4f00u), nullptr);
-  ASSERT_EQ(builder.Lookup(0x4f00u)->Start(), 0x4000u);
-  ASSERT_EQ(builder.Lookup(0x4f00u)->Limit(), 0x5000u);
-  ASSERT_EQ(builder.Lookup(0x4f00u)->Type(), 0);
-  EXPECT_EQ(builder.Size(), 2u);
-  ASSERT_NE(builder.Lookup(0x4f00u), nullptr);
-}
-
-TEST(memory_type_table_builder, add_adjoining_different_type) {
-  MemoryTypeTable<int>::Builder builder;
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x0000u, 0x1000u, 1)));
-  EXPECT_EQ(builder.Size(), 1u);
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x1000u, 0x2000u, 2)));
-  EXPECT_EQ(builder.Size(), 2u);
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x2000u, 0x3000u, 3)));
-  EXPECT_EQ(builder.Size(), 3u);
-}
-
-TEST(memory_type_table, create) {
-  MemoryTypeTable<int>::Builder builder;
-  builder.Add(MemoryTypeRange<int>(0x1000u, 0x2000u, 0));
-  builder.Add(MemoryTypeRange<int>(0x2000u, 0x3000u, 1));
-  builder.Add(MemoryTypeRange<int>(0x4000u, 0x5000u, 2));
-
-  MemoryTypeTable<int> table = builder.Build();
-  EXPECT_TRUE(table.Lookup(0x0000u) == nullptr);
-  EXPECT_TRUE(table.Lookup(0x0800u) == nullptr);
-  EXPECT_TRUE(table.Lookup(0x3000u) == nullptr);
-  EXPECT_TRUE(table.Lookup(0x3fffu) == nullptr);
-  EXPECT_TRUE(table.Lookup(0x5000u) == nullptr);
-  EXPECT_TRUE(table.Lookup(~0u) == nullptr);
-
-  ASSERT_TRUE(table.Lookup(0x1000u) != nullptr);
-  ASSERT_TRUE(table.Lookup(0x1fffu) != nullptr);
-  EXPECT_EQ(*table.Lookup(0x1000u), MemoryTypeRange<int>(0x1000u, 0x2000u, 0));
-  EXPECT_EQ(*table.Lookup(0x1fffu), MemoryTypeRange<int>(0x1000u, 0x2000u, 0));
-  ASSERT_TRUE(table.Lookup(0x2000u) != nullptr);
-  ASSERT_TRUE(table.Lookup(0x2fffu) != nullptr);
-  EXPECT_EQ(*table.Lookup(0x2000u), MemoryTypeRange<int>(0x2000u, 0x3000u, 1));
-  EXPECT_EQ(*table.Lookup(0x2fffu), MemoryTypeRange<int>(0x2000u, 0x3000u, 1));
-  ASSERT_TRUE(table.Lookup(0x4000u) != nullptr);
-  ASSERT_TRUE(table.Lookup(0x4fffu) != nullptr);
-  EXPECT_EQ(*table.Lookup(0x4000u), MemoryTypeRange<int>(0x4000u, 0x5000u, 2));
-  EXPECT_EQ(*table.Lookup(0x4fffu), MemoryTypeRange<int>(0x4000u, 0x5000u, 2));
-}
-
-TEST(memory_type_table, find_all) {
-  static constexpr size_t kRangeCount = 64;
-  static constexpr uintptr_t kRangeSize = 1024;
-
-  MemoryTypeTable<int>::Builder builder;
-  for (size_t i = 0; i < kRangeCount; i++) {
-    const uintptr_t start = i * kRangeSize;
-    builder.Add(MemoryTypeRange<int>(start, start + kRangeSize, static_cast<int>(i)));
-  }
-
-  for (size_t delta = 0; delta < kRangeSize; delta += kRangeSize / 2) {
-    for (size_t i = 0; i < kRangeCount; i++) {
-      const uintptr_t start = i * kRangeSize;
-      const MemoryTypeRange<int> expected(start, start + kRangeSize, static_cast<int>(i));
-      const uintptr_t address = i * kRangeSize + delta;
-      const MemoryTypeRange<int>* actual = builder.Lookup(address);
-      ASSERT_TRUE(actual != nullptr) << reinterpret_cast<void*>(address);
-      EXPECT_EQ(expected, *actual) << reinterpret_cast<void*>(address);
-    }
-  }
-
-  MemoryTypeTable<int> table = builder.Build();
-  for (size_t delta = 0; delta < kRangeSize; delta += kRangeSize / 2) {
-    for (size_t i = 0; i < kRangeCount; i++) {
-      const uintptr_t start = i * kRangeSize;
-      const MemoryTypeRange<int> expected(start, start + kRangeSize, static_cast<int>(i));
-      const uintptr_t address = i * kRangeSize + delta;
-      const MemoryTypeRange<int>* actual = table.Lookup(address);
-      ASSERT_TRUE(actual != nullptr) << reinterpret_cast<void*>(address);
-      EXPECT_EQ(expected, *actual) << reinterpret_cast<void*>(address);
-    }
-  }
-}
-
-}  // namespace art
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 22931dd..a4e9984 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -121,7 +121,6 @@
         "gc/task_processor.cc",
         "gc/verification.cc",
         "hidden_api.cc",
-        "hidden_api_jni.cc",
         "hprof/hprof.cc",
         "image.cc",
         "index_bss_mapping.cc",
diff --git a/runtime/hidden_api_jni.cc b/runtime/hidden_api_jni.cc
deleted file mode 100644
index 074c4c3..0000000
--- a/runtime/hidden_api_jni.cc
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * Copyright (C) 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "hidden_api_jni.h"
-#include "hidden_api.h"
-
-#if defined(__linux__)
-
-#include <dlfcn.h>
-#include <link.h>
-
-#include <mutex>
-
-#include "android-base/logging.h"
-
-#include "unwindstack/Regs.h"
-#include "unwindstack/RegsGetLocal.h"
-#include "unwindstack/Memory.h"
-#include "unwindstack/Unwinder.h"
-
-#include "base/bit_utils.h"
-#include "base/casts.h"
-#include "base/file_utils.h"
-#include "base/globals.h"
-#include "base/memory_type_table.h"
-#include "base/string_view_cpp20.h"
-
-namespace art {
-namespace hiddenapi {
-
-namespace {
-
-// The maximum number of frames to back trace through when performing Core Platform API checks of
-// native code.
-static constexpr size_t kMaxFrames = 3;
-
-static std::mutex gUnwindingMutex;
-
-struct UnwindHelper {
-  explicit UnwindHelper(size_t max_depth)
-      : memory_(unwindstack::Memory::CreateProcessMemory(getpid())),
-        jit_(memory_),
-        dex_(memory_),
-        unwinder_(max_depth, &maps_, memory_) {
-    CHECK(maps_.Parse());
-    unwinder_.SetJitDebug(&jit_, unwindstack::Regs::CurrentArch());
-    unwinder_.SetDexFiles(&dex_, unwindstack::Regs::CurrentArch());
-    unwinder_.SetResolveNames(false);
-    unwindstack::Elf::SetCachingEnabled(false);
-  }
-
-  unwindstack::Unwinder* Unwinder() { return &unwinder_; }
-
- private:
-  unwindstack::LocalMaps maps_;
-  std::shared_ptr<unwindstack::Memory> memory_;
-  unwindstack::JitDebug jit_;
-  unwindstack::DexFiles dex_;
-  unwindstack::Unwinder unwinder_;
-};
-
-static UnwindHelper& GetUnwindHelper() {
-  static UnwindHelper helper(kMaxFrames);
-  return helper;
-}
-
-}  // namespace
-
-enum class SharedObjectKind {
-  kRuntime = 0,
-  kApexModule = 1,
-  kOther = 2
-};
-
-std::ostream& operator<<(std::ostream& os, SharedObjectKind kind) {
-  switch (kind) {
-    case SharedObjectKind::kRuntime:
-      os << "Runtime";
-      break;
-    case SharedObjectKind::kApexModule:
-      os << "APEX Module";
-      break;
-    case SharedObjectKind::kOther:
-      os << "Other";
-      break;
-  }
-  return os;
-}
-
-// Class holding cached ranges of loaded shared objects to facilitate checks of field and method
-// resolutions within the Core Platform API for native callers.
-class CodeRangeCache final {
- public:
-  static CodeRangeCache& GetSingleton() {
-    static CodeRangeCache Singleton;
-    return Singleton;
-  }
-
-  SharedObjectKind GetSharedObjectKind(void* pc) {
-    uintptr_t address = reinterpret_cast<uintptr_t>(pc);
-    SharedObjectKind kind;
-    if (Find(address, &kind)) {
-      return kind;
-    }
-    return SharedObjectKind::kOther;
-  }
-
-  bool HasCache() const {
-    return memory_type_table_.Size() != 0;
-  }
-
-  void BuildCache() {
-    DCHECK(!HasCache());
-    art::MemoryTypeTable<SharedObjectKind>::Builder builder;
-    builder_ = &builder;
-    libjavacore_loaded_ = false;
-    libnativehelper_loaded_ = false;
-    libopenjdk_loaded_ = false;
-
-    // Iterate over ELF headers populating table_builder with executable ranges.
-    dl_iterate_phdr(VisitElfInfo, this);
-    memory_type_table_ = builder_->Build();
-
-    // Check expected libraries loaded when iterating headers.
-    CHECK(libjavacore_loaded_);
-    CHECK(libnativehelper_loaded_);
-    CHECK(libopenjdk_loaded_);
-    builder_ = nullptr;
-  }
-
-  void DropCache() {
-    memory_type_table_ = {};
-  }
-
- private:
-  CodeRangeCache() {}
-
-  bool Find(uintptr_t address, SharedObjectKind* kind) const {
-    const art::MemoryTypeRange<SharedObjectKind>* range = memory_type_table_.Lookup(address);
-    if (range == nullptr) {
-      return false;
-    }
-    *kind = range->Type();
-    return true;
-  }
-
-  static int VisitElfInfo(struct dl_phdr_info *info, size_t size ATTRIBUTE_UNUSED, void *data)
-      NO_THREAD_SAFETY_ANALYSIS {
-    auto cache = reinterpret_cast<CodeRangeCache*>(data);
-    art::MemoryTypeTable<SharedObjectKind>::Builder* builder = cache->builder_;
-
-    for (size_t i = 0u; i < info->dlpi_phnum; ++i) {
-      const ElfW(Phdr)& phdr = info->dlpi_phdr[i];
-      if (phdr.p_type != PT_LOAD || ((phdr.p_flags & PF_X) != PF_X)) {
-        continue;  // Skip anything other than code pages
-      }
-      uintptr_t start = info->dlpi_addr + phdr.p_vaddr;
-      const uintptr_t limit = art::RoundUp(start + phdr.p_memsz, art::kPageSize);
-      SharedObjectKind kind = GetKind(info->dlpi_name, start, limit);
-      art::MemoryTypeRange<SharedObjectKind> range{start, limit, kind};
-      if (!builder->Add(range)) {
-        LOG(WARNING) << "Overlapping/invalid range found in ELF headers: " << range;
-      }
-    }
-
-    // Update sanity check state.
-    std::string_view dlpi_name{info->dlpi_name};
-    if (!cache->libjavacore_loaded_) {
-      cache->libjavacore_loaded_ = art::EndsWith(dlpi_name, kLibjavacore);
-    }
-    if (!cache->libnativehelper_loaded_) {
-      cache->libnativehelper_loaded_ = art::EndsWith(dlpi_name, kLibnativehelper);
-    }
-    if (!cache->libopenjdk_loaded_) {
-      cache->libopenjdk_loaded_ = art::EndsWith(dlpi_name, kLibopenjdk);
-    }
-
-    return 0;
-  }
-
-  static SharedObjectKind GetKind(const char* so_name, uintptr_t start, uintptr_t limit) {
-    uintptr_t runtime_method = reinterpret_cast<uintptr_t>(CodeRangeCache::GetKind);
-    if (runtime_method >= start && runtime_method < limit) {
-      return SharedObjectKind::kRuntime;
-    }
-    return art::LocationIsOnApex(so_name) ? SharedObjectKind::kApexModule
-                                          : SharedObjectKind::kOther;
-  }
-
-  art::MemoryTypeTable<SharedObjectKind> memory_type_table_;
-
-  // Table builder, only valid during BuildCache().
-  art::MemoryTypeTable<SharedObjectKind>::Builder* builder_;
-
-  // Sanity checking state.
-  bool libjavacore_loaded_;
-  bool libnativehelper_loaded_;
-  bool libopenjdk_loaded_;
-
-  static constexpr std::string_view kLibjavacore = "libjavacore.so";
-  static constexpr std::string_view kLibnativehelper = "libnativehelper.so";
-  static constexpr std::string_view kLibopenjdk = art::kIsDebugBuild ? "libopenjdkd.so"
-                                                                     : "libopenjdk.so";
-
-  DISALLOW_COPY_AND_ASSIGN(CodeRangeCache);
-};
-
-// Cookie for tracking approvals of Core Platform API use. The Thread class has a per thread field
-// that stores these values. This is necessary because we can't change the JNI interfaces and some
-// paths call into each other, i.e. checked JNI typically calls plain JNI.
-struct CorePlatformApiCookie final {
-  bool approved:1;  // Whether the outermost ScopedCorePlatformApiCheck instance is approved.
-  uint32_t depth:31;  // Count of nested ScopedCorePlatformApiCheck instances.
-};
-
-ScopedCorePlatformApiCheck::ScopedCorePlatformApiCheck() {
-  Thread* self = Thread::Current();
-  CorePlatformApiCookie cookie =
-      bit_cast<CorePlatformApiCookie, uint32_t>(self->CorePlatformApiCookie());
-  bool is_core_platform_api_approved = false;  // Default value for non-device testing.
-  if (!kIsTargetBuild) {
-    // On target device, if policy says enforcement is disabled, then treat all callers as
-    // approved.
-    auto policy = Runtime::Current()->GetCorePlatformApiEnforcementPolicy();
-    if (policy == hiddenapi::EnforcementPolicy::kDisabled) {
-      is_core_platform_api_approved = true;
-    } else if (cookie.depth == 0) {
-      // On target device, only check the caller at depth 0 (the outermost entry into JNI
-      // interface).
-      DCHECK_EQ(cookie.approved, false);
-      void* caller_pc = CaptureCallerPc();
-      if (caller_pc != nullptr) {
-        SharedObjectKind kind = CodeRangeCache::GetSingleton().GetSharedObjectKind(caller_pc);
-        is_core_platform_api_approved = ((kind == SharedObjectKind::kRuntime) ||
-                                         (kind == SharedObjectKind::kApexModule));
-      }
-    }
-  }
-
-  // Update cookie
-  if (is_core_platform_api_approved) {
-    cookie.approved = true;
-  }
-  cookie.depth += 1;
-  self->SetCorePlatformApiCookie(bit_cast<uint32_t, CorePlatformApiCookie>(cookie));
-}
-
-ScopedCorePlatformApiCheck::~ScopedCorePlatformApiCheck() {
-  Thread* self = Thread::Current();
-  // Update cookie, decrementing depth and clearing approved flag if this is the outermost
-  // instance.
-  CorePlatformApiCookie cookie =
-      bit_cast<CorePlatformApiCookie, uint32_t>(self->CorePlatformApiCookie());
-  DCHECK_NE(cookie.depth, 0u);
-  cookie.depth -= 1u;
-  if (cookie.depth == 0u) {
-    cookie.approved = false;
-  }
-  self->SetCorePlatformApiCookie(bit_cast<uint32_t, CorePlatformApiCookie>(cookie));
-}
-
-bool ScopedCorePlatformApiCheck::IsCurrentCallerApproved(Thread* self) {
-  CorePlatformApiCookie cookie =
-      bit_cast<CorePlatformApiCookie, uint32_t>(self->CorePlatformApiCookie());
-  DCHECK_GT(cookie.depth, 0u);
-  return cookie.approved;
-}
-
-void* ScopedCorePlatformApiCheck::CaptureCallerPc() {
-  std::lock_guard<std::mutex> guard(gUnwindingMutex);
-  unwindstack::Unwinder* unwinder = GetUnwindHelper().Unwinder();
-  std::unique_ptr<unwindstack::Regs> regs(unwindstack::Regs::CreateFromLocal());
-  RegsGetLocal(regs.get());
-  unwinder->SetRegs(regs.get());
-  unwinder->Unwind();
-  for (auto it = unwinder->frames().begin(); it != unwinder->frames().end(); ++it) {
-    // Unwind to frame above the tlsJniStackMarker. The stack markers should be on the first frame
-    // calling JNI methods.
-    if (it->sp > reinterpret_cast<uint64_t>(this)) {
-      return reinterpret_cast<void*>(it->pc);
-    }
-  }
-  return nullptr;
-}
-
-void JniInitializeNativeCallerCheck() {
-  // This method should be called only once and before there are multiple runtime threads.
-  DCHECK(!CodeRangeCache::GetSingleton().HasCache());
-  CodeRangeCache::GetSingleton().BuildCache();
-}
-
-void JniShutdownNativeCallerCheck() {
-  CodeRangeCache::GetSingleton().DropCache();
-}
-
-}  // namespace hiddenapi
-}  // namespace art
-
-#else  // __linux__
-
-namespace art {
-namespace hiddenapi {
-
-ScopedCorePlatformApiCheck::ScopedCorePlatformApiCheck() {}
-
-ScopedCorePlatformApiCheck::~ScopedCorePlatformApiCheck() {}
-
-bool ScopedCorePlatformApiCheck::IsCurrentCallerApproved(Thread* self ATTRIBUTE_UNUSED) {
-  return false;
-}
-
-void JniInitializeNativeCallerCheck() {}
-
-void JniShutdownNativeCallerCheck() {}
-
-}  // namespace hiddenapi
-}  // namespace art
-
-#endif  // __linux__
diff --git a/runtime/hidden_api_jni.h b/runtime/hidden_api_jni.h
deleted file mode 100644
index a084378..0000000
--- a/runtime/hidden_api_jni.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (C) 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_HIDDEN_API_JNI_H_
-#define ART_RUNTIME_HIDDEN_API_JNI_H_
-
-#include "base/macros.h"
-
-namespace art {
-
-class Thread;
-
-namespace hiddenapi {
-
-// Stack markers that should be instantiated in JNI Get{Field,Method}Id methods (and
-// their static equivalents) to allow native caller checks to take place.
-class ScopedCorePlatformApiCheck final {
- public:
-  ScopedCorePlatformApiCheck();
-  ~ScopedCorePlatformApiCheck();
-
-  // Check whether the caller is automatically approved based on location. Code in the run-time or
-  // in an APEX is considered to be automatically approved.
-  static bool IsCurrentCallerApproved(Thread* self);
-
- private:
-  // Captures calling PC for frame above the frame allocating the current ScopedCorePlatformApiCheck
-  // instance.
-  void* CaptureCallerPc();
-
-  // Instances should only be stack allocated, copy and assignment not useful.
-  DISALLOW_ALLOCATION();
-  DISALLOW_COPY_AND_ASSIGN(ScopedCorePlatformApiCheck);
-};
-
-void JniInitializeNativeCallerCheck();
-void JniShutdownNativeCallerCheck();
-
-}  // namespace hiddenapi
-}  // namespace art
-
-#endif  // ART_RUNTIME_HIDDEN_API_JNI_H_
diff --git a/runtime/jni/check_jni.cc b/runtime/jni/check_jni.cc
index 3491280..4a8453a 100644
--- a/runtime/jni/check_jni.cc
+++ b/runtime/jni/check_jni.cc
@@ -35,7 +35,6 @@
 #include "dex/descriptors_names.h"
 #include "dex/dex_file-inl.h"
 #include "gc/space/space.h"
-#include "hidden_api_jni.h"
 #include "java_vm_ext.h"
 #include "jni_internal.h"
 #include "mirror/class-inl.h"
@@ -2222,22 +2221,18 @@
   }
 
   static jmethodID GetMethodID(JNIEnv* env, jclass c, const char* name, const char* sig) {
-    hiddenapi::ScopedCorePlatformApiCheck sc;
     return GetMethodIDInternal(__FUNCTION__, env, c, name, sig, false);
   }
 
   static jmethodID GetStaticMethodID(JNIEnv* env, jclass c, const char* name, const char* sig) {
-    hiddenapi::ScopedCorePlatformApiCheck sc;
     return GetMethodIDInternal(__FUNCTION__, env, c, name, sig, true);
   }
 
   static jfieldID GetFieldID(JNIEnv* env, jclass c, const char* name, const char* sig) {
-    hiddenapi::ScopedCorePlatformApiCheck sc;
     return GetFieldIDInternal(__FUNCTION__, env, c, name, sig, false);
   }
 
   static jfieldID GetStaticFieldID(JNIEnv* env, jclass c, const char* name, const char* sig) {
-    hiddenapi::ScopedCorePlatformApiCheck sc;
     return GetFieldIDInternal(__FUNCTION__, env, c, name, sig, true);
   }
 
diff --git a/runtime/jni/jni_internal.cc b/runtime/jni/jni_internal.cc
index 882e10f..67073a8e 100644
--- a/runtime/jni/jni_internal.cc
+++ b/runtime/jni/jni_internal.cc
@@ -36,7 +36,6 @@
 #include "dex/utf.h"
 #include "fault_handler.h"
 #include "hidden_api.h"
-#include "hidden_api_jni.h"
 #include "gc/accounting/card_table-inl.h"
 #include "gc_root.h"
 #include "indirect_reference_table-inl.h"
@@ -88,9 +87,6 @@
 template<typename T>
 ALWAYS_INLINE static bool ShouldDenyAccessToMember(T* member, Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  if (hiddenapi::ScopedCorePlatformApiCheck::IsCurrentCallerApproved(self)) {
-    return false;
-  }
   return hiddenapi::ShouldDenyAccessToMember(
       member,
       [&]() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -825,7 +821,6 @@
     CHECK_NON_NULL_ARGUMENT(name);
     CHECK_NON_NULL_ARGUMENT(sig);
     ScopedObjectAccess soa(env);
-    hiddenapi::ScopedCorePlatformApiCheck sc;
     return FindMethodID<kEnableIndexIds>(soa, java_class, name, sig, false);
   }
 
@@ -835,7 +830,6 @@
     CHECK_NON_NULL_ARGUMENT(name);
     CHECK_NON_NULL_ARGUMENT(sig);
     ScopedObjectAccess soa(env);
-    hiddenapi::ScopedCorePlatformApiCheck sc;
     return FindMethodID<kEnableIndexIds>(soa, java_class, name, sig, true);
   }
 
@@ -1368,7 +1362,6 @@
     CHECK_NON_NULL_ARGUMENT(name);
     CHECK_NON_NULL_ARGUMENT(sig);
     ScopedObjectAccess soa(env);
-    hiddenapi::ScopedCorePlatformApiCheck sc;
     return FindFieldID<kEnableIndexIds>(soa, java_class, name, sig, false);
   }
 
@@ -1378,7 +1371,6 @@
     CHECK_NON_NULL_ARGUMENT(name);
     CHECK_NON_NULL_ARGUMENT(sig);
     ScopedObjectAccess soa(env);
-    hiddenapi::ScopedCorePlatformApiCheck sc;
     return FindFieldID<kEnableIndexIds>(soa, java_class, name, sig, true);
   }
 
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 1906b53..32ebfa6 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -86,7 +86,6 @@
 #include "gc/task_processor.h"
 #include "handle_scope-inl.h"
 #include "hidden_api.h"
-#include "hidden_api_jni.h"
 #include "image-inl.h"
 #include "instrumentation.h"
 #include "intern_table-inl.h"
@@ -510,8 +509,6 @@
   // instance. We rely on a small initialization order issue in Runtime::Start() that requires
   // elements of WellKnownClasses to be null, see b/65500943.
   WellKnownClasses::Clear();
-
-  hiddenapi::JniShutdownNativeCallerCheck();
 }
 
 struct AbortState {
@@ -1909,10 +1906,6 @@
   // Initialize well known classes that may invoke runtime native methods.
   WellKnownClasses::LateInit(env);
 
-  // Having loaded native libraries for Managed Core library, enable field and
-  // method resolution checks via JNI from native code.
-  hiddenapi::JniInitializeNativeCallerCheck();
-
   VLOG(startup) << "Runtime::InitNativeMethods exiting";
 }
 
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 002bacb..486191f 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -28,7 +28,6 @@
 #include "entrypoints/quick/quick_entrypoints_enum.h"
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "hidden_api.h"
-#include "hidden_api_jni.h"
 #include "jni/jni_internal.h"
 #include "jni_id_type.h"
 #include "mirror/class.h"
@@ -178,7 +177,6 @@
   jfieldID fid;
   {
     ScopedObjectAccess soa(env);
-    hiddenapi::ScopedCorePlatformApiCheck scpac;
     if (Runtime::Current()->GetJniIdType() != JniIdType::kSwapablePointer) {
       fid = jni::EncodeArtField</*kEnableIndexIds*/ true>(
           FindFieldJNI(soa, c, name, signature, is_static));
@@ -205,7 +203,6 @@
   jmethodID mid;
   {
     ScopedObjectAccess soa(env);
-    hiddenapi::ScopedCorePlatformApiCheck scpac;
     if (Runtime::Current()->GetJniIdType() != JniIdType::kSwapablePointer) {
       mid = jni::EncodeArtMethod</*kEnableIndexIds*/ true>(
           FindMethodJNI(soa, c, name, signature, is_static));