stdint types all the way!
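
Replace ART's home-grown integer typedefs and width constants with the
standard <stdint.h> types throughout the compiler and runtime:

  byte         -> uint8_t
  uword        -> uintptr_t
  kPointerSize -> sizeof(void*)
  kWordSize    -> sizeof(intptr_t)
  kBitsPerWord -> kBitsPerIntPtrT
  kUwordOne    -> kUintPtrTOne

For reference, the retired definitions looked roughly like this (a
sketch, not the verbatim source; the exact spelling lived in
runtime/globals.h):

  typedef uint8_t byte;
  typedef intptr_t word;
  typedef uintptr_t uword;
  static constexpr size_t kWordSize = sizeof(word);
  static constexpr size_t kBitsPerWord = kWordSize * kBitsPerByte;

While here, promote a few static const class constants to constexpr
(e.g. DexFile::kSha1DigestSize) and make Arm64Assembler's vixl_masm_
pointer const.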

Change-Id: I4e4ef3a2002fc59ebd9097087f150eaf3f2a7e08
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index fbaed9f..e3eb9e9 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -144,12 +144,12 @@
 
 OatFile::OatMethod CommonCompilerTest::CreateOatMethod(const void* code, const uint8_t* gc_map) {
   CHECK(code != nullptr);
-  const byte* base;
+  const uint8_t* base;
   uint32_t code_offset, gc_map_offset;
   if (gc_map == nullptr) {
-    base = reinterpret_cast<const byte*>(code);  // Base of data points at code.
-    base -= kPointerSize;  // Move backward so that code_offset != 0.
-    code_offset = kPointerSize;
+    base = reinterpret_cast<const uint8_t*>(code);  // Base of data points at code.
+    base -= sizeof(void*);  // Move backward so that code_offset != 0.
+    code_offset = sizeof(void*);
     gc_map_offset = 0;
   } else {
     // TODO: 64bit support.
@@ -398,7 +398,7 @@
   // accidentally end up colliding with the fixed memory address when we need to load the image.
   std::string error_msg;
   image_reservation_.reset(MemMap::MapAnonymous("image reservation",
-                                                reinterpret_cast<byte*>(ART_BASE_ADDRESS),
+                                                reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS),
                                                 (size_t)100 * 1024 * 1024,  // 100MB
                                                 PROT_NONE,
                                                 false /* no need for 4gb flag with fixed mmap*/,
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 7e83c0c..7dfdc76 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -352,7 +352,7 @@
   }
 
   // Iterate over each of the handlers to enqueue the empty Catch blocks.
-  const byte* handlers_ptr = DexFile::GetCatchHandlerData(*current_code_item_, 0);
+  const uint8_t* handlers_ptr = DexFile::GetCatchHandlerData(*current_code_item_, 0);
   uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
   for (uint32_t idx = 0; idx < handlers_size; idx++) {
     CatchHandlerIterator iterator(handlers_ptr);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index cdb816d..fb648fc 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -627,7 +627,7 @@
   if (code_item->tries_size_ == 0) {
     return;  // nothing to process
   }
-  const byte* encoded_catch_handler_list = DexFile::GetCatchHandlerData(*code_item, 0);
+  const uint8_t* encoded_catch_handler_list = DexFile::GetCatchHandlerData(*code_item, 0);
   size_t num_encoded_catch_handlers = DecodeUnsignedLeb128(&encoded_catch_handler_list);
   for (size_t i = 0; i < num_encoded_catch_handlers; i++) {
     int32_t encoded_catch_handler_size = DecodeSignedLeb128(&encoded_catch_handler_list);
@@ -1505,7 +1505,7 @@
   // Note the class_data pointer advances through the headers,
   // static fields, instance fields, direct methods, and virtual
   // methods.
-  const byte* class_data = dex_file.GetClassData(class_def);
+  const uint8_t* class_data = dex_file.GetClassData(class_def);
   if (class_data == nullptr) {
     // Empty class such as a marker interface.
     requires_constructor_barrier = false;
@@ -1882,7 +1882,7 @@
   if (manager->GetCompiler()->verification_results_->IsClassRejected(ref)) {
     return;
   }
-  const byte* class_data = dex_file.GetClassData(class_def);
+  const uint8_t* class_data = dex_file.GetClassData(class_def);
   if (class_data == nullptr) {
     // empty class, probably a marker interface
     return;
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 5834e8e..cf4259f 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -163,8 +163,8 @@
 
   gc::space::ImageSpace* image_space = heap->GetImageSpace();
   image_space->VerifyImageAllocations();
-  byte* image_begin = image_space->Begin();
-  byte* image_end = image_space->End();
+  uint8_t* image_begin = image_space->Begin();
+  uint8_t* image_end = image_space->End();
   CHECK_EQ(requested_image_base, reinterpret_cast<uintptr_t>(image_begin));
   for (size_t i = 0; i < dex->NumClassDefs(); ++i) {
     const DexFile::ClassDef& class_def = dex->GetClassDef(i);
@@ -173,11 +173,11 @@
     EXPECT_TRUE(klass != nullptr) << descriptor;
     if (image_classes.find(descriptor) != image_classes.end()) {
       // Image classes should be located inside the image.
-      EXPECT_LT(image_begin, reinterpret_cast<byte*>(klass)) << descriptor;
-      EXPECT_LT(reinterpret_cast<byte*>(klass), image_end) << descriptor;
+      EXPECT_LT(image_begin, reinterpret_cast<uint8_t*>(klass)) << descriptor;
+      EXPECT_LT(reinterpret_cast<uint8_t*>(klass), image_end) << descriptor;
     } else {
-      EXPECT_TRUE(reinterpret_cast<byte*>(klass) >= image_end ||
-                  reinterpret_cast<byte*>(klass) < image_begin) << descriptor;
+      EXPECT_TRUE(reinterpret_cast<uint8_t*>(klass) >= image_end ||
+                  reinterpret_cast<uint8_t*>(klass) < image_begin) << descriptor;
     }
     EXPECT_TRUE(Monitor::IsValidLockWord(klass->GetLockWord(false)));
   }
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 6fff5f4..35a3d4b 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -559,10 +559,10 @@
 
 void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) {
   CHECK_NE(0U, oat_loaded_size);
-  const byte* oat_file_begin = GetOatFileBegin();
-  const byte* oat_file_end = oat_file_begin + oat_loaded_size;
+  const uint8_t* oat_file_begin = GetOatFileBegin();
+  const uint8_t* oat_file_end = oat_file_begin + oat_loaded_size;
   oat_data_begin_ = oat_file_begin + oat_data_offset;
-  const byte* oat_data_end = oat_data_begin_ + oat_file_->Size();
+  const uint8_t* oat_data_end = oat_data_begin_ + oat_file_->Size();
 
   // Return to write header at start of image with future location of image_roots. At this point,
   // image_end_ is the size of the image (excluding bitmaps).
@@ -604,8 +604,8 @@
   ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
   // see GetLocalAddress for similar computation
   size_t offset = image_writer->GetImageOffset(obj);
-  byte* dst = image_writer->image_->Begin() + offset;
-  const byte* src = reinterpret_cast<const byte*>(obj);
+  uint8_t* dst = image_writer->image_->Begin() + offset;
+  const uint8_t* src = reinterpret_cast<const uint8_t*>(obj);
   size_t n = obj->SizeOf();
   DCHECK_LT(offset + n, image_writer->image_->Size());
   memcpy(dst, src, n);
@@ -688,7 +688,7 @@
   }
 }
 
-const byte* ImageWriter::GetQuickCode(mirror::ArtMethod* method, bool* quick_is_interpreted) {
+const uint8_t* ImageWriter::GetQuickCode(mirror::ArtMethod* method, bool* quick_is_interpreted) {
   DCHECK(!method->IsResolutionMethod() && !method->IsImtConflictMethod() &&
          !method->IsAbstract()) << PrettyMethod(method);
 
@@ -696,7 +696,7 @@
   // trampoline.
 
   // Quick entrypoint:
-  const byte* quick_code = GetOatAddress(method->GetQuickOatCodeOffset());
+  const uint8_t* quick_code = GetOatAddress(method->GetQuickOatCodeOffset());
   *quick_is_interpreted = false;
   if (quick_code != nullptr &&
       (!method->IsStatic() || method->IsConstructor() || method->GetDeclaringClass()->IsInitialized())) {
@@ -718,7 +718,7 @@
   return quick_code;
 }
 
-const byte* ImageWriter::GetQuickEntryPoint(mirror::ArtMethod* method) {
+const uint8_t* ImageWriter::GetQuickEntryPoint(mirror::ArtMethod* method) {
   // Calculate the quick entry point following the same logic as FixupMethod() below.
   // The resolution method has a special trampoline to call.
   if (UNLIKELY(method == Runtime::Current()->GetResolutionMethod())) {
@@ -757,14 +757,14 @@
       copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_to_interpreter_bridge_offset_));
       copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_to_interpreter_bridge_offset_));
       copy->SetEntryPointFromInterpreter<kVerifyNone>(reinterpret_cast<EntryPointFromInterpreter*>
-          (const_cast<byte*>(GetOatAddress(interpreter_to_interpreter_bridge_offset_))));
+          (const_cast<uint8_t*>(GetOatAddress(interpreter_to_interpreter_bridge_offset_))));
     } else {
       bool quick_is_interpreted;
-      const byte* quick_code = GetQuickCode(orig, &quick_is_interpreted);
+      const uint8_t* quick_code = GetQuickCode(orig, &quick_is_interpreted);
       copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(quick_code);
 
       // Portable entrypoint:
-      const byte* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset());
+      const uint8_t* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset());
       bool portable_is_interpreted = false;
       if (portable_code != nullptr &&
           (!orig->IsStatic() || orig->IsConstructor() || orig->GetDeclaringClass()->IsInitialized())) {
@@ -794,7 +794,7 @@
       } else {
         // Normal (non-abstract non-native) methods have various tables to relocate.
         uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset();
-        const byte* native_gc_map = GetOatAddress(native_gc_map_offset);
+        const uint8_t* native_gc_map = GetOatAddress(native_gc_map_offset);
         copy->SetNativeGcMap<kVerifyNone>(reinterpret_cast<const uint8_t*>(native_gc_map));
       }
 
@@ -805,7 +805,7 @@
           : interpreter_to_compiled_code_bridge_offset_;
       copy->SetEntryPointFromInterpreter<kVerifyNone>(
           reinterpret_cast<EntryPointFromInterpreter*>(
-              const_cast<byte*>(GetOatAddress(interpreter_code))));
+              const_cast<uint8_t*>(GetOatAddress(interpreter_code))));
     }
   }
 }
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index bdf0614..e6a98d1 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -38,7 +38,7 @@
 class ImageWriter {
  public:
   ImageWriter(const CompilerDriver& compiler_driver, uintptr_t image_begin)
-      : compiler_driver_(compiler_driver), image_begin_(reinterpret_cast<byte*>(image_begin)),
+      : compiler_driver_(compiler_driver), image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
         image_end_(0), image_roots_address_(0), oat_file_(NULL),
         oat_data_begin_(NULL), interpreter_to_interpreter_bridge_offset_(0),
         interpreter_to_compiled_code_bridge_offset_(0), jni_dlsym_lookup_offset_(0),
@@ -65,7 +65,7 @@
     return reinterpret_cast<mirror::Object*>(image_begin_ + GetImageOffset(object));
   }
 
-  byte* GetOatFileBegin() const {
+  uint8_t* GetOatFileBegin() const {
     return image_begin_ + RoundUp(image_end_, kPageSize);
   }
 
@@ -100,11 +100,11 @@
   mirror::Object* GetLocalAddress(mirror::Object* object) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     size_t offset = GetImageOffset(object);
-    byte* dst = image_->Begin() + offset;
+    uint8_t* dst = image_->Begin() + offset;
     return reinterpret_cast<mirror::Object*>(dst);
   }
 
-  const byte* GetOatAddress(uint32_t offset) const {
+  const uint8_t* GetOatAddress(uint32_t offset) const {
 #if !defined(ART_USE_PORTABLE_COMPILER)
     // With Quick, code is within the OatFile, as it is all in one
     // .o ELF object. However with Portable, the code is always in
@@ -171,10 +171,10 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get quick code for non-resolution/imt_conflict/abstract method.
-  const byte* GetQuickCode(mirror::ArtMethod* method, bool* quick_is_interpreted)
+  const uint8_t* GetQuickCode(mirror::ArtMethod* method, bool* quick_is_interpreted)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  const byte* GetQuickEntryPoint(mirror::ArtMethod* method)
+  const uint8_t* GetQuickEntryPoint(mirror::ArtMethod* method)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Patches references in OatFile to expect runtime addresses.
@@ -183,7 +183,7 @@
   const CompilerDriver& compiler_driver_;
 
   // Beginning target image address for the output image.
-  byte* image_begin_;
+  uint8_t* image_begin_;
 
   // Offset to the free space in image_.
   size_t image_end_;
@@ -201,7 +201,7 @@
   std::vector<std::pair<mirror::Object*, uint32_t>> saved_hashes_;
 
   // Beginning target oat address for the pointers from the output image to its oat file.
-  const byte* oat_data_begin_;
+  const uint8_t* oat_data_begin_;
 
   // Image bitmap which lets us know where the objects inside of the image reside.
   std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 0b1f9e2..3fcc369 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -155,7 +155,7 @@
   ScopedObjectAccess soa(Thread::Current());
   for (size_t i = 0; i < dex_file->NumClassDefs(); i++) {
     const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
-    const byte* class_data = dex_file->GetClassData(class_def);
+    const uint8_t* class_data = dex_file->GetClassData(class_def);
     size_t num_virtual_methods = 0;
     if (class_data != NULL) {
       ClassDataItemIterator it(*dex_file, class_data);
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index e64d2ab..be52f40 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -1208,7 +1208,7 @@
         return false;
       }
       const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
-      const byte* class_data = dex_file->GetClassData(class_def);
+      const uint8_t* class_data = dex_file->GetClassData(class_def);
       if (class_data != NULL) {  // i.e. not an empty class, such as a marker interface
         ClassDataItemIterator it(*dex_file, class_data);
         while (it.HasNextStaticField()) {
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 24d7ad3..dcf70f2 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -95,7 +95,7 @@
 
   static Location ConstantLocation(HConstant* constant) {
     DCHECK(constant != nullptr);
-    return Location(kConstant | reinterpret_cast<uword>(constant));
+    return Location(kConstant | reinterpret_cast<uintptr_t>(constant));
   }
 
   HConstant* GetConstant() const {
@@ -170,7 +170,7 @@
   }
 
   static Location StackSlot(intptr_t stack_index) {
-    uword payload = EncodeStackIndex(stack_index);
+    uintptr_t payload = EncodeStackIndex(stack_index);
     Location loc(kStackSlot, payload);
     // Ensure that sign is preserved.
     DCHECK_EQ(loc.GetStackIndex(), stack_index);
@@ -182,7 +182,7 @@
   }
 
   static Location DoubleStackSlot(intptr_t stack_index) {
-    uword payload = EncodeStackIndex(stack_index);
+    uintptr_t payload = EncodeStackIndex(stack_index);
     Location loc(kDoubleStackSlot, payload);
     // Ensure that sign is preserved.
     DCHECK_EQ(loc.GetStackIndex(), stack_index);
@@ -288,27 +288,27 @@
     return PolicyField::Decode(GetPayload());
   }
 
-  uword GetEncoding() const {
+  uintptr_t GetEncoding() const {
     return GetPayload();
   }
 
  private:
   // Number of bits required to encode Kind value.
   static constexpr uint32_t kBitsForKind = 4;
-  static constexpr uint32_t kBitsForPayload = kWordSize * kBitsPerByte - kBitsForKind;
-  static constexpr uword kLocationConstantMask = 0x3;
+  static constexpr uint32_t kBitsForPayload = kBitsPerIntPtrT - kBitsForKind;
+  static constexpr uintptr_t kLocationConstantMask = 0x3;
 
-  explicit Location(uword value) : value_(value) {}
+  explicit Location(uintptr_t value) : value_(value) {}
 
-  Location(Kind kind, uword payload)
+  Location(Kind kind, uintptr_t payload)
       : value_(KindField::Encode(kind) | PayloadField::Encode(payload)) {}
 
-  uword GetPayload() const {
+  uintptr_t GetPayload() const {
     return PayloadField::Decode(value_);
   }
 
   typedef BitField<Kind, 0, kBitsForKind> KindField;
-  typedef BitField<uword, kBitsForKind, kBitsForPayload> PayloadField;
+  typedef BitField<uintptr_t, kBitsForKind, kBitsForPayload> PayloadField;
 
   // Layout for kUnallocated locations payload.
   typedef BitField<Policy, 0, 3> PolicyField;
@@ -320,7 +320,7 @@
   // Location either contains kind and payload fields or a tagged handle for
  // a constant location. Values of enumeration Kind are selected in such a
   // way that none of them can be interpreted as a kConstant tag.
-  uword value_;
+  uintptr_t value_;
 };
 
 class RegisterSet : public ValueObject {
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 637a1ff..b430c7e 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -386,7 +386,7 @@
 void ArmAssembler::Pad(uint32_t bytes) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   for (uint32_t i = 0; i < bytes; ++i) {
-    buffer_.Emit<byte>(0);
+    buffer_.Emit<uint8_t>(0);
   }
 }
 
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 54965f6..14d48b7 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -600,7 +600,7 @@
   virtual void Ror(Register rd, Register rm, Register rn, bool setcc = false,
                    Condition cond = AL) = 0;
 
-  static bool IsInstructionForExceptionHandling(uword pc);
+  static bool IsInstructionForExceptionHandling(uintptr_t pc);
 
   virtual void Bind(Label* label) = 0;
 
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index 7f9094d..c89fd04 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -273,7 +273,7 @@
                       Condition cond = AL) OVERRIDE;
 
 
-  static bool IsInstructionForExceptionHandling(uword pc);
+  static bool IsInstructionForExceptionHandling(uintptr_t pc);
 
   // Emit data (e.g. encoded instruction or immediate) to the
   // instruction stream.
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index ee33bf2..9e7d394 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -303,7 +303,7 @@
                       Condition cond = AL) OVERRIDE;
 
 
-  static bool IsInstructionForExceptionHandling(uword pc);
+  static bool IsInstructionForExceptionHandling(uintptr_t pc);
 
  // Emit data (e.g. encoded instruction or immediate) to the
   // instruction stream.
diff --git a/compiler/utils/arm/constants_arm.h b/compiler/utils/arm/constants_arm.h
index 3e4cd43..092c891 100644
--- a/compiler/utils/arm/constants_arm.h
+++ b/compiler/utils/arm/constants_arm.h
@@ -223,7 +223,7 @@
 // Example: Test whether the instruction at ptr does set the condition code
 // bits.
 //
-// bool InstructionSetsConditionCodes(byte* ptr) {
+// bool InstructionSetsConditionCodes(uint8_t* ptr) {
 //   Instr* instr = Instr::At(ptr);
 //   int type = instr->TypeField();
 //   return ((type == 0) || (type == 1)) && instr->HasS();
@@ -435,7 +435,7 @@
   // reference to an instruction is to convert a pointer. There is no way
   // to allocate or create instances of class Instr.
   // Use the At(pc) function to create references to Instr.
-  static Instr* At(uword pc) { return reinterpret_cast<Instr*>(pc); }
+  static Instr* At(uintptr_t pc) { return reinterpret_cast<Instr*>(pc); }
   Instr* Next() { return this + kInstrSize; }
 
  private:
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 373fd34..ef83334 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -214,7 +214,7 @@
   void AddConstant(Register rd, Register rn, int32_t value, vixl::Condition cond = vixl::al);
 
   // Vixl assembler.
-  vixl::MacroAssembler* vixl_masm_;
+  vixl::MacroAssembler* const vixl_masm_;
 
   // List of exception blocks to generate at the end of the code cache.
   std::vector<Arm64Exception*> exception_blocks_;
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index e3045e1..8a1289d 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -30,8 +30,8 @@
 
 namespace art {
 
-static byte* NewContents(size_t capacity) {
-  return new byte[capacity];
+static uint8_t* NewContents(size_t capacity) {
+  return new uint8_t[capacity];
 }
 
 
@@ -85,7 +85,7 @@
   size_t new_capacity = std::min(old_capacity * 2, old_capacity + 1 * MB);
 
   // Allocate the new data area and copy contents of the old one to it.
-  byte* new_contents = NewContents(new_capacity);
+  uint8_t* new_contents = NewContents(new_capacity);
   memmove(reinterpret_cast<void*>(new_contents),
           reinterpret_cast<void*>(contents_),
           old_size);
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 4addfa0..91b8d8ab 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -56,19 +56,19 @@
 
 class ExternalLabel {
  public:
-  ExternalLabel(const char* name, uword address)
+  ExternalLabel(const char* name, uintptr_t address)
       : name_(name), address_(address) {
     DCHECK(name != nullptr);
   }
 
   const char* name() const { return name_; }
-  uword address() const {
+  uintptr_t address() const {
     return address_;
   }
 
  private:
   const char* name_;
-  const uword address_;
+  const uintptr_t address_;
 };
 
 class Label {
@@ -84,12 +84,12 @@
   // for unused labels.
   int Position() const {
     CHECK(!IsUnused());
-    return IsBound() ? -position_ - kPointerSize : position_ - kPointerSize;
+    return IsBound() ? -position_ - sizeof(void*) : position_ - sizeof(void*);
   }
 
   int LinkPosition() const {
     CHECK(IsLinked());
-    return position_ - kPointerSize;
+    return position_ - sizeof(void*);
   }
 
   bool IsBound() const { return position_ < 0; }
@@ -105,13 +105,13 @@
 
   void BindTo(int position) {
     CHECK(!IsBound());
-    position_ = -position - kPointerSize;
+    position_ = -position - sizeof(void*);
     CHECK(IsBound());
   }
 
   void LinkTo(int position) {
     CHECK(!IsBound());
-    position_ = position + kPointerSize;
+    position_ = position + sizeof(void*);
     CHECK(IsLinked());
   }
 
@@ -236,7 +236,7 @@
     return cursor_ - contents_;
   }
 
-  byte* contents() const { return contents_; }
+  uint8_t* contents() const { return contents_; }
 
   // Copy the assembled instructions into the specified memory block
   // and apply all fixups.
@@ -316,9 +316,9 @@
   // for a single, fast space check per instruction.
   static const int kMinimumGap = 32;
 
-  byte* contents_;
-  byte* cursor_;
-  byte* limit_;
+  uint8_t* contents_;
+  uint8_t* cursor_;
+  uint8_t* limit_;
   AssemblerFixup* fixup_;
 #ifndef NDEBUG
   bool fixups_processed_;
@@ -327,8 +327,8 @@
   // Head of linked list of slow paths
   SlowPath* slow_path_;
 
-  byte* cursor() const { return cursor_; }
-  byte* limit() const { return limit_; }
+  uint8_t* cursor() const { return cursor_; }
+  uint8_t* limit() const { return limit_; }
   size_t Capacity() const {
     CHECK_GE(limit_, contents_);
     return (limit_ - contents_) + kMinimumGap;
@@ -340,7 +340,7 @@
 
   // Compute the limit based on the data area and the capacity. See
   // description of kMinimumGap for the reasoning behind the value.
-  static byte* ComputeLimit(byte* data, size_t capacity) {
+  static uint8_t* ComputeLimit(uint8_t* data, size_t capacity) {
     return data + capacity - kMinimumGap;
   }
 
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 3ff24b7..4ddf979 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1291,7 +1291,7 @@
   pushl(Immediate(High32Bits(constant)));
   pushl(Immediate(Low32Bits(constant)));
   movsd(dst, Address(ESP, 0));
-  addl(ESP, Immediate(2 * kWordSize));
+  addl(ESP, Immediate(2 * sizeof(intptr_t)));
 }
 
 
@@ -1303,7 +1303,7 @@
     uint32_t d;
   } float_negate_constant __attribute__((aligned(16))) =
       { 0x80000000, 0x00000000, 0x80000000, 0x00000000 };
-  xorps(f, Address::Absolute(reinterpret_cast<uword>(&float_negate_constant)));
+  xorps(f, Address::Absolute(reinterpret_cast<uintptr_t>(&float_negate_constant)));
 }
 
 
@@ -1313,7 +1313,7 @@
     uint64_t b;
   } double_negate_constant __attribute__((aligned(16))) =
       {0x8000000000000000LL, 0x8000000000000000LL};
-  xorpd(d, Address::Absolute(reinterpret_cast<uword>(&double_negate_constant)));
+  xorpd(d, Address::Absolute(reinterpret_cast<uintptr_t>(&double_negate_constant)));
 }
 
 
@@ -1323,7 +1323,7 @@
     uint64_t b;
   } double_abs_constant __attribute__((aligned(16))) =
       {0x7FFFFFFFFFFFFFFFLL, 0x7FFFFFFFFFFFFFFFLL};
-  andpd(reg, Address::Absolute(reinterpret_cast<uword>(&double_abs_constant)));
+  andpd(reg, Address::Absolute(reinterpret_cast<uintptr_t>(&double_abs_constant)));
 }
 
 
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 1f6f7e6..c7eada3 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -116,8 +116,8 @@
   }
 
  private:
-  byte length_;
-  byte encoding_[6];
+  uint8_t length_;
+  uint8_t encoding_[6];
 
   explicit Operand(Register reg) { SetModRM(3, reg); }
 
@@ -192,7 +192,7 @@
     }
   }
 
-  static Address Absolute(uword addr) {
+  static Address Absolute(uintptr_t addr) {
     Address result;
     result.SetModRM(0, EBP);
     result.SetDisp32(addr);
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 705b639..75823e3 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1490,7 +1490,7 @@
   pushq(Immediate(High32Bits(constant)));
   pushq(Immediate(Low32Bits(constant)));
   movsd(dst, Address(CpuRegister(RSP), 0));
-  addq(CpuRegister(RSP), Immediate(2 * kWordSize));
+  addq(CpuRegister(RSP), Immediate(2 * sizeof(intptr_t)));
 }
 
 
@@ -1502,7 +1502,7 @@
     uint32_t d;
   } float_negate_constant __attribute__((aligned(16))) =
       { 0x80000000, 0x00000000, 0x80000000, 0x00000000 };
-  xorps(f, Address::Absolute(reinterpret_cast<uword>(&float_negate_constant)));
+  xorps(f, Address::Absolute(reinterpret_cast<uintptr_t>(&float_negate_constant)));
 }
 
 
@@ -1512,7 +1512,7 @@
     uint64_t b;
   } double_negate_constant __attribute__((aligned(16))) =
       {0x8000000000000000LL, 0x8000000000000000LL};
-  xorpd(d, Address::Absolute(reinterpret_cast<uword>(&double_negate_constant)));
+  xorpd(d, Address::Absolute(reinterpret_cast<uintptr_t>(&double_negate_constant)));
 }
 
 
@@ -1522,7 +1522,7 @@
     uint64_t b;
   } double_abs_constant __attribute__((aligned(16))) =
       {0x7FFFFFFFFFFFFFFFLL, 0x7FFFFFFFFFFFFFFFLL};
-  andpd(reg, Address::Absolute(reinterpret_cast<uword>(&double_abs_constant)));
+  andpd(reg, Address::Absolute(reinterpret_cast<uintptr_t>(&double_abs_constant)));
 }
 
 
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 268f72b..1d9eba4 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -227,7 +227,7 @@
   }
 
   // If no_rip is true then the Absolute address isn't RIP relative.
-  static Address Absolute(uword addr, bool no_rip = false) {
+  static Address Absolute(uintptr_t addr, bool no_rip = false) {
     Address result;
     if (no_rip) {
       result.SetModRM(0, CpuRegister(RSP));
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 51b7a98..1f2c0aa 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -219,7 +219,7 @@
 
   void WalkOatClass(const OatFile::OatClass& oat_class, const DexFile& dex_file,
                     const DexFile::ClassDef& class_def, Callback callback) {
-    const byte* class_data = dex_file.GetClassData(class_def);
+    const uint8_t* class_data = dex_file.GetClassData(class_def);
     if (class_data == nullptr) {  // empty class such as a marker interface?
       return;
     }
@@ -482,8 +482,8 @@
   }
 
   size_t ComputeSize(const void* oat_data) {
-    if (reinterpret_cast<const byte*>(oat_data) < oat_file_.Begin() ||
-        reinterpret_cast<const byte*>(oat_data) > oat_file_.End()) {
+    if (reinterpret_cast<const uint8_t*>(oat_data) < oat_file_.Begin() ||
+        reinterpret_cast<const uint8_t*>(oat_data) > oat_file_.End()) {
       return 0;  // Address not in oat file
     }
     uintptr_t begin_offset = reinterpret_cast<uintptr_t>(oat_data) -
@@ -543,7 +543,7 @@
            class_def_index++) {
         const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
         const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(class_def_index);
-        const byte* class_data = dex_file->GetClassData(class_def);
+        const uint8_t* class_data = dex_file->GetClassData(class_def);
         if (class_data != nullptr) {
           ClassDataItemIterator it(*dex_file, class_data);
           SkipAllFields(it);
@@ -631,7 +631,7 @@
   bool DumpOatClass(std::ostream& os, const OatFile::OatClass& oat_class, const DexFile& dex_file,
                     const DexFile::ClassDef& class_def) {
     bool success = true;
-    const byte* class_data = dex_file.GetClassData(class_def);
+    const uint8_t* class_data = dex_file.GetClassData(class_def);
     if (class_data == nullptr) {  // empty class such as a marker interface?
       os << std::flush;
       return success;
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index bbaf0e4..fbb36f3 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -385,7 +385,7 @@
   if (obj == nullptr) {
     return nullptr;
   } else {
-    return reinterpret_cast<mirror::Object*>(reinterpret_cast<byte*>(obj) + delta_);
+    return reinterpret_cast<mirror::Object*>(reinterpret_cast<uint8_t*>(obj) + delta_);
   }
 }
 
@@ -608,7 +608,7 @@
   patch_loc_t* patches_end = patches + (patches_sec->sh_size / sizeof(patch_loc_t));
   auto oat_text_sec = oat_file->FindSectionByName(".text");
   CHECK(oat_text_sec != nullptr);
-  byte* to_patch = oat_file->Begin() + oat_text_sec->sh_offset;
+  uint8_t* to_patch = oat_file->Begin() + oat_text_sec->sh_offset;
   uintptr_t to_patch_end = reinterpret_cast<uintptr_t>(to_patch) + oat_text_sec->sh_size;
 
   for (; patches < patches_end; patches++) {
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index 37049cf..a7beaa9 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -81,7 +81,7 @@
     gprs[kNumberOfCpuRegisters - i - 1] = gprs_[i] != nullptr ? *gprs_[i] : X86Context::kBadGprBase + i;
   }
   // We want to load the stack pointer one slot below so that the ret will pop eip.
-  uintptr_t esp = gprs[kNumberOfCpuRegisters - ESP - 1] - kWordSize;
+  uintptr_t esp = gprs[kNumberOfCpuRegisters - ESP - 1] - sizeof(intptr_t);
   gprs[kNumberOfCpuRegisters] = esp;
   *(reinterpret_cast<uintptr_t*>(esp)) = eip_;
   __asm__ __volatile__(
diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc
index 7699eaf..79d0666 100644
--- a/runtime/arch/x86_64/context_x86_64.cc
+++ b/runtime/arch/x86_64/context_x86_64.cc
@@ -129,7 +129,7 @@
   }
 
   // We want to load the stack pointer one slot below so that the ret will pop eip.
-  uintptr_t rsp = gprs[kNumberOfCpuRegisters - RSP - 1] - kWordSize;
+  uintptr_t rsp = gprs[kNumberOfCpuRegisters - RSP - 1] - sizeof(intptr_t);
   gprs[kNumberOfCpuRegisters] = rsp;
   *(reinterpret_cast<uintptr_t*>(rsp)) = rip_;
 
diff --git a/runtime/base/bit_field.h b/runtime/base/bit_field.h
index e041bd0..fd65d50 100644
--- a/runtime/base/bit_field.h
+++ b/runtime/base/bit_field.h
@@ -22,7 +22,7 @@
 
 namespace art {
 
-static const uword kUwordOne = 1U;
+static constexpr uintptr_t kUintPtrTOne = 1U;
 
 // BitField is a template for encoding and decoding a bit field inside
 // an unsigned machine word.
@@ -31,18 +31,18 @@
  public:
   // Tells whether the provided value fits into the bit field.
   static bool IsValid(T value) {
-    return (static_cast<uword>(value) & ~((kUwordOne << size) - 1)) == 0;
+    return (static_cast<uintptr_t>(value) & ~((kUintPtrTOne << size) - 1)) == 0;
   }
 
-  // Returns a uword mask of the bit field.
-  static uword Mask() {
-    return (kUwordOne << size) - 1;
+  // Returns a uintptr_t mask of the bit field.
+  static uintptr_t Mask() {
+    return (kUintPtrTOne << size) - 1;
   }
 
-  // Returns a uword mask of the bit field which can be applied directly to
-  // the raw unshifted bits.
-  static uword MaskInPlace() {
-    return ((kUwordOne << size) - 1) << position;
+  // Returns a uintptr_t mask of the bit field which can be applied directly to
+  // the raw unshifted bits.
+  static uintptr_t MaskInPlace() {
+    return ((kUintPtrTOne << size) - 1) << position;
   }
 
   // Returns the shift count needed to right-shift the bit field to
@@ -57,22 +57,22 @@
   }
 
-  // Returns a uword with the bit field value encoded.
+  // Returns a uintptr_t with the bit field value encoded.
-  static uword Encode(T value) {
+  static uintptr_t Encode(T value) {
     DCHECK(IsValid(value));
-    return static_cast<uword>(value) << position;
+    return static_cast<uintptr_t>(value) << position;
   }
 
   // Extracts the bit field from the value.
-  static T Decode(uword value) {
-    return static_cast<T>((value >> position) & ((kUwordOne << size) - 1));
+  static T Decode(uintptr_t value) {
+    return static_cast<T>((value >> position) & ((kUintPtrTOne << size) - 1));
   }
 
-  // Returns a uword with the bit field value encoded based on the
+  // Returns a uintptr_t with the bit field value encoded based on the
   // original value. Only the bits corresponding to this bit field
   // will be changed.
-  static uword Update(T value, uword original) {
+  static uintptr_t Update(T value, uintptr_t original) {
     DCHECK(IsValid(value));
-    return (static_cast<uword>(value) << position) |
+    return (static_cast<uintptr_t>(value) << position) |
         (~MaskInPlace() & original);
   }
 };
diff --git a/runtime/base/hex_dump.cc b/runtime/base/hex_dump.cc
index 936c52b..5423ff0 100644
--- a/runtime/base/hex_dump.cc
+++ b/runtime/base/hex_dump.cc
@@ -35,7 +35,7 @@
   static const char gHexDigit[] = "0123456789abcdef";
   const unsigned char* addr = reinterpret_cast<const unsigned char*>(address_);
   // 01234560: 00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff  0123456789abcdef
-  char out[(kBitsPerWord / 4) + /* offset */
+  char out[(kBitsPerIntPtrT / 4) + /* offset */
            1 + /* colon */
            (16 * 3) + /* 16 hex digits and space */
            2 + /* white space */
@@ -49,7 +49,7 @@
     offset = 0;
   }
   memset(out, ' ', sizeof(out)-1);
-  out[kBitsPerWord / 4] = ':';
+  out[kBitsPerIntPtrT / 4] = ':';
   out[sizeof(out)-1] = '\0';
 
   size_t byte_count = byte_count_;
@@ -58,11 +58,11 @@
     size_t line_offset = offset & ~0x0f;
 
     char* hex = out;
-    char* asc = out + (kBitsPerWord / 4) + /* offset */ 1 + /* colon */
+    char* asc = out + (kBitsPerIntPtrT / 4) + /* offset */ 1 + /* colon */
         (16 * 3) + /* 16 hex digits and space */ 2 /* white space */;
 
-    for (int i = 0; i < (kBitsPerWord / 4); i++) {
-      *hex++ = gHexDigit[line_offset >> (kBitsPerWord - 4)];
+    for (int i = 0; i < (kBitsPerIntPtrT / 4); i++) {
+      *hex++ = gHexDigit[line_offset >> (kBitsPerIntPtrT - 4)];
       line_offset <<= 4;
     }
     hex++;
diff --git a/runtime/base/hex_dump_test.cc b/runtime/base/hex_dump_test.cc
index 3d782b2..bfd5c75 100644
--- a/runtime/base/hex_dump_test.cc
+++ b/runtime/base/hex_dump_test.cc
@@ -56,7 +56,7 @@
   std::ostringstream oss;
   oss << HexDump(&g16byte_aligned_number, 8, true, "");
   // Compare ignoring pointer.
-  EXPECT_STREQ(oss.str().c_str() + (kBitsPerWord / 4),
+  EXPECT_STREQ(oss.str().c_str() + (kBitsPerIntPtrT / 4),
                ": 68 67 66 65 64 63 62 61                          hgfedcba        ");
 }
 
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index c3290f2..cf3a581 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2383,7 +2383,7 @@
 
 uint32_t ClassLinker::SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file,
                                                        const DexFile::ClassDef& dex_class_def) {
-  const byte* class_data = dex_file.GetClassData(dex_class_def);
+  const uint8_t* class_data = dex_file.GetClassData(dex_class_def);
   size_t num_ref = 0;
   size_t num_8 = 0;
   size_t num_16 = 0;
@@ -2438,7 +2438,7 @@
 static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file, uint16_t class_def_idx,
                                                  uint32_t method_idx) {
   const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_idx);
-  const byte* class_data = dex_file.GetClassData(class_def);
+  const uint8_t* class_data = dex_file.GetClassData(class_def);
   CHECK(class_data != nullptr);
   ClassDataItemIterator it(dex_file, class_data);
   // Skip fields
@@ -2644,7 +2644,7 @@
   const DexFile& dex_file = klass->GetDexFile();
   const DexFile::ClassDef* dex_class_def = klass->GetClassDef();
   CHECK(dex_class_def != nullptr);
-  const byte* class_data = dex_file.GetClassData(*dex_class_def);
+  const uint8_t* class_data = dex_file.GetClassData(*dex_class_def);
   // There should always be class data if there were direct methods.
   CHECK(class_data != nullptr) << PrettyDescriptor(klass);
   ClassDataItemIterator it(dex_file, class_data);
@@ -2805,7 +2805,7 @@
   klass->SetDexClassDefIndex(dex_file.GetIndexForClassDef(dex_class_def));
   klass->SetDexTypeIndex(dex_class_def.class_idx_);
 
-  const byte* class_data = dex_file.GetClassData(dex_class_def);
+  const uint8_t* class_data = dex_file.GetClassData(dex_class_def);
   if (class_data == nullptr) {
     return;  // no fields or methods - for example a marker interface
   }
@@ -2825,7 +2825,7 @@
 }
 
 void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file,
-                                   const byte* class_data,
+                                   const uint8_t* class_data,
                                    Handle<mirror::Class> klass,
                                    mirror::ClassLoader* class_loader,
                                    const OatFile::OatClass* oat_class) {
@@ -3785,7 +3785,7 @@
   if (code_item->tries_size_ == 0) {
     return;  // nothing to process
   }
-  const byte* handlers_ptr = DexFile::GetCatchHandlerData(*code_item, 0);
+  const uint8_t* handlers_ptr = DexFile::GetCatchHandlerData(*code_item, 0);
   uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
   ClassLinker* linker = Runtime::Current()->GetClassLinker();
   for (uint32_t idx = 0; idx < handlers_size; idx++) {
@@ -4243,7 +4243,7 @@
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
     EncodedStaticFieldValueIterator value_it(dex_file, &dex_cache, &class_loader,
                                              this, *dex_class_def);
-    const byte* class_data = dex_file.GetClassData(*dex_class_def);
+    const uint8_t* class_data = dex_file.GetClassData(*dex_class_def);
     ClassDataItemIterator field_it(dex_file, class_data);
     if (value_it.HasNext()) {
       DCHECK(field_it.HasNextStaticField());
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index cc75530..373fa89 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -461,7 +461,7 @@
   void LoadClass(Thread* self, const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
                  Handle<mirror::Class> klass, mirror::ClassLoader* class_loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void LoadClassMembers(Thread* self, const DexFile& dex_file, const byte* class_data,
+  void LoadClassMembers(Thread* self, const DexFile& dex_file, const uint8_t* class_data,
                         Handle<mirror::Class> klass, mirror::ClassLoader* class_loader,
                         const OatFile::OatClass* oat_class)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/dex_file-inl.h b/runtime/dex_file-inl.h
index e095c48..c68fdca 100644
--- a/runtime/dex_file-inl.h
+++ b/runtime/dex_file-inl.h
@@ -26,14 +26,14 @@
 namespace art {
 
 inline int32_t DexFile::GetStringLength(const StringId& string_id) const {
-  const byte* ptr = begin_ + string_id.string_data_off_;
+  const uint8_t* ptr = begin_ + string_id.string_data_off_;
   return DecodeUnsignedLeb128(&ptr);
 }
 
 inline const char* DexFile::GetStringDataAndUtf16Length(const StringId& string_id,
                                                         uint32_t* utf16_length) const {
   DCHECK(utf16_length != NULL) << GetLocation();
-  const byte* ptr = begin_ + string_id.string_data_off_;
+  const uint8_t* ptr = begin_ + string_id.string_data_off_;
   *utf16_length = DecodeUnsignedLeb128(&ptr);
   return reinterpret_cast<const char*>(ptr);
 }
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 6ef62c5..f408386 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -47,8 +47,8 @@
 
 namespace art {
 
-const byte DexFile::kDexMagic[] = { 'd', 'e', 'x', '\n' };
-const byte DexFile::kDexMagicVersion[] = { '0', '3', '5', '\0' };
+const uint8_t DexFile::kDexMagic[] = { 'd', 'e', 'x', '\n' };
+const uint8_t DexFile::kDexMagicVersion[] = { '0', '3', '5', '\0' };
 
 static int OpenAndReadMagic(const char* filename, uint32_t* magic, std::string* error_msg) {
   CHECK(magic != NULL);
@@ -323,7 +323,7 @@
 }
 
 
-const DexFile* DexFile::OpenMemory(const byte* base,
+const DexFile* DexFile::OpenMemory(const uint8_t* base,
                                    size_t size,
                                    const std::string& location,
                                    uint32_t location_checksum,
@@ -337,7 +337,7 @@
   }
 }
 
-DexFile::DexFile(const byte* base, size_t size,
+DexFile::DexFile(const uint8_t* base, size_t size,
                  const std::string& location,
                  uint32_t location_checksum,
                  MemMap* mem_map)
@@ -399,12 +399,12 @@
   return true;
 }
 
-bool DexFile::IsMagicValid(const byte* magic) {
+bool DexFile::IsMagicValid(const uint8_t* magic) {
   return (memcmp(magic, kDexMagic, sizeof(kDexMagic)) == 0);
 }
 
-bool DexFile::IsVersionValid(const byte* magic) {
-  const byte* version = &magic[sizeof(kDexMagic)];
+bool DexFile::IsVersionValid(const uint8_t* magic) {
+  const uint8_t* version = &magic[sizeof(kDexMagic)];
   return (memcmp(version, kDexMagicVersion, sizeof(kDexMagicVersion)) == 0);
 }
 
@@ -754,7 +754,7 @@
 
 void DexFile::DecodeDebugInfo0(const CodeItem* code_item, bool is_static, uint32_t method_idx,
                                DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb,
-                               void* context, const byte* stream, LocalInfo* local_in_reg) const {
+                               void* context, const uint8_t* stream, LocalInfo* local_in_reg) const {
   uint32_t line = DecodeUnsignedLeb128(&stream);
   uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
   uint16_t arg_reg = code_item->registers_size_ - code_item->ins_size_;
@@ -919,7 +919,7 @@
                               DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb,
                               void* context) const {
   DCHECK(code_item != nullptr);
-  const byte* stream = GetDebugInfoStream(code_item);
+  const uint8_t* stream = GetDebugInfoStream(code_item);
   std::unique_ptr<LocalInfo[]> local_in_reg(local_cb != NULL ?
                                       new LocalInfo[code_item->registers_size_] :
                                       NULL);
@@ -1059,7 +1059,7 @@
 }
 
 // Read a signed integer.  "zwidth" is the zero-based byte count.
-static int32_t ReadSignedInt(const byte* ptr, int zwidth) {
+static int32_t ReadSignedInt(const uint8_t* ptr, int zwidth) {
   int32_t val = 0;
   for (int i = zwidth; i >= 0; --i) {
     val = ((uint32_t)val >> 8) | (((int32_t)*ptr++) << 24);
@@ -1070,7 +1070,7 @@
 
 // Read an unsigned integer.  "zwidth" is the zero-based byte count,
 // "fill_on_right" indicates which side we want to zero-fill from.
-static uint32_t ReadUnsignedInt(const byte* ptr, int zwidth, bool fill_on_right) {
+static uint32_t ReadUnsignedInt(const uint8_t* ptr, int zwidth, bool fill_on_right) {
   uint32_t val = 0;
   if (!fill_on_right) {
     for (int i = zwidth; i >= 0; --i) {
@@ -1086,7 +1086,7 @@
 }
 
 // Read a signed long.  "zwidth" is the zero-based byte count.
-static int64_t ReadSignedLong(const byte* ptr, int zwidth) {
+static int64_t ReadSignedLong(const uint8_t* ptr, int zwidth) {
   int64_t val = 0;
   for (int i = zwidth; i >= 0; --i) {
     val = ((uint64_t)val >> 8) | (((int64_t)*ptr++) << 56);
@@ -1097,7 +1097,7 @@
 
 // Read an unsigned long.  "zwidth" is the zero-based byte count,
 // "fill_on_right" indicates which side we want to zero-fill from.
-static uint64_t ReadUnsignedLong(const byte* ptr, int zwidth, bool fill_on_right) {
+static uint64_t ReadUnsignedLong(const uint8_t* ptr, int zwidth, bool fill_on_right) {
   uint64_t val = 0;
   if (!fill_on_right) {
     for (int i = zwidth; i >= 0; --i) {
@@ -1137,8 +1137,8 @@
   if (pos_ >= array_size_) {
     return;
   }
-  byte value_type = *ptr_++;
-  byte value_arg = value_type >> kEncodedValueArgShift;
+  uint8_t value_type = *ptr_++;
+  uint8_t value_arg = value_type >> kEncodedValueArgShift;
   size_t width = value_arg + 1;  // assume and correct later
   type_ = static_cast<ValueType>(value_type & kEncodedValueTypeMask);
   switch (type_) {
@@ -1266,7 +1266,7 @@
   }
 }
 
-void CatchHandlerIterator::Init(const byte* handler_data) {
+void CatchHandlerIterator::Init(const uint8_t* handler_data) {
   current_data_ = handler_data;
   remaining_count_ = DecodeSignedLeb128(&current_data_);
 
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index c160253..620bd6e 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -50,10 +50,10 @@
 // TODO: move all of the macro functionality into the DexCache class.
 class DexFile {
  public:
-  static const byte kDexMagic[];
-  static const byte kDexMagicVersion[];
-  static const size_t kSha1DigestSize = 20;
-  static const uint32_t kDexEndianConstant = 0x12345678;
+  static const uint8_t kDexMagic[];
+  static const uint8_t kDexMagicVersion[];
+  static constexpr size_t kSha1DigestSize = 20;
+  static constexpr uint32_t kDexEndianConstant = 0x12345678;
 
   // name of the DexFile entry within a zip archive
   static const char* kClassesDex;
@@ -440,10 +440,10 @@
   uint32_t GetVersion() const;
 
   // Returns true if the byte string points to the magic value.
-  static bool IsMagicValid(const byte* magic);
+  static bool IsMagicValid(const uint8_t* magic);
 
   // Returns true if the byte string after the magic is the correct value.
-  static bool IsVersionValid(const byte* magic);
+  static bool IsVersionValid(const uint8_t* magic);
 
   // Returns the number of string identifiers in the .dex file.
   size_t NumStringIds() const {
@@ -658,13 +658,13 @@
     if (class_def.interfaces_off_ == 0) {
         return NULL;
     } else {
-      const byte* addr = begin_ + class_def.interfaces_off_;
+      const uint8_t* addr = begin_ + class_def.interfaces_off_;
       return reinterpret_cast<const TypeList*>(addr);
     }
   }
 
   // Returns a pointer to the raw memory mapped class_data_item
-  const byte* GetClassData(const ClassDef& class_def) const {
+  const uint8_t* GetClassData(const ClassDef& class_def) const {
     if (class_def.class_data_off_ == 0) {
       return NULL;
     } else {
@@ -677,7 +677,7 @@
     if (code_off == 0) {
       return NULL;  // native or abstract method
     } else {
-      const byte* addr = begin_ + code_off;
+      const uint8_t* addr = begin_ + code_off;
       return reinterpret_cast<const CodeItem*>(addr);
     }
   }
@@ -730,12 +730,12 @@
     if (proto_id.parameters_off_ == 0) {
       return NULL;
     } else {
-      const byte* addr = begin_ + proto_id.parameters_off_;
+      const uint8_t* addr = begin_ + proto_id.parameters_off_;
       return reinterpret_cast<const TypeList*>(addr);
     }
   }
 
-  const byte* GetEncodedStaticFieldValuesArray(const ClassDef& class_def) const {
+  const uint8_t* GetEncodedStaticFieldValuesArray(const ClassDef& class_def) const {
     if (class_def.static_values_off_ == 0) {
       return 0;
     } else {
@@ -746,9 +746,9 @@
   static const TryItem* GetTryItems(const CodeItem& code_item, uint32_t offset);
 
   // Get the base of the encoded data for the given DexCode.
-  static const byte* GetCatchHandlerData(const CodeItem& code_item, uint32_t offset) {
-    const byte* handler_data =
-        reinterpret_cast<const byte*>(GetTryItems(code_item, code_item.tries_size_));
+  static const uint8_t* GetCatchHandlerData(const CodeItem& code_item, uint32_t offset) {
+    const uint8_t* handler_data =
+        reinterpret_cast<const uint8_t*>(GetTryItems(code_item, code_item.tries_size_));
     return handler_data + offset;
   }
 
@@ -759,7 +759,7 @@
   static int32_t FindCatchHandlerOffset(const CodeItem &code_item, uint32_t address);
 
   // Get the pointer to the start of the debugging data
-  const byte* GetDebugInfoStream(const CodeItem* code_item) const {
+  const uint8_t* GetDebugInfoStream(const CodeItem* code_item) const {
     if (code_item->debug_info_off_ == 0) {
       return NULL;
     } else {
@@ -862,7 +862,7 @@
 
   bool DisableWrite() const;
 
-  const byte* Begin() const {
+  const uint8_t* Begin() const {
     return begin_;
   }
 
@@ -917,14 +917,14 @@
                                    std::string* error_msg);
 
   // Opens a .dex file at the given address, optionally backed by a MemMap
-  static const DexFile* OpenMemory(const byte* dex_file,
+  static const DexFile* OpenMemory(const uint8_t* dex_file,
                                    size_t size,
                                    const std::string& location,
                                    uint32_t location_checksum,
                                    MemMap* mem_map,
                                    std::string* error_msg);
 
-  DexFile(const byte* base, size_t size,
+  DexFile(const uint8_t* base, size_t size,
           const std::string& location,
           uint32_t location_checksum,
           MemMap* mem_map);
@@ -937,7 +937,7 @@
 
   void DecodeDebugInfo0(const CodeItem* code_item, bool is_static, uint32_t method_idx,
       DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb,
-      void* context, const byte* stream, LocalInfo* local_in_reg) const;
+      void* context, const uint8_t* stream, LocalInfo* local_in_reg) const;
 
   // Check whether a location denotes a multidex dex file. This is a very simple check: returns
   // whether the string contains the separator character.
@@ -945,7 +945,7 @@
 
 
   // The base address of the memory mapping.
-  const byte* const begin_;
+  const uint8_t* const begin_;
 
   // The size of the underlying memory allocation in bytes.
   const size_t size_;
@@ -1059,7 +1059,7 @@
 // Iterate and decode class_data_item
 class ClassDataItemIterator {
  public:
-  ClassDataItemIterator(const DexFile& dex_file, const byte* raw_class_data_item)
+  ClassDataItemIterator(const DexFile& dex_file, const uint8_t* raw_class_data_item)
       : dex_file_(dex_file), pos_(0), ptr_pos_(raw_class_data_item), last_idx_(0) {
     ReadClassDataHeader();
     if (EndOfInstanceFieldsPos() > 0) {
@@ -1174,7 +1174,7 @@
   uint32_t GetMethodCodeItemOffset() const {
     return method_.code_off_;
   }
-  const byte* EndDataPointer() const {
+  const uint8_t* EndDataPointer() const {
     CHECK(!HasNext());
     return ptr_pos_;
   }
@@ -1236,7 +1236,7 @@
 
   const DexFile& dex_file_;
   size_t pos_;  // integral number of items passed
-  const byte* ptr_pos_;  // pointer into stream of class_data_item
+  const uint8_t* ptr_pos_;  // pointer into stream of class_data_item
   uint32_t last_idx_;  // last read field or method index to apply delta to
   DISALLOW_IMPLICIT_CONSTRUCTORS(ClassDataItemIterator);
 };
@@ -1275,8 +1275,8 @@
   };
 
  private:
-  static const byte kEncodedValueTypeMask = 0x1f;  // 0b11111
-  static const byte kEncodedValueArgShift = 5;
+  static constexpr uint8_t kEncodedValueTypeMask = 0x1f;  // 0b11111
+  static constexpr uint8_t kEncodedValueArgShift = 5;
 
   const DexFile& dex_file_;
   Handle<mirror::DexCache>* const dex_cache_;  // Dex cache to resolve literal objects.
@@ -1284,7 +1284,7 @@
   ClassLinker* linker_;  // Linker to resolve literal objects.
   size_t array_size_;  // Size of array.
   size_t pos_;  // Current position.
-  const byte* ptr_;  // Pointer into encoded data array.
+  const uint8_t* ptr_;  // Pointer into encoded data array.
   ValueType type_;  // Type of current encoded value.
   jvalue jval_;  // Value of current encoded value.
   DISALLOW_IMPLICIT_CONSTRUCTORS(EncodedStaticFieldValueIterator);
@@ -1298,7 +1298,7 @@
     CatchHandlerIterator(const DexFile::CodeItem& code_item,
                          const DexFile::TryItem& try_item);
 
-    explicit CatchHandlerIterator(const byte* handler_data) {
+    explicit CatchHandlerIterator(const uint8_t* handler_data) {
       Init(handler_data);
     }
 
@@ -1313,20 +1313,20 @@
       return remaining_count_ != -1 || catch_all_;
     }
     // End of this set of catch blocks, convenience method to locate next set of catch blocks
-    const byte* EndDataPointer() const {
+    const uint8_t* EndDataPointer() const {
       CHECK(!HasNext());
       return current_data_;
     }
 
   private:
     void Init(const DexFile::CodeItem& code_item, int32_t offset);
-    void Init(const byte* handler_data);
+    void Init(const uint8_t* handler_data);
 
     struct CatchHandlerItem {
       uint16_t type_idx_;  // type index of the caught exception type
       uint32_t address_;  // handler address
     } handler_;
-    const byte *current_data_;  // the current handler in dex file.
+    const uint8_t* current_data_;  // the current handler in dex file.
     int32_t remaining_count_;   // number of handlers not read.
     bool catch_all_;            // is there a handler that will catch all exceptions in case
                                 // none of the typed handlers matches.
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index d0c5603..134e284 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -35,7 +35,7 @@
   ASSERT_TRUE(dex != NULL);
 }
 
-static const byte kBase64Map[256] = {
+static const uint8_t kBase64Map[256] = {
   255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
   255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
   255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
@@ -60,12 +60,12 @@
   255, 255, 255, 255
 };
 
-static inline byte* DecodeBase64(const char* src, size_t* dst_size) {
-  std::vector<byte> tmp;
+static inline uint8_t* DecodeBase64(const char* src, size_t* dst_size) {
+  std::vector<uint8_t> tmp;
   uint32_t t = 0, y = 0;
   int g = 3;
   for (size_t i = 0; src[i] != '\0'; ++i) {
-    byte c = kBase64Map[src[i] & 0xFF];
+    uint8_t c = kBase64Map[src[i] & 0xFF];
     if (c == 255) continue;
     // the final = symbols are read and used to trim the remaining bytes
     if (c == 254) {
@@ -96,7 +96,7 @@
     *dst_size = 0;
     return nullptr;
   }
-  std::unique_ptr<byte[]> dst(new byte[tmp.size()]);
+  std::unique_ptr<uint8_t[]> dst(new uint8_t[tmp.size()]);
   if (dst_size != nullptr) {
     *dst_size = tmp.size();
   } else {
@@ -137,7 +137,7 @@
   // decode base64
   CHECK(base64 != NULL);
   size_t length;
-  std::unique_ptr<byte[]> dex_bytes(DecodeBase64(base64, &length));
+  std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(base64, &length));
   CHECK(dex_bytes.get() != NULL);
 
   // write to provided file
@@ -229,7 +229,7 @@
   const DexFile::ClassDef& class_def = raw->GetClassDef(0);
   ASSERT_STREQ("LGetMethodSignature;", raw->GetClassDescriptor(class_def));
 
-  const byte* class_data = raw->GetClassData(class_def);
+  const uint8_t* class_data = raw->GetClassData(class_def);
   ASSERT_TRUE(class_data != NULL);
   ClassDataItemIterator it(*raw, class_data);
 
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 9eba92f..0597253 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -124,7 +124,7 @@
     error_stmt;                                             \
   }
 
-bool DexFileVerifier::Verify(const DexFile* dex_file, const byte* begin, size_t size,
+bool DexFileVerifier::Verify(const DexFile* dex_file, const uint8_t* begin, size_t size,
                              const char* location, std::string* error_msg) {
   std::unique_ptr<DexFileVerifier> verifier(new DexFileVerifier(dex_file, begin, size, location));
   if (!verifier->Verify()) {
@@ -175,8 +175,8 @@
   // Check that size is not 0.
   CHECK_NE(elem_size, 0U);
 
-  const byte* range_start = reinterpret_cast<const byte*>(start);
-  const byte* file_start = reinterpret_cast<const byte*>(begin_);
+  const uint8_t* range_start = reinterpret_cast<const uint8_t*>(start);
+  const uint8_t* file_start = reinterpret_cast<const uint8_t*>(begin_);
 
   // Check for overflow.
   uintptr_t max = 0 - 1;  // Unsigned wraparound: the largest uintptr_t value.
@@ -189,8 +189,8 @@
     return false;
   }
 
-  const byte* range_end = range_start + count * elem_size;
-  const byte* file_end = file_start + size_;
+  const uint8_t* range_end = range_start + count * elem_size;
+  const uint8_t* file_end = file_start + size_;
   if (UNLIKELY((range_start < file_start) || (range_end > file_end))) {
     // Note: these two tests are enough as we make sure above that there's no overflow.
     ErrorStringPrintf("Bad range for %s: %zx to %zx", label,
@@ -201,7 +201,7 @@
   return true;
 }
 
-bool DexFileVerifier::CheckList(size_t element_size, const char* label, const byte* *ptr) {
+bool DexFileVerifier::CheckList(size_t element_size, const char* label, const uint8_t* *ptr) {
   // Check that the list is available. The first 4B are the count.
   if (!CheckListSize(*ptr, 1, 4U, label)) {
     return false;
@@ -251,7 +251,7 @@
   // Compute and verify the checksum in the header.
   uint32_t adler_checksum = adler32(0L, Z_NULL, 0);
   const uint32_t non_sum = sizeof(header_->magic_) + sizeof(header_->checksum_);
-  const byte* non_sum_ptr = reinterpret_cast<const byte*>(header_) + non_sum;
+  const uint8_t* non_sum_ptr = reinterpret_cast<const uint8_t*>(header_) + non_sum;
   adler_checksum = adler32(adler_checksum, non_sum_ptr, expected_size - non_sum);
   if (adler_checksum != header_->checksum_) {
     ErrorStringPrintf("Bad checksum (%08x, expected %08x)", adler_checksum, header_->checksum_);
@@ -388,7 +388,7 @@
 
 uint32_t DexFileVerifier::ReadUnsignedLittleEndian(uint32_t size) {
   uint32_t result = 0;
-  if (LIKELY(CheckListSize(ptr_, size, sizeof(byte), "encoded_value"))) {
+  if (LIKELY(CheckListSize(ptr_, size, sizeof(uint8_t), "encoded_value"))) {
     for (uint32_t i = 0; i < size; i++) {
       result |= ((uint32_t) *(ptr_++)) << (i * 8);
     }
@@ -398,7 +398,7 @@
 
 bool DexFileVerifier::CheckAndGetHandlerOffsets(const DexFile::CodeItem* code_item,
                                                 uint32_t* handler_offsets, uint32_t handlers_size) {
-  const byte* handlers_base = DexFile::GetCatchHandlerData(*code_item, 0);
+  const uint8_t* handlers_base = DexFile::GetCatchHandlerData(*code_item, 0);
 
   for (uint32_t i = 0; i < handlers_size; i++) {
     bool catch_all;
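
DecodeUnsignedLeb128 is also why most of these cursors must be uint8_t*: the decoder
consumes raw unsigned bytes, testing the high bit of each one. A sketch of the
encoding (a minimal illustration, not ART's exact DecodeUnsignedLeb128):

#include <stdint.h>

// ULEB128: each byte carries 7 payload bits; the high bit marks
// continuation. Advances the caller's cursor past the encoded value.
// (No guard against malformed input; a real decoder bounds the shift.)
static uint32_t DecodeUleb128(const uint8_t** data) {
  const uint8_t* ptr = *data;
  uint32_t result = 0;
  int shift = 0;
  uint8_t cur;
  do {
    cur = *ptr++;
    result |= static_cast<uint32_t>(cur & 0x7f) << shift;
    shift += 7;
  } while ((cur & 0x80) != 0);
  *data = ptr;
  return result;
}
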
@@ -503,7 +503,7 @@
 
 bool DexFileVerifier::CheckPadding(size_t offset, uint32_t aligned_offset) {
   if (offset < aligned_offset) {
-    if (!CheckListSize(begin_ + offset, aligned_offset - offset, sizeof(byte), "section")) {
+    if (!CheckListSize(begin_ + offset, aligned_offset - offset, sizeof(uint8_t), "section")) {
       return false;
     }
     while (offset < aligned_offset) {
@@ -519,7 +519,7 @@
 }
 
 bool DexFileVerifier::CheckEncodedValue() {
-  if (!CheckListSize(ptr_, 1, sizeof(byte), "encoded_value header")) {
+  if (!CheckListSize(ptr_, 1, sizeof(uint8_t), "encoded_value header")) {
     return false;
   }
 
@@ -746,7 +746,7 @@
   // Grab the end of the insns if there are no try_items.
   uint32_t try_items_size = code_item->tries_size_;
   if (try_items_size == 0) {
-    ptr_ = reinterpret_cast<const byte*>(&insns[insns_size]);
+    ptr_ = reinterpret_cast<const uint8_t*>(&insns[insns_size]);
     return true;
   }
 
@@ -812,7 +812,7 @@
 
 bool DexFileVerifier::CheckIntraStringDataItem() {
   uint32_t size = DecodeUnsignedLeb128(&ptr_);
-  const byte* file_end = begin_ + size_;
+  const uint8_t* file_end = begin_ + size_;
 
   for (uint32_t i = 0; i < size; i++) {
     CHECK_LT(i, size);  // b/15014252 Prevents hitting the impossible case below
@@ -1003,7 +1003,7 @@
 }
 
 bool DexFileVerifier::CheckIntraAnnotationItem() {
-  if (!CheckListSize(ptr_, 1, sizeof(byte), "annotation visibility")) {
+  if (!CheckListSize(ptr_, 1, sizeof(uint8_t), "annotation visibility")) {
     return false;
   }
 
@@ -1090,7 +1090,7 @@
   }
 
   // Return a pointer to the end of the annotations.
-  ptr_ = reinterpret_cast<const byte*>(parameter_item);
+  ptr_ = reinterpret_cast<const uint8_t*>(parameter_item);
   return true;
 }
 
@@ -1416,7 +1416,7 @@
   return true;
 }
 
-uint16_t DexFileVerifier::FindFirstClassDataDefiner(const byte* ptr, bool* success) {
+uint16_t DexFileVerifier::FindFirstClassDataDefiner(const uint8_t* ptr, bool* success) {
   ClassDataItemIterator it(*dex_file_, ptr);
   *success = true;
 
@@ -1435,7 +1435,7 @@
   return DexFile::kDexNoIndex16;
 }
 
-uint16_t DexFileVerifier::FindFirstAnnotationsDirectoryDefiner(const byte* ptr, bool* success) {
+uint16_t DexFileVerifier::FindFirstAnnotationsDirectoryDefiner(const uint8_t* ptr, bool* success) {
   const DexFile::AnnotationsDirectoryItem* item =
       reinterpret_cast<const DexFile::AnnotationsDirectoryItem*>(ptr);
   *success = true;
@@ -1759,7 +1759,7 @@
 
   // Check that references in class_data_item are to the right class.
   if (item->class_data_off_ != 0) {
-    const byte* data = begin_ + item->class_data_off_;
+    const uint8_t* data = begin_ + item->class_data_off_;
     bool success;
     uint16_t data_definer = FindFirstClassDataDefiner(data, &success);
     if (!success) {
@@ -1773,7 +1773,7 @@
 
   // Check that references in annotations_directory_item are to right class.
   if (item->annotations_off_ != 0) {
-    const byte* data = begin_ + item->annotations_off_;
+    const uint8_t* data = begin_ + item->annotations_off_;
     bool success;
     uint16_t annotations_definer = FindFirstAnnotationsDirectoryDefiner(data, &success);
     if (!success) {
@@ -1804,7 +1804,7 @@
     item++;
   }
 
-  ptr_ = reinterpret_cast<const byte*>(item);
+  ptr_ = reinterpret_cast<const uint8_t*>(item);
   return true;
 }
 
@@ -1834,7 +1834,7 @@
     offsets++;
   }
 
-  ptr_ = reinterpret_cast<const byte*>(offsets);
+  ptr_ = reinterpret_cast<const uint8_t*>(offsets);
   return true;
 }
 
@@ -1935,7 +1935,7 @@
     parameter_item++;
   }
 
-  ptr_ = reinterpret_cast<const byte*>(parameter_item);
+  ptr_ = reinterpret_cast<const uint8_t*>(parameter_item);
   return true;
 }
 
@@ -1956,7 +1956,7 @@
   for (uint32_t i = 0; i < count; i++) {
     uint32_t new_offset = (offset + alignment_mask) & ~alignment_mask;
     ptr_ = begin_ + new_offset;
-    const byte* prev_ptr = ptr_;
+    const uint8_t* prev_ptr = ptr_;
 
     // Check depending on the section type.
     switch (type) {
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index 606da54..18bf2e7 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -26,7 +26,7 @@
 
 class DexFileVerifier {
  public:
-  static bool Verify(const DexFile* dex_file, const byte* begin, size_t size,
+  static bool Verify(const DexFile* dex_file, const uint8_t* begin, size_t size,
                      const char* location, std::string* error_msg);
 
   const std::string& FailureReason() const {
@@ -34,7 +34,7 @@
   }
 
  private:
-  DexFileVerifier(const DexFile* dex_file, const byte* begin, size_t size, const char* location)
+  DexFileVerifier(const DexFile* dex_file, const uint8_t* begin, size_t size, const char* location)
       : dex_file_(dex_file), begin_(begin), size_(size), location_(location),
         header_(&dex_file->GetHeader()), ptr_(NULL), previous_item_(NULL)  {
   }
@@ -45,7 +45,7 @@
   bool CheckListSize(const void* start, size_t count, size_t element_size, const char* label);
   // Check a list. The head is assumed to be at *ptr, and elements to be of size element_size. If
   // successful, the ptr will be moved forward the amount covered by the list.
-  bool CheckList(size_t element_size, const char* label, const byte* *ptr);
+  bool CheckList(size_t element_size, const char* label, const uint8_t* *ptr);
   // Checks whether the offset is zero (when size is zero) or that the offset falls within the area
   // claimed by the file.
   bool CheckValidOffsetAndSize(uint32_t offset, uint32_t size, const char* label);
@@ -81,8 +81,8 @@
 
   // Note: since kDexNoIndex16 (0xFFFF) can itself be a valid return value, we need an
   // additional out parameter to signal any errors loading an index.
-  uint16_t FindFirstClassDataDefiner(const byte* ptr, bool* success);
-  uint16_t FindFirstAnnotationsDirectoryDefiner(const byte* ptr, bool* success);
+  uint16_t FindFirstClassDataDefiner(const uint8_t* ptr, bool* success);
+  uint16_t FindFirstAnnotationsDirectoryDefiner(const uint8_t* ptr, bool* success);
 
   bool CheckInterStringIdItem();
   bool CheckInterTypeIdItem();
@@ -112,13 +112,13 @@
       __attribute__((__format__(__printf__, 2, 3))) COLD_ATTR;
 
   const DexFile* const dex_file_;
-  const byte* const begin_;
+  const uint8_t* const begin_;
   const size_t size_;
   const char* const location_;
   const DexFile::Header* const header_;
 
   AllocationTrackingSafeMap<uint32_t, uint16_t, kAllocatorTagDexFileVerifier> offset_to_type_map_;
-  const byte* ptr_;
+  const uint8_t* ptr_;
   const void* previous_item_;
 
   std::string failure_reason_;
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index d475d42..addd948 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -30,7 +30,7 @@
 
 class DexFileVerifierTest : public CommonRuntimeTest {};
 
-static const byte kBase64Map[256] = {
+static const uint8_t kBase64Map[256] = {
   255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
   255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
   255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
@@ -55,12 +55,12 @@
   255, 255, 255, 255
 };
 
-static inline byte* DecodeBase64(const char* src, size_t* dst_size) {
-  std::vector<byte> tmp;
+static inline uint8_t* DecodeBase64(const char* src, size_t* dst_size) {
+  std::vector<uint8_t> tmp;
   uint32_t t = 0, y = 0;
   int g = 3;
   for (size_t i = 0; src[i] != '\0'; ++i) {
-    byte c = kBase64Map[src[i] & 0xFF];
+    uint8_t c = kBase64Map[src[i] & 0xFF];
     if (c == 255) continue;
     // the final = symbols are read and used to trim the remaining bytes
     if (c == 254) {
@@ -91,7 +91,7 @@
     *dst_size = 0;
     return nullptr;
   }
-  std::unique_ptr<byte[]> dst(new byte[tmp.size()]);
+  std::unique_ptr<uint8_t[]> dst(new uint8_t[tmp.size()]);
   if (dst_size != nullptr) {
     *dst_size = tmp.size();
   } else {
@@ -106,7 +106,7 @@
   // decode base64
   CHECK(base64 != NULL);
   size_t length;
-  std::unique_ptr<byte[]> dex_bytes(DecodeBase64(base64, &length));
+  std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(base64, &length));
   CHECK(dex_bytes.get() != NULL);
 
   // write to provided file
@@ -153,17 +153,17 @@
   ASSERT_TRUE(raw.get() != nullptr) << error_msg;
 }
 
-static void FixUpChecksum(byte* dex_file) {
+static void FixUpChecksum(uint8_t* dex_file) {
   DexFile::Header* header = reinterpret_cast<DexFile::Header*>(dex_file);
   uint32_t expected_size = header->file_size_;
   uint32_t adler_checksum = adler32(0L, Z_NULL, 0);
   const uint32_t non_sum = sizeof(DexFile::Header::magic_) + sizeof(DexFile::Header::checksum_);
-  const byte* non_sum_ptr = dex_file + non_sum;
+  const uint8_t* non_sum_ptr = dex_file + non_sum;
   adler_checksum = adler32(adler_checksum, non_sum_ptr, expected_size - non_sum);
   header->checksum_ = adler_checksum;
 }
 
-static const DexFile* FixChecksumAndOpen(byte* bytes, size_t length, const char* location,
+static const DexFile* FixChecksumAndOpen(uint8_t* bytes, size_t length, const char* location,
                                          std::string* error_msg) {
   // Check data.
   CHECK(bytes != nullptr);
@@ -196,7 +196,7 @@
                                     std::string* error_msg) {
   // Decode base64.
   size_t length;
-  std::unique_ptr<byte[]> dex_bytes(DecodeBase64(kGoodTestDex, &length));
+  std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(kGoodTestDex, &length));
   CHECK(dex_bytes.get() != NULL);
 
   // Make modifications.
diff --git a/runtime/dex_method_iterator.h b/runtime/dex_method_iterator.h
index 806266d..14e316f 100644
--- a/runtime/dex_method_iterator.h
+++ b/runtime/dex_method_iterator.h
@@ -139,7 +139,7 @@
   uint32_t dex_file_index_;
   uint32_t class_def_index_;
   const DexFile::ClassDef* class_def_;
-  const byte* class_data_;
+  const uint8_t* class_data_;
   std::unique_ptr<ClassDataItemIterator> it_;
   bool direct_method_;
 };
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 3b8358d..c3a2559 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -43,7 +43,7 @@
   struct JITCodeEntry {
     JITCodeEntry* next_;
     JITCodeEntry* prev_;
-    const byte *symfile_addr_;
+    const uint8_t *symfile_addr_;
     uint64_t symfile_size_;
   };
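
This struct mirrors GDB's JIT compilation interface. For reference, the documented
declarations look roughly like this (per the GDB manual; ART's copy above uses
uint8_t* for the symbol-file pointer where the manual uses const char*):

#include <stdint.h>

typedef enum {
  JIT_NOACTION = 0,
  JIT_REGISTER_FN,
  JIT_UNREGISTER_FN
} jit_actions_t;

struct jit_code_entry {
  struct jit_code_entry* next_entry;
  struct jit_code_entry* prev_entry;
  const char* symfile_addr;
  uint64_t symfile_size;
};

struct jit_descriptor {
  uint32_t version;      // Must be 1.
  uint32_t action_flag;  // A jit_actions_t value.
  struct jit_code_entry* relevant_entry;
  struct jit_code_entry* first_entry;
};

// GDB sets a breakpoint on this function; the JIT calls it after
// linking or unlinking an entry so the debugger can resync.
extern "C" void __jit_debug_register_code();
extern "C" struct jit_descriptor __jit_debug_descriptor;
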
 
@@ -68,7 +68,7 @@
 }
 
 
-static JITCodeEntry* CreateCodeEntry(const byte *symfile_addr,
+static JITCodeEntry* CreateCodeEntry(const uint8_t *symfile_addr,
                                      uintptr_t symfile_size) {
   JITCodeEntry* entry = new JITCodeEntry;
   entry->symfile_addr_ = symfile_addr;
@@ -264,7 +264,7 @@
     }
 
     if (!CheckAndSet(GetDynamicProgramHeader().p_offset, "dynamic section",
-                     reinterpret_cast<byte**>(&dynamic_section_start_), error_msg)) {
+                     reinterpret_cast<uint8_t**>(&dynamic_section_start_), error_msg)) {
       return false;
     }
 
@@ -279,14 +279,14 @@
       switch (section_header->sh_type) {
         case SHT_SYMTAB: {
           if (!CheckAndSet(section_header->sh_offset, "symtab",
-                           reinterpret_cast<byte**>(&symtab_section_start_), error_msg)) {
+                           reinterpret_cast<uint8_t**>(&symtab_section_start_), error_msg)) {
             return false;
           }
           break;
         }
         case SHT_DYNSYM: {
           if (!CheckAndSet(section_header->sh_offset, "dynsym",
-                           reinterpret_cast<byte**>(&dynsym_section_start_), error_msg)) {
+                           reinterpret_cast<uint8_t**>(&dynsym_section_start_), error_msg)) {
             return false;
           }
           break;
@@ -298,7 +298,7 @@
             const char* header_name = GetString(*shstrtab_section_header, section_header->sh_name);
             if (strncmp(".dynstr", header_name, 8) == 0) {
               if (!CheckAndSet(section_header->sh_offset, "dynstr",
-                               reinterpret_cast<byte**>(&dynstr_section_start_), error_msg)) {
+                               reinterpret_cast<uint8_t**>(&dynstr_section_start_), error_msg)) {
                 return false;
               }
             }
@@ -307,7 +307,7 @@
             const char* header_name = GetString(*shstrtab_section_header, section_header->sh_name);
             if (strncmp(".strtab", header_name, 8) == 0) {
               if (!CheckAndSet(section_header->sh_offset, "strtab",
-                               reinterpret_cast<byte**>(&strtab_section_start_), error_msg)) {
+                               reinterpret_cast<uint8_t**>(&strtab_section_start_), error_msg)) {
                 return false;
               }
             }
@@ -315,7 +315,7 @@
           break;
         }
         case SHT_DYNAMIC: {
-          if (reinterpret_cast<byte*>(dynamic_section_start_) !=
+          if (reinterpret_cast<uint8_t*>(dynamic_section_start_) !=
               Begin() + section_header->sh_offset) {
             LOG(WARNING) << "Failed to find matching SHT_DYNAMIC for PT_DYNAMIC in "
                          << file_->GetPath() << ": " << std::hex
@@ -327,7 +327,7 @@
         }
         case SHT_HASH: {
           if (!CheckAndSet(section_header->sh_offset, "hash section",
-                           reinterpret_cast<byte**>(&hash_section_start_), error_msg)) {
+                           reinterpret_cast<uint8_t**>(&hash_section_start_), error_msg)) {
             return false;
           }
           break;
@@ -365,7 +365,7 @@
 bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
     Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
     ::CheckAndSet(Elf32_Off offset, const char* label,
-                  byte** target, std::string* error_msg) {
+                  uint8_t** target, std::string* error_msg) {
   if (Begin() + offset >= End()) {
     *error_msg = StringPrintf("Offset %d is out of range for %s in ELF file: '%s'", offset, label,
                               file_->GetPath().c_str());
@@ -380,7 +380,7 @@
           typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
 bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
     Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
-    ::CheckSectionsLinked(const byte* source, const byte* target) const {
+    ::CheckSectionsLinked(const uint8_t* source, const uint8_t* target) const {
   // Only works in whole-program mode, as we need to iterate over the sections.
   // Note that we normally can't search by type, as duplicates are allowed for most section types.
   if (program_header_only_) {
@@ -449,8 +449,8 @@
     }
 
     // The symtab should link to the strtab.
-    if (!CheckSectionsLinked(reinterpret_cast<const byte*>(symtab_section_start_),
-                             reinterpret_cast<const byte*>(strtab_section_start_))) {
+    if (!CheckSectionsLinked(reinterpret_cast<const uint8_t*>(symtab_section_start_),
+                             reinterpret_cast<const uint8_t*>(strtab_section_start_))) {
       *error_msg = StringPrintf("Symtab is not linked to the strtab in ELF file: '%s'",
                                 file_->GetPath().c_str());
       return false;
@@ -475,8 +475,8 @@
   }
 
   // And the hash section should be linking to the dynsym.
-  if (!CheckSectionsLinked(reinterpret_cast<const byte*>(hash_section_start_),
-                           reinterpret_cast<const byte*>(dynsym_section_start_))) {
+  if (!CheckSectionsLinked(reinterpret_cast<const uint8_t*>(hash_section_start_),
+                           reinterpret_cast<const uint8_t*>(dynsym_section_start_))) {
     *error_msg = StringPrintf("Hash section is not linked to the dynstr in ELF file: '%s'",
                               file_->GetPath().c_str());
     return false;
@@ -637,7 +637,7 @@
 template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
           typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
           typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-byte* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
+uint8_t* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
     Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
     ::GetProgramHeadersStart() const {
   CHECK(program_headers_start_ != nullptr);  // Header has been set in Setup. This is a sanity
@@ -648,7 +648,7 @@
 template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
           typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
           typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-byte* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
+uint8_t* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
     Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
     ::GetSectionHeadersStart() const {
   CHECK(!program_header_only_);              // Only used in "full" mode.
@@ -813,7 +813,7 @@
     Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
     ::GetProgramHeader(Elf_Word i) const {
   CHECK_LT(i, GetProgramHeaderNum()) << file_->GetPath();  // Sanity check for caller.
-  byte* program_header = GetProgramHeadersStart() + (i * GetHeader().e_phentsize);
+  uint8_t* program_header = GetProgramHeadersStart() + (i * GetHeader().e_phentsize);
   if (program_header >= End()) {
     return nullptr;  // Failure condition.
   }
@@ -856,7 +856,7 @@
   if (i >= GetSectionHeaderNum()) {
     return nullptr;  // Failure condition.
   }
-  byte* section_header = GetSectionHeadersStart() + (i * GetHeader().e_shentsize);
+  uint8_t* section_header = GetSectionHeadersStart() + (i * GetHeader().e_shentsize);
   if (section_header >= End()) {
     return nullptr;  // Failure condition.
   }
@@ -907,7 +907,7 @@
 template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
           typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
           typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-const byte* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
+const uint8_t* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
     Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
     ::FindDynamicSymbolAddress(const std::string& symbol_name) const {
   // Check that we have a hash section.
@@ -1133,8 +1133,8 @@
   if (i == 0) {
     return nullptr;
   }
-  byte* strings = Begin() + string_section.sh_offset;
-  byte* string = strings + i;
+  uint8_t* strings = Begin() + string_section.sh_offset;
+  uint8_t* string = strings + i;
   if (string >= End()) {
     return nullptr;
   }
@@ -1361,8 +1361,8 @@
     }
     size_t file_length = static_cast<size_t>(temp_file_length);
     if (!reserved) {
-      byte* reserve_base = ((program_header->p_vaddr != 0) ?
-                            reinterpret_cast<byte*>(program_header->p_vaddr) : nullptr);
+      uint8_t* reserve_base = ((program_header->p_vaddr != 0) ?
+                            reinterpret_cast<uint8_t*>(program_header->p_vaddr) : nullptr);
       std::string reservation_name("ElfFile reservation for ");
       reservation_name += file_->GetPath();
       std::unique_ptr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
@@ -1384,7 +1384,7 @@
     if (program_header->p_memsz == 0) {
       continue;
     }
-    byte* p_vaddr = base_address_ + program_header->p_vaddr;
+    uint8_t* p_vaddr = base_address_ + program_header->p_vaddr;
     int prot = 0;
     if (executable && ((program_header->p_flags & PF_X) != 0)) {
       prot |= PROT_EXEC;
@@ -1431,7 +1431,7 @@
   }
 
   // Now that we are done loading, .dynamic should be in memory to find .dynstr, .dynsym, .hash
-  byte* dsptr = base_address_ + GetDynamicProgramHeader().p_vaddr;
+  uint8_t* dsptr = base_address_ + GetDynamicProgramHeader().p_vaddr;
   if ((dsptr < Begin() || dsptr >= End()) && !ValidPointer(dsptr)) {
     *error_msg = StringPrintf("dynamic section address invalid in ELF file %s",
                               file_->GetPath().c_str());
@@ -1441,7 +1441,7 @@
 
   for (Elf_Word i = 0; i < GetDynamicNum(); i++) {
     Elf_Dyn& elf_dyn = GetDynamic(i);
-    byte* d_ptr = base_address_ + elf_dyn.d_un.d_ptr;
+    uint8_t* d_ptr = base_address_ + elf_dyn.d_un.d_ptr;
     switch (elf_dyn.d_tag) {
       case DT_HASH: {
         if (!ValidPointer(d_ptr)) {
@@ -1500,7 +1500,7 @@
           typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
 bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
     Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
-    ::ValidPointer(const byte* start) const {
+    ::ValidPointer(const uint8_t* start) const {
   for (size_t i = 0; i < segments_.size(); ++i) {
     const MemMap* segment = segments_[i];
     if (segment->Begin() <= start && start < segment->End()) {
@@ -1550,7 +1550,7 @@
 };
 
 static FDE32* NextFDE(FDE32* frame) {
-  byte* fde_bytes = reinterpret_cast<byte*>(frame);
+  uint8_t* fde_bytes = reinterpret_cast<uint8_t*>(frame);
   fde_bytes += frame->GetLength();
   return reinterpret_cast<FDE32*>(fde_bytes);
 }
@@ -1572,7 +1572,7 @@
 };
 
 static FDE64* NextFDE(FDE64* frame) {
-  byte* fde_bytes = reinterpret_cast<byte*>(frame);
+  uint8_t* fde_bytes = reinterpret_cast<uint8_t*>(frame);
   fde_bytes += frame->GetLength();
   return reinterpret_cast<FDE64*>(fde_bytes);
 }
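
Both NextFDE overloads step through .eh_frame by each record's self-described
length. A sketch of the underlying convention (per the DWARF/.eh_frame format): a
32-bit length prefix, where the sentinel 0xffffffff means a 64-bit extended length
follows, which is exactly what FixupEHFrame below tests for:

#include <stdint.h>
#include <string.h>

// Advance past one length-prefixed .eh_frame record.
static const uint8_t* NextRecord(const uint8_t* record) {
  uint32_t len32;
  memcpy(&len32, record, sizeof(len32));
  if (len32 != 0xffffffff) {
    return record + sizeof(uint32_t) + len32;
  }
  uint64_t len64;  // Extended form: 8-byte length after the sentinel.
  memcpy(&len64, record + sizeof(uint32_t), sizeof(len64));
  return record + sizeof(uint32_t) + sizeof(uint64_t) + len64;
}
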
@@ -1582,7 +1582,7 @@
 }
 
 static bool FixupEHFrame(off_t base_address_delta,
-                           byte* eh_frame, size_t eh_frame_size) {
+                           uint8_t* eh_frame, size_t eh_frame_size) {
   if (*(reinterpret_cast<uint32_t*>(eh_frame)) == 0xffffffff) {
     FDE64* last_frame = reinterpret_cast<FDE64*>(eh_frame + eh_frame_size);
     FDE64* frame = NextFDE(reinterpret_cast<FDE64*>(eh_frame));
@@ -1787,8 +1787,8 @@
   ~DebugTag() {}
   // Creates a new tag and moves the data pointer up to the start of the next one.
   // nullptr means error.
-  static DebugTag* Create(const byte** data_pointer) {
-    const byte* data = *data_pointer;
+  static DebugTag* Create(const uint8_t** data_pointer) {
+    const uint8_t* data = *data_pointer;
     uint32_t index = DecodeUnsignedLeb128(&data);
     std::unique_ptr<DebugTag> tag(new DebugTag(index));
     tag->size_ = static_cast<uint32_t>(
@@ -1867,7 +1867,7 @@
 class DebugAbbrev {
  public:
   ~DebugAbbrev() {}
-  static DebugAbbrev* Create(const byte* dbg_abbrev, size_t dbg_abbrev_size) {
+  static DebugAbbrev* Create(const uint8_t* dbg_abbrev, size_t dbg_abbrev_size) {
     std::unique_ptr<DebugAbbrev> abbrev(new DebugAbbrev(dbg_abbrev, dbg_abbrev + dbg_abbrev_size));
     if (!abbrev->ReadAtOffset(0)) {
       return nullptr;
@@ -1878,7 +1878,7 @@
   bool ReadAtOffset(uint32_t abbrev_offset) {
     tags_.clear();
     tag_list_.clear();
-    const byte* dbg_abbrev = begin_ + abbrev_offset;
+    const uint8_t* dbg_abbrev = begin_ + abbrev_offset;
     while (dbg_abbrev < end_ && *dbg_abbrev != 0) {
       std::unique_ptr<DebugTag> tag(DebugTag::Create(&dbg_abbrev));
       if (tag.get() == nullptr) {
@@ -1891,7 +1891,7 @@
     return true;
   }
 
-  DebugTag* ReadTag(const byte* entry) {
+  DebugTag* ReadTag(const uint8_t* entry) {
     uint32_t tag_num = DecodeUnsignedLeb128(&entry);
     auto it = tags_.find(tag_num);
     if (it == tags_.end()) {
@@ -1903,9 +1903,9 @@
   }
 
  private:
-  DebugAbbrev(const byte* begin, const byte* end) : begin_(begin), end_(end) {}
-  const byte* begin_;
-  const byte* end_;
+  DebugAbbrev(const uint8_t* begin, const uint8_t* end) : begin_(begin), end_(end) {}
+  const uint8_t* begin_;
+  const uint8_t* end_;
   std::map<uint32_t, uint32_t> tags_;
   std::vector<std::unique_ptr<DebugTag>> tag_list_;
 };
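
DebugTag::Create parses one abbreviation declaration at a time, and ReadAtOffset
stops at a zero byte because a zero abbreviation code terminates the table. A sketch
of the wire format being walked (per the DWARF spec; a description, not ART code):

// One .debug_abbrev declaration; every integer is ULEB128-encoded:
//
//   abbrev_code    ULEB128    // 0 terminates the whole table
//   tag            ULEB128    // e.g. DW_TAG_compile_unit
//   has_children   1 byte     // DW_CHILDREN_yes / DW_CHILDREN_no
//   attr/form pairs, one ULEB128 each, terminated by the pair (0, 0)
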
@@ -1934,7 +1934,7 @@
     if (reinterpret_cast<DebugInfoHeader*>(current_entry_) >= next_cu_) {
       current_cu_ = next_cu_;
       next_cu_ = GetNextCu(current_cu_);
-      current_entry_ = reinterpret_cast<byte*>(current_cu_) + sizeof(DebugInfoHeader);
+      current_entry_ = reinterpret_cast<uint8_t*>(current_cu_) + sizeof(DebugInfoHeader);
       reread_abbrev = true;
     }
     if (current_entry_ >= last_entry_) {
@@ -1956,7 +1956,7 @@
   const DebugTag* GetCurrentTag() {
     return const_cast<DebugTag*>(current_tag_);
   }
-  byte* GetPointerToField(uint8_t dwarf_field) {
+  uint8_t* GetPointerToField(uint8_t dwarf_field) {
     if (current_tag_ == nullptr || current_entry_ == nullptr || current_entry_ >= last_entry_) {
       return nullptr;
     }
@@ -1972,7 +1972,7 @@
 
  private:
   static DebugInfoHeader* GetNextCu(DebugInfoHeader* hdr) {
-    byte* hdr_byte = reinterpret_cast<byte*>(hdr);
+    uint8_t* hdr_byte = reinterpret_cast<uint8_t*>(hdr);
     return reinterpret_cast<DebugInfoHeader*>(hdr_byte + sizeof(uint32_t) + hdr->unit_length);
   }
 
@@ -1980,14 +1980,14 @@
       : abbrev_(abbrev),
         current_cu_(header),
         next_cu_(GetNextCu(header)),
-        last_entry_(reinterpret_cast<byte*>(header) + frame_size),
-        current_entry_(reinterpret_cast<byte*>(header) + sizeof(DebugInfoHeader)),
+        last_entry_(reinterpret_cast<uint8_t*>(header) + frame_size),
+        current_entry_(reinterpret_cast<uint8_t*>(header) + sizeof(DebugInfoHeader)),
         current_tag_(abbrev_->ReadTag(current_entry_)) {}
   DebugAbbrev* abbrev_;
   DebugInfoHeader* current_cu_;
   DebugInfoHeader* next_cu_;
-  byte* last_entry_;
-  byte* current_entry_;
+  uint8_t* last_entry_;
+  uint8_t* current_entry_;
   DebugTag* current_tag_;
 };
 
@@ -2437,7 +2437,7 @@
   if (map == nullptr || map->Size() != EI_NIDENT) {
     return nullptr;
   }
-  byte *header = map->Begin();
+  uint8_t* header = map->Begin();
   if (header[EI_CLASS] == ELFCLASS64) {
     ElfFileImpl64* elf_file_impl = ElfFileImpl64::Open(file, writable, program_header_only, error_msg);
     if (elf_file_impl == nullptr)
@@ -2468,7 +2468,7 @@
   if (map == nullptr || map->Size() != EI_NIDENT) {
     return nullptr;
   }
-  byte *header = map->Begin();
+  uint8_t* header = map->Begin();
   if (header[EI_CLASS] == ELFCLASS64) {
     ElfFileImpl64* elf_file_impl = ElfFileImpl64::Open(file, mmap_prot, mmap_flags, error_msg);
     if (elf_file_impl == nullptr)
@@ -2501,7 +2501,7 @@
   DELEGATE_TO_IMPL(Load, executable, error_msg);
 }
 
-const byte* ElfFile::FindDynamicSymbolAddress(const std::string& symbol_name) const {
+const uint8_t* ElfFile::FindDynamicSymbolAddress(const std::string& symbol_name) const {
   DELEGATE_TO_IMPL(FindDynamicSymbolAddress, symbol_name);
 }
 
@@ -2509,11 +2509,11 @@
   DELEGATE_TO_IMPL(Size);
 }
 
-byte* ElfFile::Begin() const {
+uint8_t* ElfFile::Begin() const {
   DELEGATE_TO_IMPL(Begin);
 }
 
-byte* ElfFile::End() const {
+uint8_t* ElfFile::End() const {
   DELEGATE_TO_IMPL(End);
 }
 
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index ea6538b..a7f3056 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -40,13 +40,13 @@
   // Load segments into memory based on PT_LOAD program headers
   bool Load(bool executable, std::string* error_msg);
 
-  const byte* FindDynamicSymbolAddress(const std::string& symbol_name) const;
+  const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name) const;
 
   size_t Size() const;
 
-  byte* Begin() const;
+  uint8_t* Begin() const;
 
-  byte* End() const;
+  uint8_t* End() const;
 
   const File& GetFile() const;
 
diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h
index 942dc291..a2fc422 100644
--- a/runtime/elf_file_impl.h
+++ b/runtime/elf_file_impl.h
@@ -46,11 +46,11 @@
     return *file_;
   }
 
-  byte* Begin() const {
+  uint8_t* Begin() const {
     return map_->Begin();
   }
 
-  byte* End() const {
+  uint8_t* End() const {
     return map_->End();
   }
 
@@ -71,7 +71,7 @@
   Elf_Shdr* GetSectionNameStringSection() const;
 
   // Find .dynsym using .hash for more efficient lookup than FindSymbolAddress.
-  const byte* FindDynamicSymbolAddress(const std::string& symbol_name) const;
+  const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name) const;
 
   static bool IsSymbolSectionType(Elf_Word section_type);
   Elf_Word GetSymbolNum(Elf_Shdr&) const;
@@ -120,8 +120,8 @@
 
   bool SetMap(MemMap* map, std::string* error_msg);
 
-  byte* GetProgramHeadersStart() const;
-  byte* GetSectionHeadersStart() const;
+  uint8_t* GetProgramHeadersStart() const;
+  uint8_t* GetSectionHeadersStart() const;
   Elf_Phdr& GetDynamicProgramHeader() const;
   Elf_Dyn* GetDynamicSectionStart() const;
   Elf_Sym* GetSymbolSectionStart(Elf_Word section_type) const;
@@ -137,7 +137,7 @@
   typedef std::map<std::string, Elf_Sym*> SymbolTable;
   SymbolTable** GetSymbolTable(Elf_Word section_type);
 
-  bool ValidPointer(const byte* start) const;
+  bool ValidPointer(const uint8_t* start) const;
 
   const Elf_Sym* FindDynamicSymbol(const std::string& symbol_name) const;
 
@@ -145,10 +145,10 @@
   bool CheckSectionsExist(std::string* error_msg) const;
 
   // Check that the link of the first section links to the second section.
-  bool CheckSectionsLinked(const byte* source, const byte* target) const;
+  bool CheckSectionsLinked(const uint8_t* source, const uint8_t* target) const;
 
   // Check whether the offset is in range, and set *target to Begin() + offset if OK.
-  bool CheckAndSet(Elf32_Off offset, const char* label, byte** target, std::string* error_msg);
+  bool CheckAndSet(Elf32_Off offset, const char* label, uint8_t** target, std::string* error_msg);
 
   // Find symbol in specified table, returning nullptr if it is not found.
   //
@@ -182,13 +182,13 @@
 
   // Pointer to start of first PT_LOAD program segment after Load()
   // when program_header_only_ is true.
-  byte* base_address_;
+  uint8_t* base_address_;
 
   // The program headers should always be available, but use GetProgramHeadersStart() to be sure.
-  byte* program_headers_start_;
+  uint8_t* program_headers_start_;
 
   // Conditionally available values. Use accessors to ensure they exist if they are required.
-  byte* section_headers_start_;
+  uint8_t* section_headers_start_;
   Elf_Phdr* dynamic_program_header_;
   Elf_Dyn* dynamic_section_start_;
   Elf_Sym* symtab_section_start_;
@@ -201,7 +201,7 @@
   SymbolTable* dynsym_symbol_table_;
 
   // Support for GDB JIT
-  byte* jit_elf_image_;
+  uint8_t* jit_elf_image_;
   JITCodeEntry* jit_gdb_entry_;
   std::unique_ptr<ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
                   Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel,
diff --git a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
index 7f6144b..642c94a 100644
--- a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
@@ -64,8 +64,8 @@
     caller_mh_(caller_mh),
     args_in_regs_(ComputeArgsInRegs(caller_mh)),
     num_params_(caller_mh.NumArgs()),
-    reg_args_(reinterpret_cast<byte*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET),
-    stack_args_(reinterpret_cast<byte*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE
+    reg_args_(reinterpret_cast<uint8_t*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET),
+    stack_args_(reinterpret_cast<uint8_t*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE
                 + PORTABLE_STACK_ARG_SKIP),
     cur_args_(reg_args_),
     cur_arg_index_(0),
@@ -88,8 +88,8 @@
     return caller_mh_.GetParamPrimitiveType(param_index_);
   }
 
-  byte* GetParamAddress() const {
-    return cur_args_ + (cur_arg_index_ * kPointerSize);
+  uint8_t* GetParamAddress() const {
+    return cur_args_ + (cur_arg_index_ * sizeof(void*));
   }
 
   void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -137,9 +137,9 @@
   MethodHelper& caller_mh_;
   const size_t args_in_regs_;
   const size_t num_params_;
-  byte* const reg_args_;
-  byte* const stack_args_;
-  byte* cur_args_;
+  uint8_t* const reg_args_;
+  uint8_t* const stack_args_;
+  uint8_t* cur_args_;
   size_t cur_arg_index_;
   size_t param_index_;
 };
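
The visitor's fields show the pattern behind most rewrites in this change: argument
addresses are byte offsets from the stack pointer, stepped in word-size strides. A
tiny illustration (a hypothetical helper, not ART code):

#include <stddef.h>
#include <stdint.h>

// The i-th word-sized argument slot starting at 'base'. uint8_t* makes the
// offset arithmetic byte-granular; void* arithmetic would not compile.
static uint8_t* ParamAddress(uint8_t* base, size_t index) {
  return base + index * sizeof(void*);
}
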
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 49df62d..42ace40 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -60,7 +60,7 @@
   Runtime* runtime = Runtime::Current();
   sp->Assign(runtime->GetCalleeSaveMethod(Runtime::kRefsOnly));
   uint32_t return_pc_offset = GetCalleeSavePCOffset(kRuntimeISA, Runtime::kRefsOnly);
-  uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) +
+  uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) +
                                                       return_pc_offset);
   CHECK_EQ(*return_pc, 0U);
   self->SetTopOfStack(sp, 0);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index f970ef8..054dd46 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -202,7 +202,7 @@
   static mirror::ArtMethod* GetCallingMethod(StackReference<mirror::ArtMethod>* sp)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
-    byte* previous_sp = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
+    uint8_t* previous_sp = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
     return reinterpret_cast<StackReference<mirror::ArtMethod>*>(previous_sp)->AsMirrorPtr();
   }
 
@@ -210,16 +210,16 @@
   static uintptr_t GetCallingPc(StackReference<mirror::ArtMethod>* sp)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
-    byte* lr = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
+    uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
     return *reinterpret_cast<uintptr_t*>(lr);
   }
 
   QuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static, const char* shorty,
                        uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
           is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
-          gpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
-          fpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
-          stack_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
+          gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
+          fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
+          stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
                       + StackArgumentStartFromShorty(is_static, shorty, shorty_len)),
           gpr_index_(0), fpr_index_(0), stack_index_(0), cur_type_(Primitive::kPrimVoid),
           is_split_long_or_double_(false) {}
@@ -232,7 +232,7 @@
     return cur_type_;
   }
 
-  byte* GetParamAddress() const {
+  uint8_t* GetParamAddress() const {
     if (!kQuickSoftFloatAbi) {
       Primitive::Type type = GetParamPrimitiveType();
       if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
@@ -398,9 +398,9 @@
   const uint32_t shorty_len_;
 
  private:
-  byte* const gpr_args_;  // Address of GPR arguments in callee save frame.
-  byte* const fpr_args_;  // Address of FPR arguments in callee save frame.
-  byte* const stack_args_;  // Address of stack arguments in caller's frame.
+  uint8_t* const gpr_args_;  // Address of GPR arguments in callee save frame.
+  uint8_t* const fpr_args_;  // Address of FPR arguments in callee save frame.
+  uint8_t* const stack_args_;  // Address of stack arguments in caller's frame.
   uint32_t gpr_index_;  // Index into spilled GPRs.
   uint32_t fpr_index_;  // Index into spilled FPRs.
   uint32_t stack_index_;  // Index into arguments on the stack.
@@ -1286,7 +1286,7 @@
     // We have to squeeze in the HandleScope, and relocate the method pointer.
 
     // "Free" the slot for the method.
-    sp8 += kPointerSize;  // In the callee-save frame we use a full pointer.
+    sp8 += sizeof(void*);  // In the callee-save frame we use a full pointer.
 
     // Under the callee saves, put the handle scope and the new method stack reference.
     *handle_scope_entries = num_handle_scope_references_;
@@ -1868,7 +1868,7 @@
 
     // Find the caller PC.
     constexpr size_t pc_offset = GetCalleeSavePCOffset(kRuntimeISA, Runtime::kRefsAndArgs);
-    uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) + pc_offset);
+    uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) + pc_offset);
 
     // Map the caller PC to a dex PC.
     uint32_t dex_pc = caller_method->ToDexPc(caller_pc);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 66ee218..02b8a5b 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -95,7 +95,7 @@
 TEST_F(QuickTrampolineEntrypointsTest, ReturnPC) {
   // Ensure that the computation in callee_save_frame.h is correct.
   // Note: we can only check against kRuntimeISA, because the ArtMethod computation uses
-  // kPointerSize, which is wrong when the target bitwidth is not the same as the host's.
+  // sizeof(void*), which is wrong when the target bitwidth is not the same as the host's.
   CheckPCOffset(kRuntimeISA, Runtime::kRefsAndArgs,
                 GetCalleeSavePCOffset(kRuntimeISA, Runtime::kRefsAndArgs));
   CheckPCOffset(kRuntimeISA, Runtime::kRefsOnly,
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 305e5a2..cfd2a3d 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -86,201 +86,201 @@
     // TODO: Better connection. Take alignment into account.
     EXPECT_OFFSET_DIFF_GT3(Thread, tls64_.stats, tlsPtr_.card_table, 8, thread_tls64_to_tlsptr);
 
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, card_table, exception, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, exception, stack_end, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_end, managed_stack, kPointerSize);
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, card_table, exception, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, exception, stack_end, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_end, managed_stack, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, managed_stack, suspend_trigger, sizeof(ManagedStack));
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, suspend_trigger, jni_env, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, jni_env, self, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, self, opeer, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, opeer, jpeer, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, jpeer, stack_begin, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_begin, stack_size, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_size, throw_location, kPointerSize);
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, suspend_trigger, jni_env, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, jni_env, self, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, self, opeer, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, opeer, jpeer, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, jpeer, stack_begin, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_begin, stack_size, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_size, throw_location, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, throw_location, stack_trace_sample, sizeof(ThrowLocation));
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_trace_sample, wait_next, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, wait_next, monitor_enter_object, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, monitor_enter_object, top_handle_scope, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, top_handle_scope, class_loader_override, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, class_loader_override, long_jump_context, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, long_jump_context, instrumentation_stack, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, instrumentation_stack, debug_invoke_req, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, debug_invoke_req, single_step_control, kPointerSize);
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_trace_sample, wait_next, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, wait_next, monitor_enter_object, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, monitor_enter_object, top_handle_scope, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, top_handle_scope, class_loader_override, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, class_loader_override, long_jump_context, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, long_jump_context, instrumentation_stack, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, instrumentation_stack, debug_invoke_req, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, debug_invoke_req, single_step_control, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, single_step_control, deoptimization_shadow_frame,
-                        kPointerSize);
+                        sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, deoptimization_shadow_frame,
-                        shadow_frame_under_construction, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, shadow_frame_under_construction, name, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, name, pthread_self, kPointerSize);
+                        shadow_frame_under_construction, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, shadow_frame_under_construction, name, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, name, pthread_self, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, pthread_self, last_no_thread_suspension_cause,
-                        kPointerSize);
+                        sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, last_no_thread_suspension_cause, checkpoint_functions,
-                        kPointerSize);
+                        sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, checkpoint_functions, interpreter_entrypoints,
-                        kPointerSize * 3);
+                        sizeof(void*) * 3);
 
     // Skip across the entrypoints structures.
 
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_pos, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_pos, thread_local_end, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_objects, kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, rosalloc_runs, kPointerSize);
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_pos, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_pos, thread_local_end, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_objects, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, rosalloc_runs, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, rosalloc_runs, thread_local_alloc_stack_top,
-                        kPointerSize * kNumRosAllocThreadLocalSizeBrackets);
+                        sizeof(void*) * kNumRosAllocThreadLocalSizeBrackets);
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_alloc_stack_top, thread_local_alloc_stack_end,
-                        kPointerSize);
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_alloc_stack_end, held_mutexes, kPointerSize);
+                        sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_alloc_stack_end, held_mutexes, sizeof(void*));
     EXPECT_OFFSET_DIFF(Thread, tlsPtr_.held_mutexes, Thread, wait_mutex_,
-                       kPointerSize * kLockLevelCount + kPointerSize, thread_tlsptr_end);
+                       sizeof(void*) * kLockLevelCount + sizeof(void*), thread_tlsptr_end);
   }
 
   void CheckInterpreterEntryPoints() {
     CHECKED(OFFSETOF_MEMBER(InterpreterEntryPoints, pInterpreterToInterpreterBridge) == 0,
             InterpreterEntryPoints_start_with_i2i);
     EXPECT_OFFSET_DIFFNP(InterpreterEntryPoints, pInterpreterToInterpreterBridge,
-                         pInterpreterToCompiledCodeBridge, kPointerSize);
+                         pInterpreterToCompiledCodeBridge, sizeof(void*));
     CHECKED(OFFSETOF_MEMBER(InterpreterEntryPoints, pInterpreterToCompiledCodeBridge)
-            + kPointerSize == sizeof(InterpreterEntryPoints), InterpreterEntryPoints_all);
+            + sizeof(void*) == sizeof(InterpreterEntryPoints), InterpreterEntryPoints_all);
   }
 
   void CheckJniEntryPoints() {
     CHECKED(OFFSETOF_MEMBER(JniEntryPoints, pDlsymLookup) == 0,
             JniEntryPoints_start_with_dlsymlookup);
     CHECKED(OFFSETOF_MEMBER(JniEntryPoints, pDlsymLookup)
-            + kPointerSize == sizeof(JniEntryPoints), JniEntryPoints_all);
+            + sizeof(void*) == sizeof(JniEntryPoints), JniEntryPoints_all);
   }
 
   void CheckPortableEntryPoints() {
     CHECKED(OFFSETOF_MEMBER(PortableEntryPoints, pPortableImtConflictTrampoline) == 0,
             PortableEntryPoints_start_with_imt);
     EXPECT_OFFSET_DIFFNP(PortableEntryPoints, pPortableImtConflictTrampoline,
-                         pPortableResolutionTrampoline, kPointerSize);
+                         pPortableResolutionTrampoline, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(PortableEntryPoints, pPortableResolutionTrampoline,
-                         pPortableToInterpreterBridge, kPointerSize);
+                         pPortableToInterpreterBridge, sizeof(void*));
     CHECKED(OFFSETOF_MEMBER(PortableEntryPoints, pPortableToInterpreterBridge)
-            + kPointerSize == sizeof(PortableEntryPoints), PortableEntryPoints_all);
+            + sizeof(void*) == sizeof(PortableEntryPoints), PortableEntryPoints_all);
   }
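
  // All of these offset checks encode one invariant: adjacent entrypoint
  // fields sit exactly one pointer apart, because assembly stubs index the
  // tables by constant offsets. A compile-time sketch of the same idea
  // (illustrative struct, not ART's; offsetof comes from <cstddef>):
  struct IllustrativeTable {
    void (*first)();
    void (*second)();
  };
  static_assert(offsetof(IllustrativeTable, second) - offsetof(IllustrativeTable, first)
                    == sizeof(void*),
                "fields must be contiguous pointer slots");
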
 
   void CheckQuickEntryPoints() {
     CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pAllocArray) == 0,
                 QuickEntryPoints_start_with_allocarray);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArray, pAllocArrayResolved, kPointerSize);
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArray, pAllocArrayResolved, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayResolved, pAllocArrayWithAccessCheck,
-                         kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayWithAccessCheck, pAllocObject, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObject, pAllocObjectResolved, kPointerSize);
+                         sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayWithAccessCheck, pAllocObject, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObject, pAllocObjectResolved, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectResolved, pAllocObjectInitialized,
-                         kPointerSize);
+                         sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectInitialized, pAllocObjectWithAccessCheck,
-                         kPointerSize);
+                         sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectWithAccessCheck, pCheckAndAllocArray,
-                         kPointerSize);
+                         sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckAndAllocArray, pCheckAndAllocArrayWithAccessCheck,
-                         kPointerSize);
+                         sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckAndAllocArrayWithAccessCheck,
-                         pInstanceofNonTrivial, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInstanceofNonTrivial, pCheckCast, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckCast, pInitializeStaticStorage, kPointerSize);
+                         pInstanceofNonTrivial, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInstanceofNonTrivial, pCheckCast, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckCast, pInitializeStaticStorage, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeStaticStorage, pInitializeTypeAndVerifyAccess,
-                         kPointerSize);
+                         sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeTypeAndVerifyAccess, pInitializeType,
-                         kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeType, pResolveString, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveString, pSet8Instance, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet8Instance, pSet8Static, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet8Static, pSet16Instance, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet16Instance, pSet16Static, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet16Static, pSet32Instance, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet32Instance, pSet32Static, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet32Static, pSet64Instance, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet64Instance, pSet64Static, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet64Static, pSetObjInstance, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSetObjInstance, pSetObjStatic, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSetObjStatic, pGetByteInstance, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetByteInstance, pGetBooleanInstance, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetBooleanInstance, pGetByteStatic, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetByteStatic, pGetBooleanStatic, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetBooleanStatic, pGetShortInstance, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetShortInstance, pGetCharInstance, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetCharInstance, pGetShortStatic, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetShortStatic, pGetCharStatic, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetCharStatic, pGet32Instance, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet32Instance, pGet32Static, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet32Static, pGet64Instance, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet64Instance, pGet64Static, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet64Static, pGetObjInstance, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetObjInstance, pGetObjStatic, kPointerSize);
+                         sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeType, pResolveString, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveString, pSet8Instance, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet8Instance, pSet8Static, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet8Static, pSet16Instance, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet16Instance, pSet16Static, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet16Static, pSet32Instance, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet32Instance, pSet32Static, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet32Static, pSet64Instance, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet64Instance, pSet64Static, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet64Static, pSetObjInstance, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSetObjInstance, pSetObjStatic, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSetObjStatic, pGetByteInstance, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetByteInstance, pGetBooleanInstance, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetBooleanInstance, pGetByteStatic, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetByteStatic, pGetBooleanStatic, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetBooleanStatic, pGetShortInstance, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetShortInstance, pGetCharInstance, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetCharInstance, pGetShortStatic, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetShortStatic, pGetCharStatic, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetCharStatic, pGet32Instance, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet32Instance, pGet32Static, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet32Static, pGet64Instance, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet64Instance, pGet64Static, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet64Static, pGetObjInstance, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetObjInstance, pGetObjStatic, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetObjStatic, pAputObjectWithNullAndBoundCheck,
-                         kPointerSize);
+                         sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAputObjectWithNullAndBoundCheck,
-                         pAputObjectWithBoundCheck, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAputObjectWithBoundCheck, pAputObject, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAputObject, pHandleFillArrayData, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pHandleFillArrayData, pJniMethodStart, kPointerSize);
+                         pAputObjectWithBoundCheck, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAputObjectWithBoundCheck, pAputObject, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAputObject, pHandleFillArrayData, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pHandleFillArrayData, pJniMethodStart, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniMethodStart, pJniMethodStartSynchronized,
-                         kPointerSize);
+                         sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniMethodStartSynchronized, pJniMethodEnd,
-                         kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniMethodEnd, pJniMethodEndSynchronized, kPointerSize);
+                         sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniMethodEnd, pJniMethodEndSynchronized, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniMethodEndSynchronized, pJniMethodEndWithReference,
-                         kPointerSize);
+                         sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniMethodEndWithReference,
-                         pJniMethodEndWithReferenceSynchronized, kPointerSize);
+                         pJniMethodEndWithReferenceSynchronized, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniMethodEndWithReferenceSynchronized,
-                         pQuickGenericJniTrampoline, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pQuickGenericJniTrampoline, pLockObject, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pLockObject, pUnlockObject, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pUnlockObject, pCmpgDouble, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCmpgDouble, pCmpgFloat, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCmpgFloat, pCmplDouble, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCmplDouble, pCmplFloat, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCmplFloat, pFmod, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pFmod, pL2d, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pL2d, pFmodf, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pFmodf, pL2f, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pL2f, pD2iz, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pD2iz, pF2iz, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pF2iz, pIdivmod, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pIdivmod, pD2l, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pD2l, pF2l, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pF2l, pLdiv, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pLdiv, pLmod, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pLmod, pLmul, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pLmul, pShlLong, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pShlLong, pShrLong, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pShrLong, pUshrLong, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pUshrLong, pIndexOf, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pIndexOf, pStringCompareTo, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pStringCompareTo, pMemcpy, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pMemcpy, pQuickImtConflictTrampoline, kPointerSize);
+                         pQuickGenericJniTrampoline, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pQuickGenericJniTrampoline, pLockObject, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pLockObject, pUnlockObject, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pUnlockObject, pCmpgDouble, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCmpgDouble, pCmpgFloat, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCmpgFloat, pCmplDouble, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCmplDouble, pCmplFloat, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCmplFloat, pFmod, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pFmod, pL2d, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pL2d, pFmodf, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pFmodf, pL2f, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pL2f, pD2iz, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pD2iz, pF2iz, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pF2iz, pIdivmod, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pIdivmod, pD2l, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pD2l, pF2l, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pF2l, pLdiv, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pLdiv, pLmod, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pLmod, pLmul, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pLmul, pShlLong, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pShlLong, pShrLong, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pShrLong, pUshrLong, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pUshrLong, pIndexOf, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pIndexOf, pStringCompareTo, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pStringCompareTo, pMemcpy, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pMemcpy, pQuickImtConflictTrampoline, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pQuickImtConflictTrampoline, pQuickResolutionTrampoline,
-                         kPointerSize);
+                         sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pQuickResolutionTrampoline, pQuickToInterpreterBridge,
-                         kPointerSize);
+                         sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pQuickToInterpreterBridge,
-                         pInvokeDirectTrampolineWithAccessCheck, kPointerSize);
+                         pInvokeDirectTrampolineWithAccessCheck, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokeDirectTrampolineWithAccessCheck,
-                         pInvokeInterfaceTrampolineWithAccessCheck, kPointerSize);
+                         pInvokeInterfaceTrampolineWithAccessCheck, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokeInterfaceTrampolineWithAccessCheck,
-                         pInvokeStaticTrampolineWithAccessCheck, kPointerSize);
+                         pInvokeStaticTrampolineWithAccessCheck, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokeStaticTrampolineWithAccessCheck,
-                         pInvokeSuperTrampolineWithAccessCheck, kPointerSize);
+                         pInvokeSuperTrampolineWithAccessCheck, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokeSuperTrampolineWithAccessCheck,
-                         pInvokeVirtualTrampolineWithAccessCheck, kPointerSize);
+                         pInvokeVirtualTrampolineWithAccessCheck, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokeVirtualTrampolineWithAccessCheck,
-                         pTestSuspend, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pTestSuspend, pDeliverException, kPointerSize);
+                         pTestSuspend, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pTestSuspend, pDeliverException, sizeof(void*));
 
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pDeliverException, pThrowArrayBounds, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowArrayBounds, pThrowDivZero, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowDivZero, pThrowNoSuchMethod, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNoSuchMethod, pThrowNullPointer, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNullPointer, pThrowStackOverflow, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowStackOverflow, pA64Load, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pA64Load, pA64Store, kPointerSize);
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pDeliverException, pThrowArrayBounds, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowArrayBounds, pThrowDivZero, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowDivZero, pThrowNoSuchMethod, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNoSuchMethod, pThrowNullPointer, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNullPointer, pThrowStackOverflow, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowStackOverflow, pA64Load, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pA64Load, pA64Store, sizeof(void*));
 
     CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pA64Store)
-            + kPointerSize == sizeof(QuickEntryPoints), QuickEntryPoints_all);
+            + sizeof(void*) == sizeof(QuickEntryPoints), QuickEntryPoints_all);
   }
 };
 
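For context, the block above encodes one invariant: every quick entry point is a
pointer-sized slot laid out contiguously, so stubs can index the table as
base + n * sizeof(void*). A minimal standalone sketch of the same style of check,
using plain offsetof on an illustrative struct (not ART code):

    #include <cstddef>

    struct EntryPoints {
      void (*pFirst)();
      void (*pSecond)();
      void (*pLast)();
    };

    // Consecutive members must differ by exactly one pointer, as in
    // EXPECT_OFFSET_DIFFNP(..., sizeof(void*)).
    static_assert(offsetof(EntryPoints, pSecond) - offsetof(EntryPoints, pFirst)
                      == sizeof(void*),
                  "entry points must be contiguous pointer-sized slots");
    // The final member plus one pointer must reach the end of the struct,
    // mirroring the CHECKED(...) line above.
    static_assert(offsetof(EntryPoints, pLast) + sizeof(void*) == sizeof(EntryPoints),
                  "no padding after the final entry point");
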
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 6033a5f..3a17eca 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -77,7 +77,7 @@
     uint32_t vmap_table_offset = sizeof(OatQuickMethodHeader) + fake_vmap_table_data.size();
     uint32_t mapping_table_offset = vmap_table_offset + fake_mapping_data.size();
     OatQuickMethodHeader method_header(mapping_table_offset, vmap_table_offset,
-                                       4 * kPointerSize, 0u, 0u, code_size);
+                                       4 * sizeof(void*), 0u, 0u, code_size);
     fake_header_code_and_maps_.resize(sizeof(method_header));
     memcpy(&fake_header_code_and_maps_[0], &method_header, sizeof(method_header));
     fake_header_code_and_maps_.insert(fake_header_code_and_maps_.begin(),
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 2c72ba1..929a1d2 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -213,7 +213,7 @@
     mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), NULL, capacity_ * sizeof(T),
                                         PROT_READ | PROT_WRITE, false, &error_msg));
     CHECK(mem_map_.get() != NULL) << "couldn't allocate mark stack.\n" << error_msg;
-    byte* addr = mem_map_->Begin();
+    uint8_t* addr = mem_map_->Begin();
     CHECK(addr != NULL);
     debug_is_sorted_ = true;
     begin_ = reinterpret_cast<T*>(addr);
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 3b06f74..15562e5 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -27,9 +27,9 @@
 namespace gc {
 namespace accounting {
 
-static inline bool byte_cas(byte old_value, byte new_value, byte* address) {
+static inline bool byte_cas(uint8_t old_value, uint8_t new_value, uint8_t* address) {
 #if defined(__i386__) || defined(__x86_64__)
-  Atomic<byte>* byte_atomic = reinterpret_cast<Atomic<byte>*>(address);
+  Atomic<uint8_t>* byte_atomic = reinterpret_cast<Atomic<uint8_t>*>(address);
   return byte_atomic->CompareExchangeWeakRelaxed(old_value, new_value);
 #else
   // Little endian means most significant byte is on the left.
@@ -49,19 +49,19 @@
 }
 
 template <typename Visitor>
-inline size_t CardTable::Scan(ContinuousSpaceBitmap* bitmap, byte* scan_begin, byte* scan_end,
-                              const Visitor& visitor, const byte minimum_age) const {
-  DCHECK_GE(scan_begin, reinterpret_cast<byte*>(bitmap->HeapBegin()));
+inline size_t CardTable::Scan(ContinuousSpaceBitmap* bitmap, uint8_t* scan_begin, uint8_t* scan_end,
+                              const Visitor& visitor, const uint8_t minimum_age) const {
+  DCHECK_GE(scan_begin, reinterpret_cast<uint8_t*>(bitmap->HeapBegin()));
   // scan_end is the byte after the last byte we scan.
-  DCHECK_LE(scan_end, reinterpret_cast<byte*>(bitmap->HeapLimit()));
-  byte* card_cur = CardFromAddr(scan_begin);
-  byte* card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
+  DCHECK_LE(scan_end, reinterpret_cast<uint8_t*>(bitmap->HeapLimit()));
+  uint8_t* card_cur = CardFromAddr(scan_begin);
+  uint8_t* card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
   CheckCardValid(card_cur);
   CheckCardValid(card_end);
   size_t cards_scanned = 0;
 
   // Handle any unaligned cards at the start.
-  while (!IsAligned<sizeof(word)>(card_cur) && card_cur < card_end) {
+  while (!IsAligned<sizeof(intptr_t)>(card_cur) && card_cur < card_end) {
     if (*card_cur >= minimum_age) {
       uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
       bitmap->VisitMarkedRange(start, start + kCardSize, visitor);
@@ -70,7 +70,7 @@
     ++card_cur;
   }
 
-  byte* aligned_end = card_end -
+  uint8_t* aligned_end = card_end -
       (reinterpret_cast<uintptr_t>(card_end) & (sizeof(uintptr_t) - 1));
 
   uintptr_t* word_end = reinterpret_cast<uintptr_t*>(aligned_end);
@@ -85,14 +85,14 @@
 
     // Find the first dirty card.
     uintptr_t start_word = *word_cur;
-    uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(reinterpret_cast<byte*>(word_cur)));
+    uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(reinterpret_cast<uint8_t*>(word_cur)));
     // TODO: Investigate if processing contiguous runs of dirty cards with a single bitmap visit is
     // more efficient.
     for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
-      if (static_cast<byte>(start_word) >= minimum_age) {
-        auto* card = reinterpret_cast<byte*>(word_cur) + i;
-        DCHECK(*card == static_cast<byte>(start_word) || *card == kCardDirty)
-            << "card " << static_cast<size_t>(*card) << " word " << (start_word & 0xFF);
+      if (static_cast<uint8_t>(start_word) >= minimum_age) {
+        auto* card = reinterpret_cast<uint8_t*>(word_cur) + i;
+        DCHECK(*card == static_cast<uint8_t>(start_word) || *card == kCardDirty)
+            << "card " << static_cast<size_t>(*card) << " intptr_t " << (start_word & 0xFF);
         bitmap->VisitMarkedRange(start, start + kCardSize, visitor);
         ++cards_scanned;
       }
@@ -103,7 +103,7 @@
   exit_for:
 
   // Handle any unaligned cards at the end.
-  card_cur = reinterpret_cast<byte*>(word_end);
+  card_cur = reinterpret_cast<uint8_t*>(word_end);
   while (card_cur < card_end) {
     if (*card_cur >= minimum_age) {
       uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
@@ -125,16 +125,16 @@
  * us to know which cards got cleared.
  */
 template <typename Visitor, typename ModifiedVisitor>
-inline void CardTable::ModifyCardsAtomic(byte* scan_begin, byte* scan_end, const Visitor& visitor,
+inline void CardTable::ModifyCardsAtomic(uint8_t* scan_begin, uint8_t* scan_end, const Visitor& visitor,
                                          const ModifiedVisitor& modified) {
-  byte* card_cur = CardFromAddr(scan_begin);
-  byte* card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
+  uint8_t* card_cur = CardFromAddr(scan_begin);
+  uint8_t* card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
   CheckCardValid(card_cur);
   CheckCardValid(card_end);
 
   // Handle any unaligned cards at the start.
-  while (!IsAligned<sizeof(word)>(card_cur) && card_cur < card_end) {
-    byte expected, new_value;
+  while (!IsAligned<sizeof(intptr_t)>(card_cur) && card_cur < card_end) {
+    uint8_t expected, new_value;
     do {
       expected = *card_cur;
       new_value = visitor(expected);
@@ -146,9 +146,9 @@
   }
 
   // Handle unaligned cards at the end.
-  while (!IsAligned<sizeof(word)>(card_end) && card_end > card_cur) {
+  while (!IsAligned<sizeof(intptr_t)>(card_end) && card_end > card_cur) {
     --card_end;
-    byte expected, new_value;
+    uint8_t expected, new_value;
     do {
       expected = *card_end;
       new_value = visitor(expected);
@@ -184,10 +184,10 @@
       Atomic<uintptr_t>* atomic_word = reinterpret_cast<Atomic<uintptr_t>*>(word_cur);
       if (LIKELY(atomic_word->CompareExchangeWeakRelaxed(expected_word, new_word))) {
         for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
-          const byte expected_byte = expected_bytes[i];
-          const byte new_byte = new_bytes[i];
+          const uint8_t expected_byte = expected_bytes[i];
+          const uint8_t new_byte = new_bytes[i];
           if (expected_byte != new_byte) {
-            modified(reinterpret_cast<byte*>(word_cur) + i, expected_byte, new_byte);
+            modified(reinterpret_cast<uint8_t*>(word_cur) + i, expected_byte, new_byte);
           }
         }
         break;
@@ -197,7 +197,7 @@
   }
 }
 
-inline void* CardTable::AddrFromCard(const byte *card_addr) const {
+inline void* CardTable::AddrFromCard(const uint8_t *card_addr) const {
   DCHECK(IsValidCard(card_addr))
     << " card_addr: " << reinterpret_cast<const void*>(card_addr)
     << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
@@ -206,15 +206,15 @@
   return reinterpret_cast<void*>(offset << kCardShift);
 }
 
-inline byte* CardTable::CardFromAddr(const void *addr) const {
-  byte *card_addr = biased_begin_ + (reinterpret_cast<uintptr_t>(addr) >> kCardShift);
+inline uint8_t* CardTable::CardFromAddr(const void *addr) const {
+  uint8_t *card_addr = biased_begin_ + (reinterpret_cast<uintptr_t>(addr) >> kCardShift);
   // Sanity check that the caller was asking for an address covered by the card table.
   DCHECK(IsValidCard(card_addr)) << "addr: " << addr
       << " card_addr: " << reinterpret_cast<void*>(card_addr);
   return card_addr;
 }
 
-inline void CardTable::CheckCardValid(byte* card) const {
+inline void CardTable::CheckCardValid(uint8_t* card) const {
   DCHECK(IsValidCard(card))
       << " card_addr: " << reinterpret_cast<const void*>(card)
       << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 0498550..9a6f2b2 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -55,7 +55,7 @@
  * byte is equal to GC_DIRTY_CARD. See CardTable::Create for details.
  */
 
-CardTable* CardTable::Create(const byte* heap_begin, size_t heap_capacity) {
+CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) {
   /* Set up the card table */
   size_t capacity = heap_capacity / kCardSize;
   /* Allocate an extra 256 bytes to allow fixing the low byte of the biased base */
@@ -68,13 +68,13 @@
   // don't clear the card table to avoid unnecessary pages being allocated
   COMPILE_ASSERT(kCardClean == 0, card_clean_must_be_0);
 
-  byte* cardtable_begin = mem_map->Begin();
+  uint8_t* cardtable_begin = mem_map->Begin();
   CHECK(cardtable_begin != NULL);
 
   // We allocated up to a byte's worth of extra space to allow biased_begin's byte value to equal
   // kCardDirty; compute an offset value to make this the case.
   size_t offset = 0;
-  byte* biased_begin = reinterpret_cast<byte*>(reinterpret_cast<uintptr_t>(cardtable_begin) -
+  uint8_t* biased_begin = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(cardtable_begin) -
       (reinterpret_cast<uintptr_t>(heap_begin) >> kCardShift));
   uintptr_t biased_byte = reinterpret_cast<uintptr_t>(biased_begin) & 0xff;
   if (biased_byte != kCardDirty) {
@@ -86,14 +86,14 @@
   return new CardTable(mem_map.release(), biased_begin, offset);
 }
 
-CardTable::CardTable(MemMap* mem_map, byte* biased_begin, size_t offset)
+CardTable::CardTable(MemMap* mem_map, uint8_t* biased_begin, size_t offset)
     : mem_map_(mem_map), biased_begin_(biased_begin), offset_(offset) {
 }
 
 void CardTable::ClearSpaceCards(space::ContinuousSpace* space) {
   // TODO: clear just the range of the table that has been modified
-  byte* card_start = CardFromAddr(space->Begin());
-  byte* card_end = CardFromAddr(space->End());  // Make sure to round up.
+  uint8_t* card_start = CardFromAddr(space->Begin());
+  uint8_t* card_end = CardFromAddr(space->End());  // Make sure to round up.
   memset(reinterpret_cast<void*>(card_start), kCardClean, card_end - card_start);
 }
 
@@ -106,10 +106,10 @@
   return IsValidCard(biased_begin_ + ((uintptr_t)addr >> kCardShift));
 }
 
-void CardTable::CheckAddrIsInCardTable(const byte* addr) const {
-  byte* card_addr = biased_begin_ + ((uintptr_t)addr >> kCardShift);
-  byte* begin = mem_map_->Begin() + offset_;
-  byte* end = mem_map_->End();
+void CardTable::CheckAddrIsInCardTable(const uint8_t* addr) const {
+  uint8_t* card_addr = biased_begin_ + ((uintptr_t)addr >> kCardShift);
+  uint8_t* begin = mem_map_->Begin() + offset_;
+  uint8_t* end = mem_map_->End();
   CHECK(AddrIsInCardTable(addr))
       << "Card table " << this
       << " begin: " << reinterpret_cast<void*>(begin)
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index fbeea85..e1343c8 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -51,11 +51,11 @@
   static constexpr uint8_t kCardClean = 0x0;
   static constexpr uint8_t kCardDirty = 0x70;
 
-  static CardTable* Create(const byte* heap_begin, size_t heap_capacity);
+  static CardTable* Create(const uint8_t* heap_begin, size_t heap_capacity);
 
   // Set the card associated with the given address to GC_CARD_DIRTY.
   void MarkCard(const void *addr) {
-    byte* card_addr = CardFromAddr(addr);
+    uint8_t* card_addr = CardFromAddr(addr);
     *card_addr = kCardDirty;
   }
 
@@ -65,16 +65,16 @@
   }
 
   // Return the state of the card at an address.
-  byte GetCard(const mirror::Object* obj) const {
+  uint8_t GetCard(const mirror::Object* obj) const {
     return *CardFromAddr(obj);
   }
 
   // Visit and clear cards within memory range, only visits dirty cards.
   template <typename Visitor>
   void VisitClear(const void* start, const void* end, const Visitor& visitor) {
-    byte* card_start = CardFromAddr(start);
-    byte* card_end = CardFromAddr(end);
-    for (byte* it = card_start; it != card_end; ++it) {
+    uint8_t* card_start = CardFromAddr(start);
+    uint8_t* card_end = CardFromAddr(end);
+    for (uint8_t* it = card_start; it != card_end; ++it) {
       if (*it == kCardDirty) {
         *it = kCardClean;
         visitor(it);
@@ -84,7 +84,7 @@
 
   // Returns a value that, when added to a heap address >> GC_CARD_SHIFT, will address the
   // appropriate card table byte. For convenience this value is cached in every Thread.
-  byte* GetBiasedBegin() const {
+  uint8_t* GetBiasedBegin() const {
     return biased_begin_;
   }
 
@@ -97,20 +97,20 @@
    * us to know which cards got cleared.
    */
   template <typename Visitor, typename ModifiedVisitor>
-  void ModifyCardsAtomic(byte* scan_begin, byte* scan_end, const Visitor& visitor,
+  void ModifyCardsAtomic(uint8_t* scan_begin, uint8_t* scan_end, const Visitor& visitor,
                          const ModifiedVisitor& modified);
 
   // For every dirty card of at least minimum_age between begin and end, invoke the visitor with
   // the specified argument. Returns how many cards the visitor was run on.
   template <typename Visitor>
-  size_t Scan(SpaceBitmap<kObjectAlignment>* bitmap, byte* scan_begin, byte* scan_end,
+  size_t Scan(SpaceBitmap<kObjectAlignment>* bitmap, uint8_t* scan_begin, uint8_t* scan_end,
               const Visitor& visitor,
-              const byte minimum_age = kCardDirty) const
+              const uint8_t minimum_age = kCardDirty) const
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Assertion used to check that the given address is covered by the card table.
-  void CheckAddrIsInCardTable(const byte* addr) const;
+  void CheckAddrIsInCardTable(const uint8_t* addr) const;
 
   // Resets all of the bytes in the card table to clean.
   void ClearCardTable();
@@ -119,24 +119,24 @@
   void ClearSpaceCards(space::ContinuousSpace* space);
 
   // Returns the first address in the heap which maps to this card.
-  void* AddrFromCard(const byte *card_addr) const ALWAYS_INLINE;
+  void* AddrFromCard(const uint8_t *card_addr) const ALWAYS_INLINE;
 
   // Returns the address of the relevant byte in the card table, given an address on the heap.
-  byte* CardFromAddr(const void *addr) const ALWAYS_INLINE;
+  uint8_t* CardFromAddr(const void *addr) const ALWAYS_INLINE;
 
   bool AddrIsInCardTable(const void* addr) const;
 
  private:
-  CardTable(MemMap* begin, byte* biased_begin, size_t offset);
+  CardTable(MemMap* begin, uint8_t* biased_begin, size_t offset);
 
   // Returns true iff the card table address is within the bounds of the card table.
-  bool IsValidCard(const byte* card_addr) const {
-    byte* begin = mem_map_->Begin() + offset_;
-    byte* end = mem_map_->End();
+  bool IsValidCard(const uint8_t* card_addr) const {
+    uint8_t* begin = mem_map_->Begin() + offset_;
+    uint8_t* end = mem_map_->End();
     return card_addr >= begin && card_addr < end;
   }
 
-  void CheckCardValid(byte* card) const ALWAYS_INLINE;
+  void CheckCardValid(uint8_t* card) const ALWAYS_INLINE;
 
   // Verifies that all gray objects are on a dirty card.
   void VerifyCardTable();
@@ -144,7 +144,7 @@
   // Mmapped pages for the card table
   std::unique_ptr<MemMap> mem_map_;
   // Value used to compute card table addresses from object addresses, see GetBiasedBegin
-  byte* const biased_begin_;
+  uint8_t* const biased_begin_;
   // Card table doesn't begin at the beginning of the mem_map_; instead it is displaced by offset
   // to allow the byte value of biased_begin_ to equal GC_CARD_DIRTY.
   const size_t offset_;
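
As a usage note: AddrFromCard inverts CardFromAddr up to card granularity, which
is the property the DCHECKs in the -inl file lean on. A hedged round-trip check,
assuming a constructed table and a heap pointer it covers (requires
"gc/accounting/card_table.h"):

    #include <cassert>
    #include <cstdint>

    // table: a live art::gc::accounting::CardTable*; ptr: covered heap address.
    void CheckRoundTrip(art::gc::accounting::CardTable* table, const void* ptr) {
      uint8_t* card = table->CardFromAddr(ptr);
      auto* region_start =
          reinterpret_cast<const uint8_t*>(table->AddrFromCard(card));
      // ptr must fall inside the kCardSize-byte region the card stands for.
      assert(region_start <= static_cast<const uint8_t*>(ptr));
      assert(static_cast<const uint8_t*>(ptr) <
             region_start + art::gc::accounting::CardTable::kCardSize);
    }
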
diff --git a/runtime/gc/accounting/card_table_test.cc b/runtime/gc/accounting/card_table_test.cc
index 433855a..819cb85 100644
--- a/runtime/gc/accounting/card_table_test.cc
+++ b/runtime/gc/accounting/card_table_test.cc
@@ -49,45 +49,45 @@
     }
   }
   // Default values for the test, not random, to avoid nondeterministic behaviour.
-  CardTableTest() : heap_begin_(reinterpret_cast<byte*>(0x2000000)), heap_size_(2 * MB) {
+  CardTableTest() : heap_begin_(reinterpret_cast<uint8_t*>(0x2000000)), heap_size_(2 * MB) {
   }
   void ClearCardTable() {
     card_table_->ClearCardTable();
   }
-  byte* HeapBegin() const {
+  uint8_t* HeapBegin() const {
     return heap_begin_;
   }
-  byte* HeapLimit() const {
+  uint8_t* HeapLimit() const {
     return HeapBegin() + heap_size_;
   }
   // Return a pseudo random card for an address.
-  byte PseudoRandomCard(const byte* addr) const {
+  uint8_t PseudoRandomCard(const uint8_t* addr) const {
     size_t offset = RoundDown(addr - heap_begin_, CardTable::kCardSize);
     return 1 + offset % 254;
   }
   void FillRandom() {
-    for (const byte* addr = HeapBegin(); addr != HeapLimit(); addr += CardTable::kCardSize) {
+    for (const uint8_t* addr = HeapBegin(); addr != HeapLimit(); addr += CardTable::kCardSize) {
       EXPECT_TRUE(card_table_->AddrIsInCardTable(addr));
-      byte* card = card_table_->CardFromAddr(addr);
+      uint8_t* card = card_table_->CardFromAddr(addr);
       *card = PseudoRandomCard(addr);
     }
   }
 
  private:
-  byte* const heap_begin_;
+  uint8_t* const heap_begin_;
   const size_t heap_size_;
 };
 
 TEST_F(CardTableTest, TestMarkCard) {
   CommonSetup();
-  for (const byte* addr = HeapBegin(); addr < HeapLimit(); addr += kObjectAlignment) {
+  for (const uint8_t* addr = HeapBegin(); addr < HeapLimit(); addr += kObjectAlignment) {
     auto obj = reinterpret_cast<const mirror::Object*>(addr);
     EXPECT_EQ(card_table_->GetCard(obj), CardTable::kCardClean);
     EXPECT_TRUE(!card_table_->IsDirty(obj));
     card_table_->MarkCard(addr);
     EXPECT_TRUE(card_table_->IsDirty(obj));
     EXPECT_EQ(card_table_->GetCard(obj), CardTable::kCardDirty);
-    byte* card_addr = card_table_->CardFromAddr(addr);
+    uint8_t* card_addr = card_table_->CardFromAddr(addr);
     EXPECT_EQ(*card_addr, CardTable::kCardDirty);
     *card_addr = CardTable::kCardClean;
     EXPECT_EQ(*card_addr, CardTable::kCardClean);
@@ -96,10 +96,10 @@
 
 class UpdateVisitor {
  public:
-  byte operator()(byte c) const {
+  uint8_t operator()(uint8_t c) const {
     return c * 93 + 123;
   }
-  void operator()(byte* /*card*/, byte /*expected_value*/, byte /*new_value*/) const {
+  void operator()(uint8_t* /*card*/, uint8_t /*expected_value*/, uint8_t /*new_value*/) const {
   }
 };
 
@@ -110,32 +110,32 @@
                                 8U * CardTable::kCardSize);
   UpdateVisitor visitor;
   size_t start_offset = 0;
-  for (byte* cstart = HeapBegin(); cstart < HeapBegin() + delta; cstart += CardTable::kCardSize) {
+  for (uint8_t* cstart = HeapBegin(); cstart < HeapBegin() + delta; cstart += CardTable::kCardSize) {
     start_offset = (start_offset + kObjectAlignment) % CardTable::kCardSize;
     size_t end_offset = 0;
-    for (byte* cend = HeapLimit() - delta; cend < HeapLimit(); cend += CardTable::kCardSize) {
+    for (uint8_t* cend = HeapLimit() - delta; cend < HeapLimit(); cend += CardTable::kCardSize) {
       // Don't always start at a card boundary.
-      byte* start = cstart + start_offset;
-      byte* end = cend - end_offset;
+      uint8_t* start = cstart + start_offset;
+      uint8_t* end = cend - end_offset;
       end_offset = (end_offset + kObjectAlignment) % CardTable::kCardSize;
       // Modify cards.
       card_table_->ModifyCardsAtomic(start, end, visitor, visitor);
       // Check adjacent cards not modified.
-      for (byte* cur = start - CardTable::kCardSize; cur >= HeapBegin();
+      for (uint8_t* cur = start - CardTable::kCardSize; cur >= HeapBegin();
           cur -= CardTable::kCardSize) {
         EXPECT_EQ(card_table_->GetCard(reinterpret_cast<mirror::Object*>(cur)),
                   PseudoRandomCard(cur));
       }
-      for (byte* cur = end + CardTable::kCardSize; cur < HeapLimit();
+      for (uint8_t* cur = end + CardTable::kCardSize; cur < HeapLimit();
           cur += CardTable::kCardSize) {
         EXPECT_EQ(card_table_->GetCard(reinterpret_cast<mirror::Object*>(cur)),
                   PseudoRandomCard(cur));
       }
       // Verify Range.
-      for (byte* cur = start; cur < AlignUp(end, CardTable::kCardSize);
+      for (uint8_t* cur = start; cur < AlignUp(end, CardTable::kCardSize);
           cur += CardTable::kCardSize) {
-        byte* card = card_table_->CardFromAddr(cur);
-        byte value = PseudoRandomCard(cur);
+        uint8_t* card = card_table_->CardFromAddr(cur);
+        uint8_t value = PseudoRandomCard(cur);
         EXPECT_EQ(visitor(value), *card);
         // Restore for next iteration.
         *card = value;
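
The test above pins down the ModifyCardsAtomic contract: the first functor maps
an old card value to a new one, the second is told about each byte that actually
changed. A sketch of a hypothetical caller in the same shape as UpdateVisitor
(constants inlined for self-containment; the real code uses CardTable::kCardDirty
and kCardClean):

    #include <cstdint>
    #include <vector>

    // Age dirty cards to clean.
    struct AgeVisitor {
      uint8_t operator()(uint8_t card) const {
        return card == 0x70 /* dirty */ ? 0x00 /* clean */ : card;
      }
    };

    // Record every card that was dirty before the update.
    struct RecordModified {
      explicit RecordModified(std::vector<uint8_t*>* out) : out_(out) {}
      void operator()(uint8_t* card, uint8_t expected, uint8_t /*new_value*/) const {
        if (expected == 0x70) {
          out_->push_back(card);
        }
      }
      std::vector<uint8_t*>* const out_;
    };

    // Usage: card_table->ModifyCardsAtomic(begin, end, AgeVisitor(),
    //                                      RecordModified(&cleared));
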
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 3acf80d..753b42d 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -45,7 +45,7 @@
     : cleared_cards_(cleared_cards) {
   }
 
-  inline void operator()(byte* card, byte expected_value, byte new_value) const {
+  inline void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
     if (expected_value == CardTable::kCardDirty) {
       cleared_cards_->insert(card);
     }
@@ -57,17 +57,17 @@
 
 class ModUnionClearCardVisitor {
  public:
-  explicit ModUnionClearCardVisitor(std::vector<byte*>* cleared_cards)
+  explicit ModUnionClearCardVisitor(std::vector<uint8_t*>* cleared_cards)
     : cleared_cards_(cleared_cards) {
   }
 
-  void operator()(byte* card, byte expected_card, byte new_card) const {
+  void operator()(uint8_t* card, uint8_t expected_card, uint8_t new_card) const {
     if (expected_card == CardTable::kCardDirty) {
       cleared_cards_->push_back(card);
     }
   }
  private:
-  std::vector<byte*>* const cleared_cards_;
+  std::vector<uint8_t*>* const cleared_cards_;
 };
 
 class ModUnionUpdateObjectReferencesVisitor {
@@ -242,7 +242,7 @@
   CardTable* card_table = heap_->GetCardTable();
   ContinuousSpaceBitmap* live_bitmap = space_->GetLiveBitmap();
   for (const auto& ref_pair : references_) {
-    const byte* card = ref_pair.first;
+    const uint8_t* card = ref_pair.first;
     if (*card == CardTable::kCardClean) {
       std::set<const Object*> reference_set;
       for (mirror::HeapReference<Object>* obj_ptr : ref_pair.second) {
@@ -258,14 +258,14 @@
 void ModUnionTableReferenceCache::Dump(std::ostream& os) {
   CardTable* card_table = heap_->GetCardTable();
   os << "ModUnionTable cleared cards: [";
-  for (byte* card_addr : cleared_cards_) {
+  for (uint8_t* card_addr : cleared_cards_) {
     uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
     uintptr_t end = start + CardTable::kCardSize;
     os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << ",";
   }
   os << "]\nModUnionTable references: [";
   for (const auto& ref_pair : references_) {
-    const byte* card_addr = ref_pair.first;
+    const uint8_t* card_addr = ref_pair.first;
     uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
     uintptr_t end = start + CardTable::kCardSize;
     os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "->{";
@@ -349,7 +349,7 @@
 void ModUnionTableCardCache::Dump(std::ostream& os) {
   CardTable* card_table = heap_->GetCardTable();
   os << "ModUnionTable dirty cards: [";
-  for (const byte* card_addr : cleared_cards_) {
+  for (const uint8_t* card_addr : cleared_cards_) {
     auto start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
     auto end = start + CardTable::kCardSize;
     os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "\n";
@@ -359,7 +359,7 @@
 
 void ModUnionTableCardCache::SetCards() {
   CardTable* card_table = heap_->GetCardTable();
-  for (byte* addr = space_->Begin(); addr < AlignUp(space_->End(), CardTable::kCardSize);
+  for (uint8_t* addr = space_->Begin(); addr < AlignUp(space_->End(), CardTable::kCardSize);
        addr += CardTable::kCardSize) {
     cleared_cards_.insert(card_table->CardFromAddr(addr));
   }
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index d0e11e0..d6342cf 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -50,8 +50,8 @@
 // cleared between GC phases, reducing the number of dirty cards that need to be scanned.
 class ModUnionTable {
  public:
-  typedef std::set<byte*, std::less<byte*>,
-                   TrackingAllocator<byte*, kAllocatorTagModUnionCardSet>> CardSet;
+  typedef std::set<uint8_t*, std::less<uint8_t*>,
+                   TrackingAllocator<uint8_t*, kAllocatorTagModUnionCardSet>> CardSet;
 
   explicit ModUnionTable(const std::string& name, Heap* heap, space::ContinuousSpace* space)
       : name_(name),
@@ -131,7 +131,7 @@
   ModUnionTable::CardSet cleared_cards_;
 
   // Maps from dirty cards to their corresponding alloc space references.
-  AllocationTrackingSafeMap<const byte*, std::vector<mirror::HeapReference<mirror::Object>*>,
+  AllocationTrackingSafeMap<const uint8_t*, std::vector<mirror::HeapReference<mirror::Object>*>,
                             kAllocatorTagModUnionReferenceArray> references_;
 };
 
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 3ff5874..d43dc0a 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -42,7 +42,7 @@
   explicit RememberedSetCardVisitor(RememberedSet::CardSet* const dirty_cards)
       : dirty_cards_(dirty_cards) {}
 
-  void operator()(byte* card, byte expected_value, byte new_value) const {
+  void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
     if (expected_value == CardTable::kCardDirty) {
       dirty_cards_->insert(card);
     }
@@ -129,7 +129,7 @@
                                          &contains_reference_to_target_space, arg);
   ContinuousSpaceBitmap* bitmap = space_->GetLiveBitmap();
   CardSet remove_card_set;
-  for (byte* const card_addr : dirty_cards_) {
+  for (uint8_t* const card_addr : dirty_cards_) {
     contains_reference_to_target_space = false;
     uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
     DCHECK(space_->HasAddress(reinterpret_cast<mirror::Object*>(start)));
@@ -145,7 +145,7 @@
 
   // Remove the cards that didn't contain a reference to the target
   // space from the dirty card set.
-  for (byte* const card_addr : remove_card_set) {
+  for (uint8_t* const card_addr : remove_card_set) {
     DCHECK(dirty_cards_.find(card_addr) != dirty_cards_.end());
     dirty_cards_.erase(card_addr);
   }
@@ -154,7 +154,7 @@
 void RememberedSet::Dump(std::ostream& os) {
   CardTable* card_table = heap_->GetCardTable();
   os << "RememberedSet dirty cards: [";
-  for (const byte* card_addr : dirty_cards_) {
+  for (const uint8_t* card_addr : dirty_cards_) {
     auto start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
     auto end = start + CardTable::kCardSize;
     os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "\n";
@@ -164,8 +164,8 @@
 
 void RememberedSet::AssertAllDirtyCardsAreWithinSpace() const {
   CardTable* card_table = heap_->GetCardTable();
-  for (const byte* card_addr : dirty_cards_) {
-    auto start = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
+  for (const uint8_t* card_addr : dirty_cards_) {
+    auto start = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
     auto end = start + CardTable::kCardSize;
     DCHECK_LE(space_->Begin(), start);
     DCHECK_LE(end, space_->Limit());
diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h
index 8d66e0e..c51e26d 100644
--- a/runtime/gc/accounting/remembered_set.h
+++ b/runtime/gc/accounting/remembered_set.h
@@ -43,8 +43,8 @@
 // from the free list spaces to the bump pointer spaces.
 class RememberedSet {
  public:
-  typedef std::set<byte*, std::less<byte*>,
-                   TrackingAllocator<byte*, kAllocatorTagRememberedSet>> CardSet;
+  typedef std::set<uint8_t*, std::less<uint8_t*>,
+                   TrackingAllocator<uint8_t*, kAllocatorTagRememberedSet>> CardSet;
 
   explicit RememberedSet(const std::string& name, Heap* heap, space::ContinuousSpace* space)
       : name_(name), heap_(heap), space_(space) {}
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index fc4213e..11347a5 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -35,10 +35,10 @@
   DCHECK_GE(addr, heap_begin_);
   const uintptr_t offset = addr - heap_begin_;
   const size_t index = OffsetToIndex(offset);
-  const uword mask = OffsetToMask(offset);
-  Atomic<uword>* atomic_entry = reinterpret_cast<Atomic<uword>*>(&bitmap_begin_[index]);
-  DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
-  uword old_word;
+  const uintptr_t mask = OffsetToMask(offset);
+  Atomic<uintptr_t>* atomic_entry = reinterpret_cast<Atomic<uintptr_t>*>(&bitmap_begin_[index]);
+  DCHECK_LT(index, bitmap_size_ / sizeof(intptr_t)) << " bitmap_size_ = " << bitmap_size_;
+  uintptr_t old_word;
   do {
     old_word = atomic_entry->LoadRelaxed();
     // Fast path: The bit is already set.
@@ -82,8 +82,8 @@
   const uintptr_t index_start = OffsetToIndex(offset_start);
   const uintptr_t index_end = OffsetToIndex(offset_end);
 
-  const size_t bit_start = (offset_start / kAlignment) % kBitsPerWord;
-  const size_t bit_end = (offset_end / kAlignment) % kBitsPerWord;
+  const size_t bit_start = (offset_start / kAlignment) % kBitsPerIntPtrT;
+  const size_t bit_end = (offset_end / kAlignment) % kBitsPerIntPtrT;
 
   // Index(begin)  ...    Index(end)
   // [xxxxx???][........][????yyyy]
@@ -93,12 +93,12 @@
   //
 
   // Left edge.
-  uword left_edge = bitmap_begin_[index_start];
+  uintptr_t left_edge = bitmap_begin_[index_start];
   // Mark of lower bits that are not in range.
-  left_edge &= ~((static_cast<uword>(1) << bit_start) - 1);
+  left_edge &= ~((static_cast<uintptr_t>(1) << bit_start) - 1);
 
   // Right edge. Either unique, or left_edge.
-  uword right_edge;
+  uintptr_t right_edge;
 
   if (index_start < index_end) {
     // Left edge != right edge.
@@ -110,20 +110,20 @@
         const size_t shift = CTZ(left_edge);
         mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
         visitor(obj);
-        left_edge ^= (static_cast<uword>(1)) << shift;
+        left_edge ^= (static_cast<uintptr_t>(1)) << shift;
       } while (left_edge != 0);
     }
 
     // Traverse the middle, full part.
     for (size_t i = index_start + 1; i < index_end; ++i) {
-      uword w = bitmap_begin_[i];
+      uintptr_t w = bitmap_begin_[i];
       if (w != 0) {
         const uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
         do {
           const size_t shift = CTZ(w);
           mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
           visitor(obj);
-          w ^= (static_cast<uword>(1)) << shift;
+          w ^= (static_cast<uintptr_t>(1)) << shift;
         } while (w != 0);
       }
     }
@@ -142,14 +142,14 @@
   }
 
   // Right edge handling.
-  right_edge &= ((static_cast<uword>(1) << bit_end) - 1);
+  right_edge &= ((static_cast<uintptr_t>(1) << bit_end) - 1);
   if (right_edge != 0) {
     const uintptr_t ptr_base = IndexToOffset(index_end) + heap_begin_;
     do {
       const size_t shift = CTZ(right_edge);
       mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
       visitor(obj);
-      right_edge ^= (static_cast<uword>(1)) << shift;
+      right_edge ^= (static_cast<uintptr_t>(1)) << shift;
     } while (right_edge != 0);
   }
 #endif
@@ -161,10 +161,10 @@
   DCHECK_GE(addr, heap_begin_);
   const uintptr_t offset = addr - heap_begin_;
   const size_t index = OffsetToIndex(offset);
-  const uword mask = OffsetToMask(offset);
-  DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
-  uword* address = &bitmap_begin_[index];
-  uword old_word = *address;
+  const uintptr_t mask = OffsetToMask(offset);
+  DCHECK_LT(index, bitmap_size_ / sizeof(intptr_t)) << " bitmap_size_ = " << bitmap_size_;
+  uintptr_t* address = &bitmap_begin_[index];
+  uintptr_t old_word = *address;
   if (kSetBit) {
     *address = old_word | mask;
   } else {
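
The index/mask pair that these hunks touch is plain fixed-point addressing:
divide the heap offset by the object alignment to get a bit number, then split
it into a word index and a bit within that word. Restated with standard types
only (constants illustrative; kBitsPerIntPtrT follows the renamed ART constant):

    #include <climits>
    #include <cstddef>
    #include <cstdint>

    constexpr size_t kAlignment = 8;  // illustrative object alignment
    constexpr size_t kBitsPerIntPtrT = sizeof(intptr_t) * CHAR_BIT;

    constexpr size_t OffsetToIndex(size_t offset) {
      return offset / kAlignment / kBitsPerIntPtrT;  // which word
    }

    constexpr uintptr_t OffsetToMask(uintptr_t offset) {
      return static_cast<uintptr_t>(1)
             << ((offset / kAlignment) % kBitsPerIntPtrT);  // which bit
    }

    // Setting the live bit for an object at heap offset off:
    //   bitmap[OffsetToIndex(off)] |= OffsetToMask(off);
    static_assert(OffsetToIndex(kAlignment * kBitsPerIntPtrT) == 1,
                  "first offset of the second word");
    static_assert(OffsetToMask(kAlignment) == 2, "second bit of the first word");
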
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 39d1f9e..feb9565 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -29,21 +29,21 @@
 
 template<size_t kAlignment>
 size_t SpaceBitmap<kAlignment>::ComputeBitmapSize(uint64_t capacity) {
-  const uint64_t kBytesCoveredPerWord = kAlignment * kBitsPerWord;
-  return (RoundUp(capacity, kBytesCoveredPerWord) / kBytesCoveredPerWord) * kWordSize;
+  const uint64_t kBytesCoveredPerWord = kAlignment * kBitsPerIntPtrT;
+  return (RoundUp(capacity, kBytesCoveredPerWord) / kBytesCoveredPerWord) * sizeof(intptr_t);
 }
 
 template<size_t kAlignment>
 SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::CreateFromMemMap(
-    const std::string& name, MemMap* mem_map, byte* heap_begin, size_t heap_capacity) {
+    const std::string& name, MemMap* mem_map, uint8_t* heap_begin, size_t heap_capacity) {
   CHECK(mem_map != nullptr);
-  uword* bitmap_begin = reinterpret_cast<uword*>(mem_map->Begin());
+  uintptr_t* bitmap_begin = reinterpret_cast<uintptr_t*>(mem_map->Begin());
   const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
   return new SpaceBitmap(name, mem_map, bitmap_begin, bitmap_size, heap_begin);
 }
 
 template<size_t kAlignment>
-SpaceBitmap<kAlignment>::SpaceBitmap(const std::string& name, MemMap* mem_map, uword* bitmap_begin,
+SpaceBitmap<kAlignment>::SpaceBitmap(const std::string& name, MemMap* mem_map, uintptr_t* bitmap_begin,
                                      size_t bitmap_size, const void* heap_begin)
     : mem_map_(mem_map), bitmap_begin_(bitmap_begin), bitmap_size_(bitmap_size),
       heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
@@ -57,7 +57,7 @@
 
 template<size_t kAlignment>
 SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create(
-    const std::string& name, byte* heap_begin, size_t heap_capacity) {
+    const std::string& name, uint8_t* heap_begin, size_t heap_capacity) {
   // Round up since heap_capacity is not necessarily a multiple of kAlignment * kBitsPerIntPtrT.
   const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
   std::string error_msg;
@@ -72,8 +72,8 @@
 
 template<size_t kAlignment>
 void SpaceBitmap<kAlignment>::SetHeapLimit(uintptr_t new_end) {
-  DCHECK(IsAligned<kBitsPerWord * kAlignment>(new_end));
-  size_t new_size = OffsetToIndex(new_end - heap_begin_) * kWordSize;
+  DCHECK(IsAligned<kBitsPerIntPtrT * kAlignment>(new_end));
+  size_t new_size = OffsetToIndex(new_end - heap_begin_) * sizeof(intptr_t);
   if (new_size < bitmap_size_) {
     bitmap_size_ = new_size;
   }
@@ -97,7 +97,7 @@
 template<size_t kAlignment>
 void SpaceBitmap<kAlignment>::CopyFrom(SpaceBitmap* source_bitmap) {
   DCHECK_EQ(Size(), source_bitmap->Size());
-  std::copy(source_bitmap->Begin(), source_bitmap->Begin() + source_bitmap->Size() / kWordSize, Begin());
+  std::copy(source_bitmap->Begin(), source_bitmap->Begin() + source_bitmap->Size() / sizeof(intptr_t), Begin());
 }
 
 template<size_t kAlignment>
@@ -106,16 +106,16 @@
   CHECK(callback != NULL);
 
   uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
-  uword* bitmap_begin = bitmap_begin_;
+  uintptr_t* bitmap_begin = bitmap_begin_;
   for (uintptr_t i = 0; i <= end; ++i) {
-    uword w = bitmap_begin[i];
+    uintptr_t w = bitmap_begin[i];
     if (w != 0) {
       uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
       do {
         const size_t shift = CTZ(w);
         mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
         (*callback)(obj, arg);
-        w ^= (static_cast<uword>(1)) << shift;
+        w ^= (static_cast<uintptr_t>(1)) << shift;
       } while (w != 0);
     }
   }
@@ -139,7 +139,7 @@
   }
 
   // TODO: rewrite the callbacks to accept a std::vector<mirror::Object*> rather than a mirror::Object**?
-  constexpr size_t buffer_size = kWordSize * kBitsPerWord;
+  constexpr size_t buffer_size = sizeof(intptr_t) * kBitsPerIntPtrT;
 #ifdef __LP64__
   // Heap-allocate for smaller stack frame.
   std::unique_ptr<mirror::Object*[]> pointer_buf_ptr(new mirror::Object*[buffer_size]);
@@ -152,21 +152,21 @@
 
   size_t start = OffsetToIndex(sweep_begin - live_bitmap.heap_begin_);
   size_t end = OffsetToIndex(sweep_end - live_bitmap.heap_begin_ - 1);
-  CHECK_LT(end, live_bitmap.Size() / kWordSize);
-  uword* live = live_bitmap.bitmap_begin_;
-  uword* mark = mark_bitmap.bitmap_begin_;
+  CHECK_LT(end, live_bitmap.Size() / sizeof(intptr_t));
+  uintptr_t* live = live_bitmap.bitmap_begin_;
+  uintptr_t* mark = mark_bitmap.bitmap_begin_;
   for (size_t i = start; i <= end; i++) {
-    uword garbage = live[i] & ~mark[i];
+    uintptr_t garbage = live[i] & ~mark[i];
     if (UNLIKELY(garbage != 0)) {
       uintptr_t ptr_base = IndexToOffset(i) + live_bitmap.heap_begin_;
       do {
         const size_t shift = CTZ(garbage);
-        garbage ^= (static_cast<uword>(1)) << shift;
+        garbage ^= (static_cast<uintptr_t>(1)) << shift;
         *pb++ = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
       } while (garbage != 0);
       // Make sure that there are always enough slots available for an
       // entire word's worth of one-bits.
-      if (pb >= &pointer_buf[buffer_size - kBitsPerWord]) {
+      if (pb >= &pointer_buf[buffer_size - kBitsPerIntPtrT]) {
         (*callback)(pb - &pointer_buf[0], &pointer_buf[0], arg);
         pb = &pointer_buf[0];
       }
@@ -245,21 +245,21 @@
 template<size_t kAlignment>
 void SpaceBitmap<kAlignment>::InOrderWalk(ObjectCallback* callback, void* arg) {
   std::unique_ptr<SpaceBitmap<kAlignment>> visited(
-      Create("bitmap for in-order walk", reinterpret_cast<byte*>(heap_begin_),
-             IndexToOffset(bitmap_size_ / kWordSize)));
+      Create("bitmap for in-order walk", reinterpret_cast<uint8_t*>(heap_begin_),
+             IndexToOffset(bitmap_size_ / sizeof(intptr_t))));
   CHECK(bitmap_begin_ != nullptr);
   CHECK(callback != nullptr);
-  uintptr_t end = Size() / kWordSize;
+  uintptr_t end = Size() / sizeof(intptr_t);
   for (uintptr_t i = 0; i < end; ++i) {
     // Need an unsigned type for the shift.
-    uword w = bitmap_begin_[i];
+    uintptr_t w = bitmap_begin_[i];
     if (UNLIKELY(w != 0)) {
       uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
       while (w != 0) {
         const size_t shift = CTZ(w);
         mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
         WalkFieldsInOrder(visited.get(), callback, obj, arg);
-        w ^= (static_cast<uword>(1)) << shift;
+        w ^= (static_cast<uintptr_t>(1)) << shift;
       }
     }
   }
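
Every walk in this file uses the same bit-peeling idiom: take the lowest set bit
with count-trailing-zeros, visit the object it encodes, clear it, repeat. A
minimal sketch, with __builtin_ctzl standing in for ART's CTZ helper:

    #include <cstddef>
    #include <cstdint>

    template <typename Visitor>
    void VisitSetBits(uintptr_t word, uintptr_t ptr_base, size_t alignment,
                      Visitor&& visitor) {
      while (word != 0) {
        const size_t shift = __builtin_ctzl(word);     // lowest set bit
        visitor(ptr_base + shift * alignment);         // decode its address
        word ^= static_cast<uintptr_t>(1) << shift;    // clear it, continue
      }
    }

    // e.g. VisitSetBits(bitmap_begin[i], IndexToOffset(i) + heap_begin,
    //                   kAlignment, [&](uintptr_t addr) { /* visit */ });
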
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index f72b30f..e73166b 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -45,13 +45,13 @@
 
   // Initialize a space bitmap so that it points to a bitmap large enough to cover a heap at
   // heap_begin of heap_capacity bytes, where objects are guaranteed to be kAlignment-aligned.
-  static SpaceBitmap* Create(const std::string& name, byte* heap_begin, size_t heap_capacity);
+  static SpaceBitmap* Create(const std::string& name, uint8_t* heap_begin, size_t heap_capacity);
 
   // Initialize a space bitmap using the provided mem_map as the live bits. Takes ownership of the
   // mem map. The address range covered starts at heap_begin and is of size equal to heap_capacity.
   // Objects are kAlignment-aligned.
   static SpaceBitmap* CreateFromMemMap(const std::string& name, MemMap* mem_map,
-                                       byte* heap_begin, size_t heap_capacity);
+                                       uint8_t* heap_begin, size_t heap_capacity);
 
   ~SpaceBitmap();
 
@@ -59,17 +59,17 @@
   // <index> is the index of .bits that contains the bit representing
   //         <offset>.
   static constexpr size_t OffsetToIndex(size_t offset) {
-    return offset / kAlignment / kBitsPerWord;
+    return offset / kAlignment / kBitsPerIntPtrT;
   }
 
   template<typename T>
   static constexpr T IndexToOffset(T index) {
-    return static_cast<T>(index * kAlignment * kBitsPerWord);
+    return static_cast<T>(index * kAlignment * kBitsPerIntPtrT);
   }
 
   // Bits are packed in the obvious way.
-  static constexpr uword OffsetToMask(uintptr_t offset) {
-    return (static_cast<size_t>(1)) << ((offset / kAlignment) % kBitsPerWord);
+  static constexpr uintptr_t OffsetToMask(uintptr_t offset) {
+    return (static_cast<size_t>(1)) << ((offset / kAlignment) % kBitsPerIntPtrT);
   }
 
   bool Set(const mirror::Object* obj) ALWAYS_INLINE {
@@ -95,7 +95,7 @@
     // bitmap.
     const uintptr_t offset = reinterpret_cast<uintptr_t>(obj) - heap_begin_;
     const size_t index = OffsetToIndex(offset);
-    return index < bitmap_size_ / kWordSize;
+    return index < bitmap_size_ / sizeof(intptr_t);
   }
 
   void VisitRange(uintptr_t base, uintptr_t max, ObjectCallback* callback, void* arg) const;
@@ -146,7 +146,7 @@
   void CopyFrom(SpaceBitmap* source_bitmap);
 
   // Starting address of our internal storage.
-  uword* Begin() {
+  uintptr_t* Begin() {
     return bitmap_begin_;
   }
 
@@ -157,7 +157,7 @@
 
   // Size in bytes of the memory that the bitmap spans.
   uint64_t HeapSize() const {
-    return IndexToOffset<uint64_t>(Size() / kWordSize);
+    return IndexToOffset<uint64_t>(Size() / sizeof(intptr_t));
   }
 
   uintptr_t HeapBegin() const {
@@ -192,7 +192,7 @@
  private:
   // TODO: heap_end_ is initialized so that the heap bitmap is empty; this doesn't require the -1,
   // but we document that this is expected of heap_end_.
-  SpaceBitmap(const std::string& name, MemMap* mem_map, uword* bitmap_begin, size_t bitmap_size,
+  SpaceBitmap(const std::string& name, MemMap* mem_map, uintptr_t* bitmap_begin, size_t bitmap_size,
               const void* heap_begin);
 
   // Helper function for computing bitmap size based on a 64 bit capacity.
@@ -214,7 +214,7 @@
   std::unique_ptr<MemMap> mem_map_;
 
   // The bitmap itself, word-sized for efficiency in scanning.
-  uword* const bitmap_begin_;
+  uintptr_t* const bitmap_begin_;
 
   // Size of this bitmap.
   size_t bitmap_size_;
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index a30bb25..40856fc 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -30,7 +30,7 @@
 class SpaceBitmapTest : public CommonRuntimeTest {};
 
 TEST_F(SpaceBitmapTest, Init) {
-  byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
+  uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x10000000);
   size_t heap_capacity = 16 * MB;
   std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
       ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
@@ -51,21 +51,21 @@
     EXPECT_EQ(bitmap_->Test(obj), ((reinterpret_cast<uintptr_t>(obj) & 0xF) != 0));
   }
 
-  ContinuousSpaceBitmap* bitmap_;
+  ContinuousSpaceBitmap* const bitmap_;
   const mirror::Object* begin_;
   const mirror::Object* end_;
 };
 
 TEST_F(SpaceBitmapTest, ScanRange) {
-  byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
+  uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x10000000);
   size_t heap_capacity = 16 * MB;
 
   std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
       ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
   EXPECT_TRUE(space_bitmap.get() != NULL);
 
-  // Set all the odd bits in the first BitsPerWord * 3 to one.
-  for (size_t j = 0; j < kBitsPerWord * 3; ++j) {
+  // Set all the odd bits in the first kBitsPerIntPtrT * 3 bits to one.
+  for (size_t j = 0; j < kBitsPerIntPtrT * 3; ++j) {
     const mirror::Object* obj =
         reinterpret_cast<mirror::Object*>(heap_begin + j * kObjectAlignment);
     if (reinterpret_cast<uintptr_t>(obj) & 0xF) {
@@ -76,10 +76,10 @@
   // possible length up to a maximum of kBitsPerIntPtrT * 2 - 1 bits.
   // This handles all the cases, having runs which start and end on the same word, and different
   // words.
-  for (size_t i = 0; i < static_cast<size_t>(kBitsPerWord); ++i) {
+  for (size_t i = 0; i < static_cast<size_t>(kBitsPerIntPtrT); ++i) {
     mirror::Object* start =
         reinterpret_cast<mirror::Object*>(heap_begin + i * kObjectAlignment);
-    for (size_t j = 0; j < static_cast<size_t>(kBitsPerWord * 2); ++j) {
+    for (size_t j = 0; j < static_cast<size_t>(kBitsPerIntPtrT * 2); ++j) {
       mirror::Object* end =
           reinterpret_cast<mirror::Object*>(heap_begin + (i + j) * kObjectAlignment);
       BitmapVerify(space_bitmap.get(), start, end);
@@ -95,7 +95,7 @@
     (*count_)++;
   }
 
-  size_t* count_;
+  size_t* const count_;
 };
 
 class RandGen {
@@ -112,7 +112,7 @@
 
 template <size_t kAlignment>
 void RunTest() NO_THREAD_SAFETY_ANALYSIS {
-  byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
+  uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x10000000);
   size_t heap_capacity = 16 * MB;
 
   // Seed with 0x1234 for reproducibility.
diff --git a/runtime/gc/allocator/rosalloc-inl.h b/runtime/gc/allocator/rosalloc-inl.h
index c69ca48..dd419a4 100644
--- a/runtime/gc/allocator/rosalloc-inl.h
+++ b/runtime/gc/allocator/rosalloc-inl.h
@@ -36,7 +36,7 @@
   }
   // Check if the returned memory is really all zero.
   if (kCheckZeroMemory && m != nullptr) {
-    byte* bytes = reinterpret_cast<byte*>(m);
+    uint8_t* bytes = reinterpret_cast<uint8_t*>(m);
     for (size_t i = 0; i < size; ++i) {
       DCHECK_EQ(bytes[i], 0);
     }
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index a7e5e74..a3408cf 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -49,7 +49,7 @@
 
 RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
                    PageReleaseMode page_release_mode, size_t page_release_size_threshold)
-    : base_(reinterpret_cast<byte*>(base)), footprint_(capacity),
+    : base_(reinterpret_cast<uint8_t*>(base)), footprint_(capacity),
       capacity_(capacity), max_capacity_(max_capacity),
       lock_("rosalloc global lock", kRosAllocGlobalLock),
       bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
@@ -107,7 +107,7 @@
   }
 }
 
-void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
+void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
   lock_.AssertHeld(self);
   DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
   FreePageRun* res = NULL;
@@ -128,7 +128,7 @@
       }
       if (req_byte_size < fpr_byte_size) {
         // Split.
-        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
+        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
         if (kIsDebugBuild) {
           remainder->magic_num_ = kMagicNumFree;
         }
@@ -226,7 +226,7 @@
       }
       if (req_byte_size < fpr_byte_size) {
         // Split if there's a remainder.
-        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
+        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
         if (kIsDebugBuild) {
           remainder->magic_num_ = kMagicNumFree;
         }
@@ -290,9 +290,9 @@
   lock_.AssertHeld(self);
   size_t pm_idx = ToPageMapIndex(ptr);
   DCHECK_LT(pm_idx, page_map_size_);
-  byte pm_type = page_map_[pm_idx];
+  uint8_t pm_type = page_map_[pm_idx];
   DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
-  byte pm_part_type;
+  uint8_t pm_part_type;
   switch (pm_type) {
   case kPageMapRun:
     pm_part_type = kPageMapRunPart;
@@ -319,8 +319,8 @@
   const size_t byte_size = num_pages * kPageSize;
   if (already_zero) {
     if (kCheckZeroMemory) {
-      const uword* word_ptr = reinterpret_cast<uword*>(ptr);
-      for (size_t i = 0; i < byte_size / sizeof(uword); ++i) {
+      const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(ptr);
+      for (size_t i = 0; i < byte_size / sizeof(uintptr_t); ++i) {
         CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
       }
     }
@@ -473,9 +473,9 @@
   }
   // Check if the returned memory is really all zero.
   if (kCheckZeroMemory) {
-    CHECK_EQ(total_bytes % sizeof(uword), 0U);
-    const uword* words = reinterpret_cast<uword*>(r);
-    for (size_t i = 0; i < total_bytes / sizeof(uword); ++i) {
+    CHECK_EQ(total_bytes % sizeof(uintptr_t), 0U);
+    const uintptr_t* words = reinterpret_cast<uintptr_t*>(r);
+    for (size_t i = 0; i < total_bytes / sizeof(uintptr_t); ++i) {
       CHECK_EQ(words[i], 0U);
     }
   }
@@ -490,7 +490,7 @@
   {
     MutexLock mu(self, lock_);
     DCHECK_LT(pm_idx, page_map_size_);
-    byte page_map_entry = page_map_[pm_idx];
+    uint8_t page_map_entry = page_map_[pm_idx];
     if (kTraceRosAlloc) {
       LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                 << ", page_map_entry=" << static_cast<int>(page_map_entry);
@@ -557,7 +557,7 @@
         const size_t num_of_slots = numOfSlots[idx];
         const size_t bracket_size = bracketSizes[idx];
         const size_t num_of_bytes = num_of_slots * bracket_size;
-        byte* begin = reinterpret_cast<byte*>(new_run) + headerSizes[idx];
+        uint8_t* begin = reinterpret_cast<uint8_t*>(new_run) + headerSizes[idx];
         for (size_t i = 0; i < num_of_bytes; i += kPrefetchStride) {
           __builtin_prefetch(begin + i);
         }
@@ -869,7 +869,7 @@
       DCHECK_EQ(*alloc_bitmap_ptr & mask, 0U);
       *alloc_bitmap_ptr |= mask;
       DCHECK_NE(*alloc_bitmap_ptr & mask, 0U);
-      byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
+      uint8_t* slot_addr = reinterpret_cast<uint8_t*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
       if (kTraceRosAlloc) {
         LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                   << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
@@ -889,10 +889,10 @@
 
 void RosAlloc::Run::FreeSlot(void* ptr) {
   DCHECK(!IsThreadLocal());
-  const byte idx = size_bracket_idx_;
+  const uint8_t idx = size_bracket_idx_;
   const size_t bracket_size = bracketSizes[idx];
-  const size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
-      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
+  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
+      - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
   DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
   size_t slot_idx = offset_from_slot_base / bracket_size;
   DCHECK_LT(slot_idx, numOfSlots[idx]);
@@ -1001,9 +1001,9 @@
 
 inline size_t RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
                                                   const char* caller_name) {
-  const byte idx = size_bracket_idx_;
-  const size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
-      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
+  const uint8_t idx = size_bracket_idx_;
+  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
+      - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
   const size_t bracket_size = bracketSizes[idx];
   memset(ptr, 0, bracket_size);
   DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
@@ -1037,7 +1037,7 @@
 }
 
 inline bool RosAlloc::Run::IsAllFree() {
-  const byte idx = size_bracket_idx_;
+  const uint8_t idx = size_bracket_idx_;
   const size_t num_slots = numOfSlots[idx];
   const size_t num_vec = NumberOfBitmapVectors();
   DCHECK_NE(num_vec, 0U);
@@ -1095,13 +1095,13 @@
 }
 
 inline void RosAlloc::Run::ZeroHeader() {
-  const byte idx = size_bracket_idx_;
+  const uint8_t idx = size_bracket_idx_;
   memset(this, 0, headerSizes[idx]);
 }
 
 inline void RosAlloc::Run::ZeroData() {
-  const byte idx = size_bracket_idx_;
-  byte* slot_begin = reinterpret_cast<byte*>(this) + headerSizes[idx];
+  const uint8_t idx = size_bracket_idx_;
+  uint8_t* slot_begin = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
   memset(slot_begin, 0, numOfSlots[idx] * bracketSizes[idx]);
 }
 
@@ -1114,10 +1114,10 @@
 void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                                     void* arg) {
   size_t idx = size_bracket_idx_;
-  byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
+  uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
   size_t num_slots = numOfSlots[idx];
   size_t bracket_size = IndexToBracketSize(idx);
-  DCHECK_EQ(slot_base + num_slots * bracket_size, reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize);
+  DCHECK_EQ(slot_base + num_slots * bracket_size, reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize);
   size_t num_vec = RoundUp(num_slots, 32) / 32;
   size_t slots = 0;
   for (size_t v = 0; v < num_vec; v++, slots += 32) {
@@ -1126,7 +1126,7 @@
     size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
     for (size_t i = 0; i < end; ++i) {
       bool is_allocated = ((vec >> i) & 0x1) != 0;
-      byte* slot_addr = slot_base + (slots + i) * bracket_size;
+      uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
       if (is_allocated) {
         handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
       } else {
@@ -1169,7 +1169,7 @@
     Run* run = nullptr;
     if (kReadPageMapEntryWithoutLockInBulkFree) {
       // Read the page map entries without locking the lock.
-      byte page_map_entry = page_map_[pm_idx];
+      uint8_t page_map_entry = page_map_[pm_idx];
       if (kTraceRosAlloc) {
         LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                   << std::dec << pm_idx
@@ -1196,7 +1196,7 @@
       // Read the page map entries with a lock.
       MutexLock mu(self, lock_);
       DCHECK_LT(pm_idx, page_map_size_);
-      byte page_map_entry = page_map_[pm_idx];
+      uint8_t page_map_entry = page_map_[pm_idx];
       if (kTraceRosAlloc) {
         LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                   << std::dec << pm_idx
@@ -1354,7 +1354,7 @@
   size_t remaining_curr_fpr_size = 0;
   size_t num_running_empty_pages = 0;
   for (size_t i = 0; i < end; ++i) {
-    byte pm = page_map_[i];
+    uint8_t pm = page_map_[i];
     switch (pm) {
       case kPageMapReleased:
         // Fall-through.
@@ -1472,8 +1472,8 @@
       Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
       DCHECK_EQ(run->magic_num_, kMagicNum);
       size_t idx = run->size_bracket_idx_;
-      size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
-          - (reinterpret_cast<byte*>(run) + headerSizes[idx]);
+      size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
+          - (reinterpret_cast<uint8_t*>(run) + headerSizes[idx]);
       DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
       return IndexToBracketSize(idx);
     }
@@ -1503,8 +1503,8 @@
     size_t new_num_of_pages = new_footprint / kPageSize;
     DCHECK_GE(page_map_size_, new_num_of_pages);
     // Zero out the tail of the page map.
-    byte* zero_begin = const_cast<byte*>(page_map_) + new_num_of_pages;
-    byte* madvise_begin = AlignUp(zero_begin, kPageSize);
+    uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
+    uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
     DCHECK_LE(madvise_begin, page_map_mem_map_->End());
     size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
     if (madvise_size > 0) {
@@ -1544,7 +1544,7 @@
   size_t pm_end = page_map_size_;
   size_t i = 0;
   while (i < pm_end) {
-    byte pm = page_map_[i];
+    uint8_t pm = page_map_[i];
     switch (pm) {
       case kPageMapReleased:
         // Fall-through.
@@ -1558,9 +1558,9 @@
         if (kIsDebugBuild) {
           // In the debug build, the first page of a free page run
           // contains a magic number for debugging. Exclude it.
-          start = reinterpret_cast<byte*>(fpr) + kPageSize;
+          start = reinterpret_cast<uint8_t*>(fpr) + kPageSize;
         }
-        void* end = reinterpret_cast<byte*>(fpr) + fpr_size;
+        void* end = reinterpret_cast<uint8_t*>(fpr) + fpr_size;
         handler(start, end, 0, arg);
         size_t num_pages = fpr_size / kPageSize;
         if (kIsDebugBuild) {
@@ -1879,7 +1879,7 @@
     size_t pm_end = page_map_size_;
     size_t i = 0;
     while (i < pm_end) {
-      byte pm = page_map_[i];
+      uint8_t pm = page_map_[i];
       switch (pm) {
         case kPageMapReleased:
           // Fall-through.
@@ -1994,13 +1994,13 @@
   DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
   const size_t idx = size_bracket_idx_;
   CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
-  byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
+  uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
   const size_t num_slots = numOfSlots[idx];
   const size_t num_vec = RoundUp(num_slots, 32) / 32;
   CHECK_GT(num_vec, 0U);
   size_t bracket_size = IndexToBracketSize(idx);
   CHECK_EQ(slot_base + num_slots * bracket_size,
-           reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize)
+           reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize)
       << "Mismatch in the end address of the run " << Dump();
   // Check that the bulk free bitmap is clean. It's only used during BulkFree().
   CHECK(IsBulkFreeBitmapClean()) << "The bulk free bit map isn't clean " << Dump();
@@ -2084,7 +2084,7 @@
       // thread local free bitmap.
       bool is_thread_local_freed = IsThreadLocal() && ((thread_local_free_vec >> i) & 0x1) != 0;
       if (is_allocated && !is_thread_local_freed) {
-        byte* slot_addr = slot_base + (slots + i) * bracket_size;
+        uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
         mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
         size_t obj_size = obj->SizeOf();
         CHECK_LE(obj_size, kLargeSizeThreshold)
@@ -2108,7 +2108,7 @@
   while (i < page_map_size_) {
     // Reading the page map without a lock is racy but the race is benign since it should only
     // result in occasionally not releasing pages which we could release.
-    byte pm = page_map_[i];
+    uint8_t pm = page_map_[i];
     switch (pm) {
       case kPageMapReleased:
         // Fall through.
@@ -2129,7 +2129,7 @@
           if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
             size_t fpr_size = fpr->ByteSize(this);
             DCHECK(IsAligned<kPageSize>(fpr_size));
-            byte* start = reinterpret_cast<byte*>(fpr);
+            uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
             reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
             size_t pages = fpr_size / kPageSize;
             CHECK_GT(pages, 0U) << "Infinite loop probable";
@@ -2154,7 +2154,7 @@
   return reclaimed_bytes;
 }
 
-size_t RosAlloc::ReleasePageRange(byte* start, byte* end) {
+size_t RosAlloc::ReleasePageRange(uint8_t* start, uint8_t* end) {
   DCHECK_ALIGNED(start, kPageSize);
   DCHECK_ALIGNED(end, kPageSize);
   DCHECK_LT(start, end);
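
ReleasePageRange now takes uint8_t* bounds. A hedged sketch of its core effect under the
usual POSIX assumptions; the real method also re-checks the debug magic number and tracks
the reclaimed byte count more carefully:

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>

    // Return a page-aligned range to the kernel; the pages read back as zero.
    size_t ReleaseRange(uint8_t* start, uint8_t* end) {
      const size_t byte_size = end - start;
      madvise(start, byte_size, MADV_DONTNEED);
      return byte_size;
    }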
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 2fbd97a..8374ff7 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -44,13 +44,13 @@
   // Represents a run of free pages.
   class FreePageRun {
    public:
-    byte magic_num_;  // The magic number used for debugging only.
+    uint8_t magic_num_;  // The magic number used for debugging only.
 
     bool IsFree() const {
       return !kIsDebugBuild || magic_num_ == kMagicNumFree;
     }
     size_t ByteSize(RosAlloc* rosalloc) const EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
-      const byte* fpr_base = reinterpret_cast<const byte*>(this);
+      const uint8_t* fpr_base = reinterpret_cast<const uint8_t*>(this);
       size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
       size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
       DCHECK_GE(byte_size, static_cast<size_t>(0));
@@ -60,7 +60,7 @@
     void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
         EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
       DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
-      byte* fpr_base = reinterpret_cast<byte*>(this);
+      uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
       size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
       rosalloc->free_page_run_size_map_[pm_idx] = byte_size;
     }
@@ -68,8 +68,8 @@
       return reinterpret_cast<void*>(this);
     }
     void* End(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
-      byte* fpr_base = reinterpret_cast<byte*>(this);
-      byte* end = fpr_base + ByteSize(rosalloc);
+      uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
+      uint8_t* end = fpr_base + ByteSize(rosalloc);
       return end;
     }
     bool IsLargerThanPageReleaseThreshold(RosAlloc* rosalloc)
@@ -78,7 +78,7 @@
     }
     bool IsAtEndOfSpace(RosAlloc* rosalloc)
         EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
-      return reinterpret_cast<byte*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
+      return reinterpret_cast<uint8_t*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
     }
     bool ShouldReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
       switch (rosalloc->page_release_mode_) {
@@ -98,7 +98,7 @@
       }
     }
     void ReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
-      byte* start = reinterpret_cast<byte*>(this);
+      uint8_t* start = reinterpret_cast<uint8_t*>(this);
       size_t byte_size = ByteSize(rosalloc);
       DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
       if (ShouldReleasePages(rosalloc)) {
@@ -151,10 +151,10 @@
   //
   class Run {
    public:
-    byte magic_num_;                 // The magic number used for debugging.
-    byte size_bracket_idx_;          // The index of the size bracket of this run.
-    byte is_thread_local_;           // True if this run is used as a thread-local run.
-    byte to_be_bulk_freed_;          // Used within BulkFree() to flag a run that's involved with a bulk free.
+    uint8_t magic_num_;                 // The magic number used for debugging.
+    uint8_t size_bracket_idx_;          // The index of the size bracket of this run.
+    uint8_t is_thread_local_;           // True if this run is used as a thread-local run.
+    uint8_t to_be_bulk_freed_;          // Used within BulkFree() to flag a run that's involved with a bulk free.
     uint32_t first_search_vec_idx_;  // The index of the first bitmap vector which may contain an available slot.
     uint32_t alloc_bit_map_[0];      // The bit map that allocates if each slot is in use.
 
@@ -175,20 +175,20 @@
     // Returns the byte size of the header except for the bit maps.
     static size_t fixed_header_size() {
       Run temp;
-      size_t size = reinterpret_cast<byte*>(&temp.alloc_bit_map_) - reinterpret_cast<byte*>(&temp);
+      size_t size = reinterpret_cast<uint8_t*>(&temp.alloc_bit_map_) - reinterpret_cast<uint8_t*>(&temp);
       DCHECK_EQ(size, static_cast<size_t>(8));
       return size;
     }
     // Returns the base address of the free bit map.
     uint32_t* BulkFreeBitMap() {
-      return reinterpret_cast<uint32_t*>(reinterpret_cast<byte*>(this) + bulkFreeBitMapOffsets[size_bracket_idx_]);
+      return reinterpret_cast<uint32_t*>(reinterpret_cast<uint8_t*>(this) + bulkFreeBitMapOffsets[size_bracket_idx_]);
     }
     // Returns the base address of the thread local free bit map.
     uint32_t* ThreadLocalFreeBitMap() {
-      return reinterpret_cast<uint32_t*>(reinterpret_cast<byte*>(this) + threadLocalFreeBitMapOffsets[size_bracket_idx_]);
+      return reinterpret_cast<uint32_t*>(reinterpret_cast<uint8_t*>(this) + threadLocalFreeBitMapOffsets[size_bracket_idx_]);
     }
     void* End() {
-      return reinterpret_cast<byte*>(this) + kPageSize * numOfPages[size_bracket_idx_];
+      return reinterpret_cast<uint8_t*>(this) + kPageSize * numOfPages[size_bracket_idx_];
     }
     // Returns the number of bitmap words per run.
     size_t NumberOfBitmapVectors() const {
@@ -259,13 +259,13 @@
   };
 
   // The magic number for a run.
-  static const byte kMagicNum = 42;
+  static constexpr uint8_t kMagicNum = 42;
   // The magic number for free pages.
-  static const byte kMagicNumFree = 43;
+  static constexpr uint8_t kMagicNumFree = 43;
   // The number of size brackets. Sync this with the length of Thread::rosalloc_runs_.
-  static const size_t kNumOfSizeBrackets = kNumRosAllocThreadLocalSizeBrackets;
+  static constexpr size_t kNumOfSizeBrackets = kNumRosAllocThreadLocalSizeBrackets;
   // The number of smaller size brackets that are 16 bytes apart.
-  static const size_t kNumOfQuantumSizeBrackets = 32;
+  static constexpr size_t kNumOfQuantumSizeBrackets = 32;
   // The sizes (the slot sizes, in bytes) of the size brackets.
   static size_t bracketSizes[kNumOfSizeBrackets];
   // The numbers of pages that are used for runs for each size bracket.
@@ -356,13 +356,13 @@
   // address is page size aligned.
   size_t ToPageMapIndex(const void* addr) const {
     DCHECK(base_ <= addr && addr < base_ + capacity_);
-    size_t byte_offset = reinterpret_cast<const byte*>(addr) - base_;
+    size_t byte_offset = reinterpret_cast<const uint8_t*>(addr) - base_;
     DCHECK_EQ(byte_offset % static_cast<size_t>(kPageSize), static_cast<size_t>(0));
     return byte_offset / kPageSize;
   }
   // Returns the page map index from an address with rounding.
   size_t RoundDownToPageMapIndex(void* addr) const {
-    DCHECK(base_ <= addr && addr < reinterpret_cast<byte*>(base_) + capacity_);
+    DCHECK(base_ <= addr && addr < reinterpret_cast<uint8_t*>(base_) + capacity_);
     return (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_)) / kPageSize;
   }
 
@@ -409,7 +409,7 @@
 
  private:
   // The base address of the memory region that's managed by this allocator.
-  byte* base_;
+  uint8_t* base_;
 
   // The footprint in bytes of the currently allocated portion of the
   // memory region.
@@ -455,7 +455,7 @@
     kPageMapLargeObjectPart,  // The non-beginning part of a large object.
   };
   // The table that indicates what pages are currently used for.
-  volatile byte* page_map_;  // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
+  volatile uint8_t* page_map_;  // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
   size_t page_map_size_;
   size_t max_page_map_size_;
   std::unique_ptr<MemMap> page_map_mem_map_;
@@ -481,12 +481,12 @@
   const size_t page_release_size_threshold_;
 
   // The base address of the memory region that's managed by this allocator.
-  byte* Begin() { return base_; }
+  uint8_t* Begin() { return base_; }
   // The end address of the memory region that's managed by this allocator.
-  byte* End() { return base_ + capacity_; }
+  uint8_t* End() { return base_ + capacity_; }
 
   // Page-granularity alloc/free
-  void* AllocPages(Thread* self, size_t num_pages, byte page_map_type)
+  void* AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type)
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
   // Returns how many bytes were freed.
   size_t FreePages(Thread* self, void* ptr, bool already_zero) EXCLUSIVE_LOCKS_REQUIRED(lock_);
@@ -524,7 +524,7 @@
   void RevokeThreadUnsafeCurrentRuns();
 
   // Release a range of pages.
-  size_t ReleasePageRange(byte* start, byte* end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  size_t ReleasePageRange(uint8_t* start, uint8_t* end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
  public:
   RosAlloc(void* base, size_t capacity, size_t max_capacity,
@@ -580,7 +580,7 @@
   }
   bool IsFreePage(size_t idx) const {
     DCHECK_LT(idx, capacity_ / kPageSize);
-    byte pm_type = page_map_[idx];
+    uint8_t pm_type = page_map_[idx];
     return pm_type == kPageMapReleased || pm_type == kPageMapEmpty;
   }
 
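Since page_map_ keeps exactly one uint8_t per managed page, the type switch leaves the
memory cost unchanged. A sketch of the sizing arithmetic, with kPageSize = 4096 assumed:

    #include <cstddef>

    constexpr size_t kPageSize = 4096;  // Assumed page size.

    // One entry per page: a 16 MB footprint needs 16 MB / 4 KB = 4096 bytes.
    constexpr size_t PageMapBytes(size_t capacity) {
      return capacity / kPageSize;
    }
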
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index b3bed64..6691b0f 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -120,7 +120,7 @@
 void MarkCompact::CalculateObjectForwardingAddresses() {
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
   // The bump pointer in the space where the next forwarding address will be.
-  bump_pointer_ = reinterpret_cast<byte*>(space_->Begin());
+  bump_pointer_ = reinterpret_cast<uint8_t*>(space_->Begin());
   // Visit all the marked objects in the bitmap.
   CalculateObjectForwardingAddressVisitor visitor(this);
   objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index bb85fa0..f40e870 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -227,7 +227,7 @@
   std::string collector_name_;
 
   // The bump pointer in the space where the next forwarding address will be.
-  byte* bump_pointer_;
+  uint8_t* bump_pointer_;
   // How many live objects we have in the space.
   size_t live_objects_in_space_;
 
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 930499a..942b556 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -689,7 +689,7 @@
  public:
   CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                accounting::ContinuousSpaceBitmap* bitmap,
-               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
+               uint8_t* begin, uint8_t* end, uint8_t minimum_age, size_t mark_stack_size,
                Object** mark_stack_obj)
       : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
         bitmap_(bitmap),
@@ -700,9 +700,9 @@
 
  protected:
   accounting::ContinuousSpaceBitmap* const bitmap_;
-  byte* const begin_;
-  byte* const end_;
-  const byte minimum_age_;
+  uint8_t* const begin_;
+  uint8_t* const end_;
+  const uint8_t minimum_age_;
 
   virtual void Finalize() {
     delete this;
@@ -730,7 +730,7 @@
   }
 }
 
-void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
+void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
   accounting::CardTable* card_table = GetHeap()->GetCardTable();
   ThreadPool* thread_pool = GetHeap()->GetThreadPool();
   size_t thread_count = GetThreadCount(paused);
@@ -754,8 +754,8 @@
       if (space->GetMarkBitmap() == nullptr) {
         continue;
       }
-      byte* card_begin = space->Begin();
-      byte* card_end = space->End();
+      uint8_t* card_begin = space->Begin();
+      uint8_t* card_end = space->End();
       // Align up the end address. For example, the image space's end
       // may not be card-size-aligned.
       card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
@@ -910,7 +910,7 @@
   return nullptr;
 }
 
-void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
+void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
   ScanGrayObjects(paused, minimum_age);
   ProcessMarkStack(paused);
 }
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 2780099..9ac110d 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -112,7 +112,7 @@
   virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Builds a mark stack with objects on dirty cards and recursively mark until it empties.
-  void RecursiveMarkDirtyObjects(bool paused, byte minimum_age)
+  void RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -257,7 +257,7 @@
   void PushOnMarkStack(mirror::Object* obj);
 
   // Blackens objects grayed during a garbage collection.
-  void ScanGrayObjects(bool paused, byte minimum_age)
+  void ScanGrayObjects(bool paused, uint8_t minimum_age)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
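RecursiveMarkDirtyObjects and ScanGrayObjects now pass card ages as uint8_t. A hedged
sketch of the predicate minimum_age implies; the >= comparison is an assumption based on
how card aging is used here:

    #include <cstdint>

    // A card is rescanned when its age byte is at least the requested minimum.
    inline bool ShouldScanCard(uint8_t card, uint8_t minimum_age) {
      return card >= minimum_age;
    }
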
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index c8fa869..9459a3b 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -437,15 +437,15 @@
     return 0;
   }
   size_t saved_bytes = 0;
-  byte* byte_dest = reinterpret_cast<byte*>(dest);
+  uint8_t* byte_dest = reinterpret_cast<uint8_t*>(dest);
   if (kIsDebugBuild) {
     for (size_t i = 0; i < size; ++i) {
       CHECK_EQ(byte_dest[i], 0U);
     }
   }
   // Process the start of the page. The page must already be dirty, so don't bother checking.
-  const byte* byte_src = reinterpret_cast<const byte*>(src);
-  const byte* limit = byte_src + size;
+  const uint8_t* byte_src = reinterpret_cast<const uint8_t*>(src);
+  const uint8_t* limit = byte_src + size;
   size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
   // Copy the bytes until the start of the next page.
   memcpy(dest, src, page_remain);
@@ -481,7 +481,7 @@
   const size_t object_size = obj->SizeOf();
   size_t bytes_allocated;
   mirror::Object* forward_address = nullptr;
-  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
+  if (generational_ && reinterpret_cast<uint8_t*>(obj) < last_gc_to_space_end_) {
     // If it's allocated before the last GC (older), move
     // (pseudo-promote) it to the main free list space (as sort
     // of an old generation).
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 71a83f2..1c4f1e4 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -228,7 +228,7 @@
 
   // Used for the generational mode. The end/top of the bump
   // pointer space at the end of the last collection.
-  byte* last_gc_to_space_end_;
+  uint8_t* last_gc_to_space_end_;
 
   // Used for the generational mode. During a collection, keeps track
   // of how many bytes of objects have been copied so far from the
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index d672510..b9d69d5 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -199,7 +199,7 @@
   live_bitmap_.reset(new accounting::HeapBitmap(this));
   mark_bitmap_.reset(new accounting::HeapBitmap(this));
   // Requested begin for the alloc space, to follow the mapped image and oat files
-  byte* requested_alloc_space_begin = nullptr;
+  uint8_t* requested_alloc_space_begin = nullptr;
   if (!image_file_name.empty()) {
     std::string error_msg;
     space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str(),
@@ -209,7 +209,7 @@
       AddSpace(image_space);
       // Oat files referenced by image files immediately follow them in memory; ensure the alloc
       // space isn't going to get in the middle.
-      byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
+      uint8_t* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
       CHECK_GT(oat_file_end_addr, image_space->End());
       requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
     } else {
@@ -245,7 +245,7 @@
   }
   std::unique_ptr<MemMap> main_mem_map_1;
   std::unique_ptr<MemMap> main_mem_map_2;
-  byte* request_begin = requested_alloc_space_begin;
+  uint8_t* request_begin = requested_alloc_space_begin;
   if (request_begin != nullptr && separate_non_moving_space) {
     request_begin += non_moving_space_capacity;
   }
@@ -259,7 +259,7 @@
                              non_moving_space_capacity, PROT_READ | PROT_WRITE, true, &error_str));
     CHECK(non_moving_space_mem_map != nullptr) << error_str;
     // Try to reserve virtual memory at a lower address if we have a separate non moving space.
-    request_begin = reinterpret_cast<byte*>(300 * MB);
+    request_begin = reinterpret_cast<uint8_t*>(300 * MB);
   }
   // Attempt to create 2 mem maps at or after the requested begin.
   main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
@@ -350,8 +350,8 @@
   // Compute heap capacity. Continuous spaces are sorted in order of Begin().
   CHECK(!continuous_spaces_.empty());
   // Relies on the spaces being sorted.
-  byte* heap_begin = continuous_spaces_.front()->Begin();
-  byte* heap_end = continuous_spaces_.back()->Limit();
+  uint8_t* heap_begin = continuous_spaces_.front()->Begin();
+  uint8_t* heap_end = continuous_spaces_.back()->Limit();
   size_t heap_capacity = heap_end - heap_begin;
   // Remove the main backup space since it slows down the GC to have unused extra spaces.
   if (main_space_backup_.get() != nullptr) {
@@ -433,7 +433,7 @@
   }
 }
 
-MemMap* Heap::MapAnonymousPreferredAddress(const char* name, byte* request_begin, size_t capacity,
+MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin, size_t capacity,
                                            int prot_flags, std::string* out_error_str) {
   while (true) {
     MemMap* map = MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity,
@@ -2265,7 +2265,7 @@
       accounting::CardTable* card_table = heap_->GetCardTable();
       accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
       accounting::ObjectStack* live_stack = heap_->live_stack_.get();
-      byte* card_addr = card_table->CardFromAddr(obj);
+      uint8_t* card_addr = card_table->CardFromAddr(obj);
       LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
                  << offset << "\n card value = " << static_cast<int>(*card_addr);
       if (heap_->IsValidObjectAddress(obj->GetClass())) {
@@ -2295,7 +2295,7 @@
                    << ") is not a valid heap address";
       }
 
-      card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
+      card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
       void* cover_begin = card_table->AddrFromCard(card_addr);
       void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
           accounting::CardTable::kCardSize);
@@ -2328,7 +2328,7 @@
         }
         // Attempt to see if the card table missed the reference.
         ScanVisitor scan_visitor;
-        byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
+        uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
         card_table->Scan(bitmap, byte_cover_begin,
                          byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
       }
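
The verification path above maps an object address to its card via CardFromAddr. A hedged
sketch of that mapping, assuming 1 KiB cards (kCardShift = 10) and a biased table base so
no explicit heap-begin subtraction is needed:

    #include <cstdint>

    constexpr unsigned kCardShift = 10;  // Assumed: kCardSize == 1 << 10.

    inline uint8_t* CardFromAddr(uint8_t* biased_begin, const void* addr) {
      return biased_begin + (reinterpret_cast<uintptr_t>(addr) >> kCardShift);
    }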
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index faaea40..c09dca8 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -94,7 +94,7 @@
 
 class AgeCardVisitor {
  public:
-  byte operator()(byte card) const {
+  uint8_t operator()(uint8_t card) const {
     if (card == accounting::CardTable::kCardDirty) {
       return card - 1;
     } else {
@@ -625,7 +625,7 @@
   void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);
 
   // Create a mem map with a preferred base address.
-  static MemMap* MapAnonymousPreferredAddress(const char* name, byte* request_begin,
+  static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
                                               size_t capacity, int prot_flags,
                                               std::string* out_error_str);
 
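AgeCardVisitor::operator() above compresses the aging policy into a single branch. An
equivalent free-function sketch; the concrete kCardDirty value is an assumption:

    #include <cstdint>

    constexpr uint8_t kCardDirty = 0x70;  // Assumed dirty-card marker value.

    // A dirty card ages by one step; any other non-clean value clears to zero.
    inline uint8_t AgeCard(uint8_t card) {
      return card == kCardDirty ? card - 1 : 0;
    }
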
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index e6b5c75..3106b4c 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -62,7 +62,7 @@
 }
 
 TEST_F(HeapTest, HeapBitmapCapacityTest) {
-  byte* heap_begin = reinterpret_cast<byte*>(0x1000);
+  uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x1000);
   const size_t heap_capacity = kObjectAlignment * (sizeof(intptr_t) * 8 + 1);
   std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
       accounting::ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index ee3c979..9f1f953 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -41,7 +41,7 @@
                                                            size_t* usable_size) {
   Locks::mutator_lock_->AssertExclusiveHeld(self);
   num_bytes = RoundUp(num_bytes, kAlignment);
-  byte* end = end_.LoadRelaxed();
+  uint8_t* end = end_.LoadRelaxed();
   if (end + num_bytes > growth_end_) {
     return nullptr;
   }
@@ -59,8 +59,8 @@
 
 inline mirror::Object* BumpPointerSpace::AllocNonvirtualWithoutAccounting(size_t num_bytes) {
   DCHECK(IsAligned<kAlignment>(num_bytes));
-  byte* old_end;
-  byte* new_end;
+  uint8_t* old_end;
+  uint8_t* new_end;
   do {
     old_end = end_.LoadRelaxed();
     new_end = old_end + num_bytes;
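
The do/while above is a lock-free bump allocation. A hedged sketch using std::atomic in
place of ART's Atomic<> wrapper, with the growth_end_ capacity check omitted:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    // Reserve num_bytes by advancing end; returns the start of the block.
    inline uint8_t* BumpAlloc(std::atomic<uint8_t*>& end, size_t num_bytes) {
      uint8_t* old_end = end.load(std::memory_order_relaxed);
      uint8_t* new_end;
      do {
        // On CAS failure old_end is refreshed, so new_end is recomputed.
        new_end = old_end + num_bytes;
      } while (!end.compare_exchange_weak(old_end, new_end,
                                          std::memory_order_relaxed));
      return old_end;
    }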
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index fb6bbac..8f42642 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -25,7 +25,7 @@
 namespace space {
 
 BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity,
-                                           byte* requested_begin) {
+                                           uint8_t* requested_begin) {
   capacity = RoundUp(capacity, kPageSize);
   std::string error_msg;
   std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
@@ -42,7 +42,7 @@
   return new BumpPointerSpace(name, mem_map);
 }
 
-BumpPointerSpace::BumpPointerSpace(const std::string& name, byte* begin, byte* limit)
+BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit)
     : ContinuousMemMapAllocSpace(name, nullptr, begin, begin, limit,
                                  kGcRetentionPolicyAlwaysCollect),
       growth_end_(limit),
@@ -134,12 +134,12 @@
 }
 
 // Returns the start of the storage.
-byte* BumpPointerSpace::AllocBlock(size_t bytes) {
+uint8_t* BumpPointerSpace::AllocBlock(size_t bytes) {
   bytes = RoundUp(bytes, kAlignment);
   if (!num_blocks_) {
     UpdateMainBlock();
   }
-  byte* storage = reinterpret_cast<byte*>(
+  uint8_t* storage = reinterpret_cast<uint8_t*>(
       AllocNonvirtualWithoutAccounting(bytes + sizeof(BlockHeader)));
   if (LIKELY(storage != nullptr)) {
     BlockHeader* header = reinterpret_cast<BlockHeader*>(storage);
@@ -151,9 +151,9 @@
 }
 
 void BumpPointerSpace::Walk(ObjectCallback* callback, void* arg) {
-  byte* pos = Begin();
-  byte* end = End();
-  byte* main_end = pos;
+  uint8_t* pos = Begin();
+  uint8_t* end = End();
+  uint8_t* main_end = pos;
   {
     MutexLock mu(Thread::Current(), block_lock_);
     // If we have 0 blocks then we need to update the main header since we have bump pointer style
@@ -179,7 +179,7 @@
       return;
     } else {
       callback(obj, arg);
-      pos = reinterpret_cast<byte*>(GetNextObject(obj));
+      pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
     }
   }
   // Walk the other blocks (currently only TLABs).
@@ -189,7 +189,7 @@
    pos += sizeof(BlockHeader);  // Skip the header so that we know where the objects begin.
     mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
     const mirror::Object* end = reinterpret_cast<const mirror::Object*>(pos + block_size);
-    CHECK_LE(reinterpret_cast<const byte*>(end), End());
+    CHECK_LE(reinterpret_cast<const uint8_t*>(end), End());
     // We don't know how many objects are allocated in the current block. When we hit a null class
    // assume it's the end. TODO: Have a thread update the header when it flushes the block?
     while (obj < end && obj->GetClass() != nullptr) {
@@ -250,7 +250,7 @@
 bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes) {
   MutexLock mu(Thread::Current(), block_lock_);
   RevokeThreadLocalBuffersLocked(self);
-  byte* start = AllocBlock(bytes);
+  uint8_t* start = AllocBlock(bytes);
   if (start == nullptr) {
     return false;
   }
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 71b15ba..98a3189 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -42,7 +42,7 @@
   // Create a bump pointer space with the requested sizes. The requested base address is not
   // guaranteed to be granted; if it is required, the caller should call Begin on the returned
   // space to confirm the request was granted.
-  static BumpPointerSpace* Create(const std::string& name, size_t capacity, byte* requested_begin);
+  static BumpPointerSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
   static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap* mem_map);
 
   // Allocate num_bytes, returns nullptr if the space is full.
@@ -121,12 +121,12 @@
   }
 
   bool Contains(const mirror::Object* obj) const {
-    const byte* byte_obj = reinterpret_cast<const byte*>(obj);
+    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
     return byte_obj >= Begin() && byte_obj < End();
   }
 
   // TODO: Change this? Mainly used for compacting to a particular region of memory.
-  BumpPointerSpace(const std::string& name, byte* begin, byte* limit);
+  BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit);
 
   // Return the object which comes after obj, while ensuring alignment.
   static mirror::Object* GetNextObject(mirror::Object* obj)
@@ -161,7 +161,7 @@
   BumpPointerSpace(const std::string& name, MemMap* mem_map);
 
   // Allocate a raw block of bytes.
-  byte* AllocBlock(size_t bytes) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
+  uint8_t* AllocBlock(size_t bytes) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
   void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
 
   // The main block is an unbounded block where objects go when there are no other blocks. This
   // enables us to maintain tightly packed objects when you are not using thread local buffers for
   // allocation. The main block starts at the space Begin().
   // allocation. The main block starts at the space Begin().
   void UpdateMainBlock() EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
 
-  byte* growth_end_;
+  uint8_t* growth_end_;
   AtomicInteger objects_allocated_;  // Accumulated from revoked thread local regions.
   AtomicInteger bytes_allocated_;  // Accumulated from revoked thread local regions.
   Mutex block_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
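
GetNextObject steps from one object to the next by rounding up to the allocation
alignment. A hedged sketch of that step; kAlignment = 8 is an assumption:

    #include <cstddef>
    #include <cstdint>

    constexpr uintptr_t kAlignment = 8;  // Assumed bump-pointer alignment.

    // Address of the object following obj_size bytes at address obj.
    inline uintptr_t NextObject(uintptr_t obj, size_t obj_size) {
      return (obj + obj_size + kAlignment - 1) & ~(kAlignment - 1);
    }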
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 456d1b3..d2d95b4 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -35,8 +35,8 @@
 
 template class ValgrindMallocSpace<DlMallocSpace, void*>;
 
-DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
-                             byte* end, byte* limit, size_t growth_limit,
+DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, uint8_t* begin,
+                             uint8_t* end, uint8_t* limit, size_t growth_limit,
                              bool can_move_objects, size_t starting_size,
                              size_t initial_size)
     : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
@@ -57,13 +57,13 @@
   }
 
   // Protect memory beyond the starting size. morecore will add r/w permissions when necessary.
-  byte* end = mem_map->Begin() + starting_size;
+  uint8_t* end = mem_map->Begin() + starting_size;
   if (capacity - starting_size > 0) {
     CHECK_MEMORY_CALL(mprotect, (end, capacity - starting_size, PROT_NONE), name);
   }
 
   // Everything is set, so record it in the immutable structure and return.
-  byte* begin = mem_map->Begin();
+  uint8_t* begin = mem_map->Begin();
   if (Runtime::Current()->RunningOnValgrind()) {
     return new ValgrindMallocSpace<DlMallocSpace, void*>(
         name, mem_map, mspace, begin, end, begin + capacity, growth_limit, initial_size,
@@ -75,7 +75,7 @@
 }
 
 DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size,
-                                     size_t growth_limit, size_t capacity, byte* requested_begin,
+                                     size_t growth_limit, size_t capacity, uint8_t* requested_begin,
                                      bool can_move_objects) {
   uint64_t start_time = 0;
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -149,8 +149,8 @@
 }
 
 MallocSpace* DlMallocSpace::CreateInstance(const std::string& name, MemMap* mem_map,
-                                           void* allocator, byte* begin, byte* end,
-                                           byte* limit, size_t growth_limit,
+                                           void* allocator, uint8_t* begin, uint8_t* end,
+                                           uint8_t* limit, size_t growth_limit,
                                            bool can_move_objects) {
   return new DlMallocSpace(name, mem_map, allocator, begin, end, limit, growth_limit,
                            can_move_objects, starting_size_, initial_size_);
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 7aff14b..3b8065e 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -44,7 +44,7 @@
   // the caller should call Begin on the returned space to confirm the
   // request was granted.
   static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
-                               size_t capacity, byte* requested_begin, bool can_move_objects);
+                               size_t capacity, uint8_t* requested_begin, bool can_move_objects);
 
   // Virtual to allow ValgrindMallocSpace to intercept.
   virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -108,7 +108,7 @@
   void SetFootprintLimit(size_t limit) OVERRIDE;
 
   MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
-                              byte* begin, byte* end, byte* limit, size_t growth_limit,
+                              uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
                               bool can_move_objects);
 
   uint64_t GetBytesAllocated() OVERRIDE;
@@ -128,8 +128,8 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  protected:
-  DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
-                byte* limit, size_t growth_limit, bool can_move_objects, size_t starting_size,
+  DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, uint8_t* begin, uint8_t* end,
+                uint8_t* limit, size_t growth_limit, bool can_move_objects, size_t starting_size,
                 size_t initial_size);
 
  private:
@@ -144,7 +144,7 @@
   static void* CreateMspace(void* base, size_t morecore_start, size_t initial_size);
 
   // The boundary tag overhead.
-  static const size_t kChunkOverhead = kWordSize;
+  static const size_t kChunkOverhead = sizeof(intptr_t);
 
   // Underlying malloc space.
   void* mspace_;
diff --git a/runtime/gc/space/dlmalloc_space_base_test.cc b/runtime/gc/space/dlmalloc_space_base_test.cc
index 02fc4a5..93fe155 100644
--- a/runtime/gc/space/dlmalloc_space_base_test.cc
+++ b/runtime/gc/space/dlmalloc_space_base_test.cc
@@ -24,7 +24,7 @@
 namespace space {
 
 MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, byte* requested_begin) {
+                                 size_t capacity, uint8_t* requested_begin) {
   return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
 }
 
diff --git a/runtime/gc/space/dlmalloc_space_random_test.cc b/runtime/gc/space/dlmalloc_space_random_test.cc
index 4b1a1b1..f9b41da 100644
--- a/runtime/gc/space/dlmalloc_space_random_test.cc
+++ b/runtime/gc/space/dlmalloc_space_random_test.cc
@@ -23,7 +23,7 @@
 namespace space {
 
 MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, byte* requested_begin) {
+                                 size_t capacity, uint8_t* requested_begin) {
   return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
 }
 
diff --git a/runtime/gc/space/dlmalloc_space_static_test.cc b/runtime/gc/space/dlmalloc_space_static_test.cc
index d17d0a7..5758e0c 100644
--- a/runtime/gc/space/dlmalloc_space_static_test.cc
+++ b/runtime/gc/space/dlmalloc_space_static_test.cc
@@ -23,7 +23,7 @@
 namespace space {
 
 MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, byte* requested_begin) {
+                                 size_t capacity, uint8_t* requested_begin) {
   return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
 }
 
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 59630fe..452af90 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -526,7 +526,7 @@
 }
 
 void ImageSpace::VerifyImageAllocations() {
-  byte* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
+  uint8_t* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
   while (current < End()) {
     DCHECK_ALIGNED(current, kObjectAlignment);
     mirror::Object* obj = reinterpret_cast<mirror::Object*>(current);
@@ -595,7 +595,7 @@
                                        bitmap_index));
   std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
       accounting::ContinuousSpaceBitmap::CreateFromMemMap(bitmap_name, image_map.release(),
-                                                          reinterpret_cast<byte*>(map->Begin()),
+                                                          reinterpret_cast<uint8_t*>(map->Begin()),
                                                           map->Size()));
   if (bitmap.get() == nullptr) {
     *error_msg = StringPrintf("Could not create bitmap '%s'", bitmap_name.c_str());
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index dad5855..9434bfe 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -45,7 +45,7 @@
     mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
         reinterpret_cast<uintptr_t>(obj) + kValgrindRedZoneBytes);
     VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<void*>(obj), kValgrindRedZoneBytes);
-    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(object_without_rdz) + num_bytes,
+    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes,
                                kValgrindRedZoneBytes);
     if (usable_size != nullptr) {
       *usable_size = num_bytes;  // Since we have redzones, shrink the usable size.
@@ -84,7 +84,7 @@
   mark_bitmap_->SetName(temp_name);
 }
 
-LargeObjectSpace::LargeObjectSpace(const std::string& name, byte* begin, byte* end)
+LargeObjectSpace::LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end)
     : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
       num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
       total_objects_allocated_(0), begin_(begin), end_(end) {
@@ -122,8 +122,8 @@
   mem_maps_.Put(obj, mem_map);
   const size_t allocation_size = mem_map->BaseSize();
   DCHECK(bytes_allocated != nullptr);
-  begin_ = std::min(begin_, reinterpret_cast<byte*>(obj));
-  byte* obj_end = reinterpret_cast<byte*>(obj) + allocation_size;
+  begin_ = std::min(begin_, reinterpret_cast<uint8_t*>(obj));
+  uint8_t* obj_end = reinterpret_cast<uint8_t*>(obj) + allocation_size;
   if (end_ == nullptr || obj_end > end_) {
     end_ = obj_end;
   }
@@ -283,7 +283,7 @@
   return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
 }
 
-FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
+FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested_begin, size_t size) {
   CHECK_EQ(size % kAlignment, 0U);
   std::string error_msg;
   MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
@@ -292,7 +292,7 @@
   return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
 }
 
-FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
+FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end)
     : LargeObjectSpace(name, begin, end),
       mem_map_(mem_map),
       lock_("free list space lock", kAllocSpaceLock) {
@@ -319,8 +319,8 @@
   while (cur_info < end_info) {
     if (!cur_info->IsFree()) {
       size_t alloc_size = cur_info->ByteSize();
-      byte* byte_start = reinterpret_cast<byte*>(GetAddressForAllocationInfo(cur_info));
-      byte* byte_end = byte_start + alloc_size;
+      uint8_t* byte_start = reinterpret_cast<uint8_t*>(GetAddressForAllocationInfo(cur_info));
+      uint8_t* byte_end = byte_start + alloc_size;
       callback(byte_start, byte_end, alloc_size, arg);
       callback(nullptr, nullptr, 0, arg);
     }
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index a63c5c0a..850a006 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -77,11 +77,11 @@
     return false;
   }
   // Current address at which the space begins, which may vary as the space is filled.
-  byte* Begin() const {
+  uint8_t* Begin() const {
     return begin_;
   }
   // Current address at which the space ends, which may vary as the space is filled.
-  byte* End() const {
+  uint8_t* End() const {
     return end_;
   }
   // Current size of space
@@ -90,14 +90,14 @@
   }
   // Return true if we contain the specified address.
   bool Contains(const mirror::Object* obj) const {
-    const byte* byte_obj = reinterpret_cast<const byte*>(obj);
+    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
     return Begin() <= byte_obj && byte_obj < End();
   }
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  protected:
-  explicit LargeObjectSpace(const std::string& name, byte* begin, byte* end);
+  explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end);
   static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
 
   // Approximate number of bytes which have been allocated into the space.
@@ -106,8 +106,8 @@
   uint64_t total_bytes_allocated_;
   uint64_t total_objects_allocated_;
   // Begin and end, may change as more large objects are allocated.
-  byte* begin_;
-  byte* end_;
+  uint8_t* begin_;
+  uint8_t* end_;
 
   friend class Space;
 
@@ -149,7 +149,7 @@
   static constexpr size_t kAlignment = kPageSize;
 
   virtual ~FreeListSpace();
-  static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity);
+  static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
   size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -159,7 +159,7 @@
   void Dump(std::ostream& os) const;
 
  protected:
-  FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end);
+  FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end);
   size_t GetSlotIndexForAddress(uintptr_t address) const {
     DCHECK(Contains(reinterpret_cast<mirror::Object*>(address)));
     return (address - reinterpret_cast<uintptr_t>(Begin())) / kAlignment;
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index c5d8abc..e17bad8 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -55,7 +55,7 @@
         ASSERT_EQ(allocation_size, los->AllocationSize(obj, nullptr));
         ASSERT_GE(allocation_size, request_size);
         // Fill in our magic value.
-        byte magic = (request_size & 0xFF) | 1;
+        uint8_t magic = (request_size & 0xFF) | 1;
         memset(obj, magic, request_size);
         requests.push_back(std::make_pair(obj, request_size));
       }
@@ -73,9 +73,9 @@
         mirror::Object* obj = requests.back().first;
         size_t request_size = requests.back().second;
         requests.pop_back();
-        byte magic = (request_size & 0xFF) | 1;
+        uint8_t magic = (request_size & 0xFF) | 1;
         for (size_t k = 0; k < request_size; ++k) {
-          ASSERT_EQ(reinterpret_cast<const byte*>(obj)[k], magic);
+          ASSERT_EQ(reinterpret_cast<const uint8_t*>(obj)[k], magic);
         }
         ASSERT_GE(los->Free(Thread::Current(), obj), request_size);
       }
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index ba7e5c1..9d1fbbe 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -36,7 +36,7 @@
 size_t MallocSpace::bitmap_index_ = 0;
 
 MallocSpace::MallocSpace(const std::string& name, MemMap* mem_map,
-                         byte* begin, byte* end, byte* limit, size_t growth_limit,
+                         uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
                          bool create_bitmaps, bool can_move_objects, size_t starting_size,
                          size_t initial_size)
     : ContinuousMemMapAllocSpace(name, mem_map, begin, end, limit, kGcRetentionPolicyAlwaysCollect),
@@ -66,7 +66,7 @@
 }
 
 MemMap* MallocSpace::CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
-                                  size_t* growth_limit, size_t* capacity, byte* requested_begin) {
+                                  size_t* growth_limit, size_t* capacity, uint8_t* requested_begin) {
   // Sanity check arguments
   if (starting_size > *initial_size) {
     *initial_size = starting_size;
@@ -129,10 +129,10 @@
 
 void* MallocSpace::MoreCore(intptr_t increment) {
   CheckMoreCoreForPrecondition();
-  byte* original_end = End();
+  uint8_t* original_end = End();
   if (increment != 0) {
     VLOG(heap) << "MallocSpace::MoreCore " << PrettySize(increment);
-    byte* new_end = original_end + increment;
+    uint8_t* new_end = original_end + increment;
     if (increment > 0) {
       // Should never be asked to increase the allocation beyond the capacity of the space. Enforced
       // by mspace_set_footprint_limit.
@@ -163,7 +163,7 @@
   // alloc space so that we won't mix thread local runs from different
   // alloc spaces.
   RevokeAllThreadLocalBuffers();
-  SetEnd(reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(End()), kPageSize)));
+  SetEnd(reinterpret_cast<uint8_t*>(RoundUp(reinterpret_cast<uintptr_t>(End()), kPageSize)));
   DCHECK(IsAligned<accounting::CardTable::kCardSize>(begin_));
   DCHECK(IsAligned<accounting::CardTable::kCardSize>(End()));
   DCHECK(IsAligned<kPageSize>(begin_));
@@ -194,7 +194,7 @@
   void* allocator = CreateAllocator(End(), starting_size_, initial_size_, capacity,
                                     low_memory_mode);
   // Protect memory beyond the initial size.
-  byte* end = mem_map->Begin() + starting_size_;
+  uint8_t* end = mem_map->Begin() + starting_size_;
   if (capacity > initial_size_) {
     CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size_, PROT_NONE), alloc_space_name);
   }
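
The hunk above reserves the full capacity up front but leaves only the initial prefix accessible; everything past it stays PROT_NONE until MoreCore grows the space. A minimal POSIX sketch of the same reserve-then-protect pattern (sizes illustrative, plain mmap rather than ART's MemMap):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <sys/mman.h>

int main() {
  const size_t kPageSize = 4096;  // illustrative
  const size_t initial_size = 4 * kPageSize;
  const size_t capacity = 16 * kPageSize;
  // Reserve the whole range once...
  void* base = mmap(nullptr, capacity, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(base != MAP_FAILED);
  // ...then revoke access beyond the initial size, as the hunk above does.
  uint8_t* end = reinterpret_cast<uint8_t*>(base) + initial_size;
  mprotect(end, capacity - initial_size, PROT_NONE);
  // MoreCore-style growth would later restore access incrementally, e.g.
  // mprotect(end, increment, PROT_READ | PROT_WRITE);
  munmap(base, capacity);
  return 0;
}
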
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index bace3f6..7230116 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -115,7 +115,7 @@
   void SetGrowthLimit(size_t growth_limit);
 
   virtual MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
-                                      byte* begin, byte* end, byte* limit, size_t growth_limit,
+                                      uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
                                       bool can_move_objects) = 0;
 
   // Splits ourself into a zygote space and a new malloc space which has our unused memory. When true,
@@ -138,12 +138,12 @@
   }
 
  protected:
-  MallocSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end,
-              byte* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
+  MallocSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end,
+              uint8_t* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
               size_t starting_size, size_t initial_size);
 
   static MemMap* CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
-                              size_t* growth_limit, size_t* capacity, byte* requested_begin);
+                              size_t* growth_limit, size_t* capacity, uint8_t* requested_begin);
 
   // When true, the low memory mode argument specifies that the heap wishes the created allocator to
   // be more aggressive in releasing unused pages.
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 3f39c77..d25694a 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -42,8 +42,8 @@
 // template class ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
 
 RosAllocSpace::RosAllocSpace(const std::string& name, MemMap* mem_map,
-                             art::gc::allocator::RosAlloc* rosalloc, byte* begin, byte* end,
-                             byte* limit, size_t growth_limit, bool can_move_objects,
+                             art::gc::allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end,
+                             uint8_t* limit, size_t growth_limit, bool can_move_objects,
                              size_t starting_size, size_t initial_size, bool low_memory_mode)
     : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
                   starting_size, initial_size),
@@ -64,13 +64,13 @@
   }
 
 // Protect memory beyond the starting size. MoreCore will add r/w permissions when necessary.
-  byte* end = mem_map->Begin() + starting_size;
+  uint8_t* end = mem_map->Begin() + starting_size;
   if (capacity - starting_size > 0) {
     CHECK_MEMORY_CALL(mprotect, (end, capacity - starting_size, PROT_NONE), name);
   }
 
   // Everything is set so record in immutable structure and leave
-  byte* begin = mem_map->Begin();
+  uint8_t* begin = mem_map->Begin();
   // TODO: Fix RosAllocSpace to support valgrind. There are currently some issues with
   // AllocationSize caused by redzones. b/12944686
   if (false && Runtime::Current()->GetHeap()->RunningOnValgrind()) {
@@ -86,7 +86,7 @@
 }
 
 RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size,
-                                     size_t growth_limit, size_t capacity, byte* requested_begin,
+                                     size_t growth_limit, size_t capacity, uint8_t* requested_begin,
                                      bool low_memory_mode, bool can_move_objects) {
   uint64_t start_time = 0;
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -164,7 +164,7 @@
 }
 
 MallocSpace* RosAllocSpace::CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
-                                           byte* begin, byte* end, byte* limit, size_t growth_limit,
+                                           uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
                                            bool can_move_objects) {
   return new RosAllocSpace(name, mem_map, reinterpret_cast<allocator::RosAlloc*>(allocator),
                            begin, end, limit, growth_limit, can_move_objects, starting_size_,
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index f1ce115..46fffaa 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -39,7 +39,7 @@
   // the caller should call Begin on the returned space to confirm the
   // request was granted.
   static RosAllocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
-                               size_t capacity, byte* requested_begin, bool low_memory_mode,
+                               size_t capacity, uint8_t* requested_begin, bool low_memory_mode,
                                bool can_move_objects);
   static RosAllocSpace* CreateFromMemMap(MemMap* mem_map, const std::string& name,
                                          size_t starting_size, size_t initial_size,
@@ -93,7 +93,7 @@
   void Clear() OVERRIDE;
 
   MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
-                              byte* begin, byte* end, byte* limit, size_t growth_limit,
+                              uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
                               bool can_move_objects) OVERRIDE;
 
   uint64_t GetBytesAllocated() OVERRIDE;
@@ -127,7 +127,7 @@
 
  protected:
   RosAllocSpace(const std::string& name, MemMap* mem_map, allocator::RosAlloc* rosalloc,
-                byte* begin, byte* end, byte* limit, size_t growth_limit, bool can_move_objects,
+                uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit, bool can_move_objects,
                 size_t starting_size, size_t initial_size, bool low_memory_mode);
 
  private:
diff --git a/runtime/gc/space/rosalloc_space_base_test.cc b/runtime/gc/space/rosalloc_space_base_test.cc
index c3157fa..0c5be03 100644
--- a/runtime/gc/space/rosalloc_space_base_test.cc
+++ b/runtime/gc/space/rosalloc_space_base_test.cc
@@ -21,7 +21,7 @@
 namespace space {
 
 MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, byte* requested_begin) {
+                                 size_t capacity, uint8_t* requested_begin) {
   return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
                                Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
 }
diff --git a/runtime/gc/space/rosalloc_space_random_test.cc b/runtime/gc/space/rosalloc_space_random_test.cc
index 864bbc9..ca3aff4 100644
--- a/runtime/gc/space/rosalloc_space_random_test.cc
+++ b/runtime/gc/space/rosalloc_space_random_test.cc
@@ -21,7 +21,7 @@
 namespace space {
 
 MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, byte* requested_begin) {
+                                 size_t capacity, uint8_t* requested_begin) {
   return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
                                Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
 }
diff --git a/runtime/gc/space/rosalloc_space_static_test.cc b/runtime/gc/space/rosalloc_space_static_test.cc
index c0e2ac8..a78623e 100644
--- a/runtime/gc/space/rosalloc_space_static_test.cc
+++ b/runtime/gc/space/rosalloc_space_static_test.cc
@@ -21,7 +21,7 @@
 namespace space {
 
 MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, byte* requested_begin) {
+                                 size_t capacity, uint8_t* requested_begin) {
   return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
                                Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
 }
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 523d4fe..860a4c9 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -246,27 +246,27 @@
 class ContinuousSpace : public Space {
  public:
   // Address at which the space begins.
-  byte* Begin() const {
+  uint8_t* Begin() const {
     return begin_;
   }
 
   // Current address at which the space ends, which may vary as the space is filled.
-  byte* End() const {
+  uint8_t* End() const {
     return end_.LoadRelaxed();
   }
 
   // The end of the address range covered by the space.
-  byte* Limit() const {
+  uint8_t* Limit() const {
     return limit_;
   }
 
   // Change the end of the space. Be careful with use since changing the end of a space to an
   // invalid value may break the GC.
-  void SetEnd(byte* end) {
+  void SetEnd(uint8_t* end) {
     end_.StoreRelaxed(end);
   }
 
-  void SetLimit(byte* limit) {
+  void SetLimit(uint8_t* limit) {
     limit_ = limit;
   }
 
@@ -286,7 +286,7 @@
   // Is object within this space? We check to see if the pointer is beyond the end first as
   // continuous spaces are iterated over from low to high.
   bool HasAddress(const mirror::Object* obj) const {
-    const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
+    const uint8_t* byte_ptr = reinterpret_cast<const uint8_t*>(obj);
     return byte_ptr >= Begin() && byte_ptr < Limit();
   }
 
@@ -302,18 +302,18 @@
 
  protected:
   ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy,
-                  byte* begin, byte* end, byte* limit) :
+                  uint8_t* begin, uint8_t* end, uint8_t* limit) :
       Space(name, gc_retention_policy), begin_(begin), end_(end), limit_(limit) {
   }
 
   // The beginning of the storage for fast access.
-  byte* begin_;
+  uint8_t* begin_;
 
   // Current end of the space.
-  Atomic<byte*> end_;
+  Atomic<uint8_t*> end_;
 
   // Limit of the space.
-  byte* limit_;
+  uint8_t* limit_;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
@@ -369,7 +369,7 @@
   }
 
  protected:
-  MemMapSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end, byte* limit,
+  MemMapSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end, uint8_t* limit,
               GcRetentionPolicy gc_retention_policy)
       : ContinuousSpace(name, gc_retention_policy, begin, end, limit),
         mem_map_(mem_map) {
@@ -425,8 +425,8 @@
   std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;
   std::unique_ptr<accounting::ContinuousSpaceBitmap> temp_bitmap_;
 
-  ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, byte* begin,
-                             byte* end, byte* limit, GcRetentionPolicy gc_retention_policy)
+  ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, uint8_t* begin,
+                             uint8_t* end, uint8_t* limit, GcRetentionPolicy gc_retention_policy)
       : MemMapSpace(name, mem_map, begin, end, limit, gc_retention_policy) {
   }
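
Taken together, Begin/End/Limit encode the invariant begin_ <= end_ <= limit_: End() advances as the space fills, Limit() bounds the reservation, and HasAddress tests against the full [Begin, Limit) range. A reduced sketch of that relationship (a plain pointer stands in for Atomic<uint8_t*>, and the names only echo the class above):

#include <cassert>
#include <cstdint>

struct ToySpace {
  uint8_t* begin_;
  uint8_t* end_;    // grows toward limit_ as allocations land
  uint8_t* limit_;
  // Mirrors ContinuousSpace::HasAddress: membership is tested against the
  // whole reserved range, not just the currently allocated prefix.
  bool HasAddress(const void* obj) const {
    const uint8_t* p = reinterpret_cast<const uint8_t*>(obj);
    return p >= begin_ && p < limit_;
  }
};

int main() {
  uint8_t buffer[64];
  ToySpace s{buffer, buffer + 16, buffer + 64};
  assert(s.HasAddress(buffer + 32));   // beyond end_, still inside limit_
  assert(!s.HasAddress(buffer + 64));  // one past limit_ is out
  return 0;
}
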
 
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 7211bb4..9f39b80 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -110,7 +110,7 @@
   }
 
   typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size, size_t growth_limit,
-                                        size_t capacity, byte* requested_begin);
+                                        size_t capacity, uint8_t* requested_begin);
   void InitTestBody(CreateSpaceFn create_space);
   void ZygoteSpaceTestBody(CreateSpaceFn create_space);
   void AllocAndFreeTestBody(CreateSpaceFn create_space);
diff --git a/runtime/gc/space/valgrind_malloc_space-inl.h b/runtime/gc/space/valgrind_malloc_space-inl.h
index 966c276..a6b837c 100644
--- a/runtime/gc/space/valgrind_malloc_space-inl.h
+++ b/runtime/gc/space/valgrind_malloc_space-inl.h
@@ -39,10 +39,10 @@
     return nullptr;
   }
   mirror::Object* result = reinterpret_cast<mirror::Object*>(
-      reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
+      reinterpret_cast<uint8_t*>(obj_with_rdz) + kValgrindRedZoneBytes);
   // Mark redzones as no-access.
   VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
-  VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
+  VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(result) + num_bytes, kValgrindRedZoneBytes);
   return result;
 }
 
@@ -56,24 +56,24 @@
     return nullptr;
   }
   mirror::Object* result = reinterpret_cast<mirror::Object*>(
-      reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
+      reinterpret_cast<uint8_t*>(obj_with_rdz) + kValgrindRedZoneBytes);
   // Mark redzones as no-access.
   VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
-  VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
+  VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(result) + num_bytes, kValgrindRedZoneBytes);
   return result;
 }
 
 template <typename S, typename A>
 size_t ValgrindMallocSpace<S, A>::AllocationSize(mirror::Object* obj, size_t* usable_size) {
   size_t result = S::AllocationSize(reinterpret_cast<mirror::Object*>(
-      reinterpret_cast<byte*>(obj) - kValgrindRedZoneBytes), usable_size);
+      reinterpret_cast<uint8_t*>(obj) - kValgrindRedZoneBytes), usable_size);
   return result;
 }
 
 template <typename S, typename A>
 size_t ValgrindMallocSpace<S, A>::Free(Thread* self, mirror::Object* ptr) {
   void* obj_after_rdz = reinterpret_cast<void*>(ptr);
-  void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
+  void* obj_with_rdz = reinterpret_cast<uint8_t*>(obj_after_rdz) - kValgrindRedZoneBytes;
   // Make redzones undefined.
   size_t usable_size = 0;
   AllocationSize(ptr, &usable_size);
@@ -93,8 +93,8 @@
 
 template <typename S, typename A>
 ValgrindMallocSpace<S, A>::ValgrindMallocSpace(const std::string& name, MemMap* mem_map,
-                                               A allocator, byte* begin,
-                                               byte* end, byte* limit, size_t growth_limit,
+                                               A allocator, uint8_t* begin,
+                                               uint8_t* end, uint8_t* limit, size_t growth_limit,
                                                size_t initial_size,
                                                bool can_move_objects, size_t starting_size) :
     S(name, mem_map, allocator, begin, end, limit, growth_limit, can_move_objects, starting_size,
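
All of the casts in this file are byte offsets into one layout: a front redzone, the caller-visible payload, and a back redzone, each redzone kValgrindRedZoneBytes wide. A self-contained sketch of the bookkeeping (the constant's value is illustrative, and malloc stands in for the underlying space):

#include <cstddef>
#include <cstdint>
#include <cstdlib>

constexpr size_t kValgrindRedZoneBytes = 8;  // illustrative, not ART's value

// Layout: [front redzone][payload of num_bytes][back redzone]
void* AllocWithRedzones(size_t num_bytes) {
  uint8_t* obj_with_rdz =
      static_cast<uint8_t*>(std::malloc(num_bytes + 2 * kValgrindRedZoneBytes));
  if (obj_with_rdz == nullptr) return nullptr;
  // The caller sees only the payload, past the front redzone; under
  // Valgrind both redzones would be marked NOACCESS here.
  return obj_with_rdz + kValgrindRedZoneBytes;
}

void FreeWithRedzones(void* ptr) {
  // Undo the offset to recover the pointer the allocator handed out,
  // as ValgrindMallocSpace::Free does above.
  std::free(static_cast<uint8_t*>(ptr) - kValgrindRedZoneBytes);
}

int main() {
  void* p = AllocWithRedzones(16);
  if (p != nullptr) FreeWithRedzones(p);
  return 0;
}
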
diff --git a/runtime/gc/space/valgrind_malloc_space.h b/runtime/gc/space/valgrind_malloc_space.h
index 200ad83..eb6fe9c 100644
--- a/runtime/gc/space/valgrind_malloc_space.h
+++ b/runtime/gc/space/valgrind_malloc_space.h
@@ -47,7 +47,7 @@
   }
 
   ValgrindMallocSpace(const std::string& name, MemMap* mem_map, AllocatorType allocator,
-                      byte* begin, byte* end, byte* limit, size_t growth_limit,
+                      uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
                       size_t initial_size, bool can_move_objects, size_t starting_size);
   virtual ~ValgrindMallocSpace() {}
 
diff --git a/runtime/globals.h b/runtime/globals.h
index 107e064..b7bd44d 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -24,22 +24,14 @@
 
 namespace art {
 
-typedef uint8_t byte;
-typedef intptr_t word;
-typedef uintptr_t uword;
-
 static constexpr size_t KB = 1024;
 static constexpr size_t MB = KB * KB;
 static constexpr size_t GB = KB * KB * KB;
 
 // Runtime sizes.
-static constexpr size_t kWordSize = sizeof(word);
-static constexpr size_t kPointerSize = sizeof(void*);
-
 static constexpr size_t kBitsPerByte = 8;
 static constexpr size_t kBitsPerByteLog2 = 3;
-static constexpr int kBitsPerWord = kWordSize * kBitsPerByte;
-static constexpr size_t kWordHighBitMask = static_cast<size_t>(1) << (kBitsPerWord - 1);
+static constexpr int kBitsPerIntPtrT = sizeof(intptr_t) * kBitsPerByte;
 
 // Required stack alignment
 static constexpr size_t kStackAlignment = 16;
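
With the byte/word/uword aliases gone, the rest of the change is a mechanical substitution. The correspondences the hunks rely on can be stated as compile-time checks; the pointer-size identities hold on the platforms ART targets, if not in the abstract standard:

#include <cstddef>
#include <cstdint>

// byte -> uint8_t, word -> intptr_t, uword -> uintptr_t,
// kPointerSize -> sizeof(void*), kBitsPerWord -> kBitsPerIntPtrT.
static_assert(sizeof(uint8_t) == 1, "byte was one byte");
static_assert(sizeof(intptr_t) == sizeof(void*), "word was pointer-sized");
static_assert(sizeof(uintptr_t) == sizeof(void*), "uword was pointer-sized");

static constexpr size_t kBitsPerByte = 8;
static constexpr int kBitsPerIntPtrT = sizeof(intptr_t) * kBitsPerByte;
static_assert(kBitsPerIntPtrT == static_cast<int>(sizeof(void*) * kBitsPerByte),
              "kBitsPerIntPtrT counts the same bits kBitsPerWord did");
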
diff --git a/runtime/handle_scope_test.cc b/runtime/handle_scope_test.cc
index de563c1..7afd279 100644
--- a/runtime/handle_scope_test.cc
+++ b/runtime/handle_scope_test.cc
@@ -46,17 +46,17 @@
   test_table.SetLink(reinterpret_cast<HandleScope*>(0x5678));
   test_table.SetNumberOfReferences(0x9ABC);
 
-  byte* table_base_ptr = reinterpret_cast<byte*>(&test_table);
+  uint8_t* table_base_ptr = reinterpret_cast<uint8_t*>(&test_table);
 
   {
     uintptr_t* link_ptr = reinterpret_cast<uintptr_t*>(table_base_ptr +
-        HandleScope::LinkOffset(kPointerSize));
+        HandleScope::LinkOffset(sizeof(void*)));
     EXPECT_EQ(*link_ptr, static_cast<size_t>(0x5678));
   }
 
   {
     uint32_t* num_ptr = reinterpret_cast<uint32_t*>(table_base_ptr +
-        HandleScope::NumberOfReferencesOffset(kPointerSize));
+        HandleScope::NumberOfReferencesOffset(sizeof(void*)));
     EXPECT_EQ(*num_ptr, static_cast<size_t>(0x9ABC));
   }
 
@@ -66,7 +66,7 @@
     EXPECT_EQ(sizeof(StackReference<mirror::Object>), sizeof(uint32_t));
 
     uint32_t* ref_ptr = reinterpret_cast<uint32_t*>(table_base_ptr +
-        HandleScope::ReferencesOffset(kPointerSize));
+        HandleScope::ReferencesOffset(sizeof(void*)));
     EXPECT_EQ(*ref_ptr, static_cast<uint32_t>(0x1234));
   }
 }
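
The test reads HandleScope fields by forming a uint8_t* base and adding byte offsets, the same base-plus-offset idiom used throughout this change. A stand-alone version with a hypothetical struct (Packed is not an ART type):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

struct Packed {
  uintptr_t link;
  uint32_t num_refs;
};

int main() {
  Packed p{0x5678, 0x9ABC};
  // Byte-granular view of the object, like table_base_ptr above.
  uint8_t* base = reinterpret_cast<uint8_t*>(&p);
  uintptr_t link = 0;
  // memcpy from base + offset avoids the alignment and aliasing pitfalls
  // a raw reinterpret_cast load can run into.
  std::memcpy(&link, base + offsetof(Packed, link), sizeof(link));
  assert(link == 0x5678u);
  return 0;
}
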
diff --git a/runtime/image.cc b/runtime/image.cc
index f451df9..c065d8e 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -23,8 +23,8 @@
 
 namespace art {
 
-const byte ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const byte ImageHeader::kImageVersion[] = { '0', '1', '0', '\0' };
+const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '0', '\0' };
 
 ImageHeader::ImageHeader(uint32_t image_begin,
                          uint32_t image_size,
diff --git a/runtime/image.h b/runtime/image.h
index 424a40b..ec95d01 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -44,8 +44,8 @@
   bool IsValid() const;
   const char* GetMagic() const;
 
-  byte* GetImageBegin() const {
-    return reinterpret_cast<byte*>(image_begin_);
+  uint8_t* GetImageBegin() const {
+    return reinterpret_cast<uint8_t*>(image_begin_);
   }
 
   size_t GetImageSize() const {
@@ -68,20 +68,20 @@
     oat_checksum_ = oat_checksum;
   }
 
-  byte* GetOatFileBegin() const {
-    return reinterpret_cast<byte*>(oat_file_begin_);
+  uint8_t* GetOatFileBegin() const {
+    return reinterpret_cast<uint8_t*>(oat_file_begin_);
   }
 
-  byte* GetOatDataBegin() const {
-    return reinterpret_cast<byte*>(oat_data_begin_);
+  uint8_t* GetOatDataBegin() const {
+    return reinterpret_cast<uint8_t*>(oat_data_begin_);
   }
 
-  byte* GetOatDataEnd() const {
-    return reinterpret_cast<byte*>(oat_data_end_);
+  uint8_t* GetOatDataEnd() const {
+    return reinterpret_cast<uint8_t*>(oat_data_end_);
   }
 
-  byte* GetOatFileEnd() const {
-    return reinterpret_cast<byte*>(oat_file_end_);
+  uint8_t* GetOatFileEnd() const {
+    return reinterpret_cast<uint8_t*>(oat_file_end_);
   }
 
   off_t GetPatchDelta() const {
@@ -121,11 +121,11 @@
   void RelocateImage(off_t delta);
 
  private:
-  static const byte kImageMagic[4];
-  static const byte kImageVersion[4];
+  static const uint8_t kImageMagic[4];
+  static const uint8_t kImageVersion[4];
 
-  byte magic_[4];
-  byte version_[4];
+  uint8_t magic_[4];
+  uint8_t version_[4];
 
   // Required base address for mapping the image.
   uint32_t image_begin_;
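
kImageMagic and kImageVersion (values shown in the image.cc hunk above) are raw uint8_t arrays precisely so they can be memcmp'd against the magic_/version_ fields. A hedged sketch of the kind of check IsValid() performs; the real method validates more than this:

#include <cstdint>
#include <cstring>

const uint8_t kImageMagic[4] = { 'a', 'r', 't', '\n' };
const uint8_t kImageVersion[4] = { '0', '1', '0', '\0' };

// Hypothetical standalone check in the spirit of ImageHeader::IsValid().
bool HeaderLooksValid(const uint8_t magic[4], const uint8_t version[4]) {
  return std::memcmp(magic, kImageMagic, sizeof(kImageMagic)) == 0 &&
         std::memcmp(version, kImageVersion, sizeof(kImageVersion)) == 0;
}
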
diff --git a/runtime/instruction_set_test.cc b/runtime/instruction_set_test.cc
index ac17c4f..80191b1 100644
--- a/runtime/instruction_set_test.cc
+++ b/runtime/instruction_set_test.cc
@@ -47,7 +47,7 @@
 }
 
 TEST_F(InstructionSetTest, PointerSize) {
-  EXPECT_EQ(kPointerSize, GetInstructionSetPointerSize(kRuntimeISA));
+  EXPECT_EQ(sizeof(void*), GetInstructionSetPointerSize(kRuntimeISA));
 }
 
 }  // namespace art
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index d755cb9..231e9e5 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -189,7 +189,7 @@
 // non-null, we check that actual_ptr == expected_ptr,
 // and if not, report in error_msg what the conflict mapping was if
 // found, or a generic error in other cases.
-static bool CheckMapRequest(byte* expected_ptr, void* actual_ptr, size_t byte_count,
+static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
                             std::string* error_msg) {
   // Handled first by caller for more specific error messages.
   CHECK(actual_ptr != MAP_FAILED);
@@ -234,7 +234,7 @@
   return false;
 }
 
-MemMap* MemMap::MapAnonymous(const char* name, byte* expected_ptr, size_t byte_count, int prot,
+MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byte_count, int prot,
                              bool low_4gb, std::string* error_msg) {
   if (byte_count == 0) {
     return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
@@ -377,11 +377,11 @@
   if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
     return nullptr;
   }
-  return new MemMap(name, reinterpret_cast<byte*>(actual), byte_count, actual,
+  return new MemMap(name, reinterpret_cast<uint8_t*>(actual), byte_count, actual,
                     page_aligned_byte_count, prot, false);
 }
 
-MemMap* MemMap::MapFileAtAddress(byte* expected_ptr, size_t byte_count, int prot, int flags, int fd,
+MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int prot, int flags, int fd,
                                  off_t start, bool reuse, const char* filename,
                                  std::string* error_msg) {
   CHECK_NE(0, prot);
@@ -414,9 +414,9 @@
   size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
   // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page aligned to the file but
   // not necessarily to virtual memory. mmap will page align 'expected' for us.
-  byte* page_aligned_expected = (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
+  uint8_t* page_aligned_expected = (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
 
-  byte* actual = reinterpret_cast<byte*>(mmap(page_aligned_expected,
+  uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(page_aligned_expected,
                                               page_aligned_byte_count,
                                               prot,
                                               flags,
@@ -468,7 +468,7 @@
   CHECK(found) << "MemMap not found";
 }
 
-MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_begin,
+MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
                size_t base_size, int prot, bool reuse)
     : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
       prot_(prot), reuse_(reuse) {
@@ -487,27 +487,27 @@
   }
 }
 
-MemMap* MemMap::RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
+MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
                            std::string* error_msg) {
   DCHECK_GE(new_end, Begin());
   DCHECK_LE(new_end, End());
-  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
+  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
   DCHECK(IsAligned<kPageSize>(begin_));
   DCHECK(IsAligned<kPageSize>(base_begin_));
-  DCHECK(IsAligned<kPageSize>(reinterpret_cast<byte*>(base_begin_) + base_size_));
+  DCHECK(IsAligned<kPageSize>(reinterpret_cast<uint8_t*>(base_begin_) + base_size_));
   DCHECK(IsAligned<kPageSize>(new_end));
-  byte* old_end = begin_ + size_;
-  byte* old_base_end = reinterpret_cast<byte*>(base_begin_) + base_size_;
-  byte* new_base_end = new_end;
+  uint8_t* old_end = begin_ + size_;
+  uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
+  uint8_t* new_base_end = new_end;
   DCHECK_LE(new_base_end, old_base_end);
   if (new_base_end == old_base_end) {
     return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
   }
-  size_ = new_end - reinterpret_cast<byte*>(begin_);
-  base_size_ = new_base_end - reinterpret_cast<byte*>(base_begin_);
-  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
+  size_ = new_end - reinterpret_cast<uint8_t*>(begin_);
+  base_size_ = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
+  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
   size_t tail_size = old_end - new_end;
-  byte* tail_base_begin = new_base_end;
+  uint8_t* tail_base_begin = new_base_end;
   size_t tail_base_size = old_base_end - new_base_end;
   DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
   DCHECK(IsAligned<kPageSize>(tail_base_size));
@@ -543,7 +543,7 @@
   // calls. Otherwise, libc (or something else) might take this memory
   // region. Note this isn't perfect as there's no way to prevent
   // other threads from trying to take this memory region here.
-  byte* actual = reinterpret_cast<byte*>(mmap(tail_base_begin, tail_base_size, tail_prot,
+  uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(tail_base_begin, tail_base_size, tail_prot,
                                               flags, fd.get(), 0));
   if (actual == MAP_FAILED) {
     std::string maps;
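
MapFileAtAddress's offset handling is the standard mmap recipe: round the file offset down to a page boundary, remember the remainder, and shift the requested address and byte count by that remainder. The arithmetic in isolation (RoundDown/RoundUp spelled out; kPageSize illustrative):

#include <cassert>
#include <cstddef>

constexpr size_t kPageSize = 4096;  // illustrative

struct AlignedRequest {
  size_t aligned_offset;  // file offset actually passed to mmap
  size_t page_offset;     // where the caller's data starts in the first page
  size_t aligned_count;   // whole pages to map
};

AlignedRequest AlignForMmap(size_t start, size_t byte_count) {
  AlignedRequest r;
  r.aligned_offset = start & ~(kPageSize - 1);  // RoundDown(start, kPageSize)
  r.page_offset = start - r.aligned_offset;
  r.aligned_count =                             // RoundUp(count + rem, page)
      (byte_count + r.page_offset + kPageSize - 1) & ~(kPageSize - 1);
  return r;
}

int main() {
  AlignedRequest r = AlignForMmap(/*start=*/5000, /*byte_count=*/100);
  assert(r.aligned_offset == 4096 && r.page_offset == 904);
  assert(r.aligned_count == 4096);  // 100 + 904 fits in one page
  // Begin() is then mmap's result + page_offset, matching
  // page_aligned_expected = expected_ptr - page_offset above.
  return 0;
}
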
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index e49ed48..314bf8d 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -60,7 +60,7 @@
   // a name.
   //
   // On success, returns a MemMap instance.  On failure, returns null.
-  static MemMap* MapAnonymous(const char* ashmem_name, byte* addr, size_t byte_count, int prot,
+  static MemMap* MapAnonymous(const char* ashmem_name, uint8_t* addr, size_t byte_count, int prot,
                               bool low_4gb, std::string* error_msg);
 
   // Map part of a file, taking care of non-page aligned offsets.  The
@@ -80,7 +80,7 @@
   //
   // On success, returns a MemMap instance.  On failure, returns
   // nullptr.
-  static MemMap* MapFileAtAddress(byte* addr, size_t byte_count, int prot, int flags, int fd,
+  static MemMap* MapFileAtAddress(uint8_t* addr, size_t byte_count, int prot, int flags, int fd,
                                   off_t start, bool reuse, const char* filename,
                                   std::string* error_msg);
 
@@ -99,7 +99,7 @@
     return prot_;
   }
 
-  byte* Begin() const {
+  uint8_t* Begin() const {
     return begin_;
   }
 
@@ -107,7 +107,7 @@
     return size_;
   }
 
-  byte* End() const {
+  uint8_t* End() const {
     return Begin() + Size();
   }
 
@@ -120,7 +120,7 @@
   }
 
   void* BaseEnd() const {
-    return reinterpret_cast<byte*>(BaseBegin()) + BaseSize();
+    return reinterpret_cast<uint8_t*>(BaseBegin()) + BaseSize();
   }
 
   bool HasAddress(const void* addr) const {
@@ -128,7 +128,7 @@
   }
 
   // Unmap the pages at end and remap them to create another memory map.
-  MemMap* RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
+  MemMap* RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
                      std::string* error_msg);
 
   static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
@@ -139,7 +139,7 @@
   typedef AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps> Maps;
 
  private:
-  MemMap(const std::string& name, byte* begin, size_t size, void* base_begin, size_t base_size,
+  MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin, size_t base_size,
          int prot, bool reuse) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
 
   static void DumpMapsLocked(std::ostream& os)
@@ -150,7 +150,7 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
 
   const std::string name_;
-  byte* const begin_;  // Start of data.
+  uint8_t* const begin_;  // Start of data.
   size_t size_;  // Length of data.
 
   void* const base_begin_;  // Page-aligned base address.
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index e54d0e0..a78f463 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -26,8 +26,8 @@
 
 class MemMapTest : public testing::Test {
  public:
-  static byte* BaseBegin(MemMap* mem_map) {
-    return reinterpret_cast<byte*>(mem_map->base_begin_);
+  static uint8_t* BaseBegin(MemMap* mem_map) {
+    return reinterpret_cast<uint8_t*>(mem_map->base_begin_);
   }
   static size_t BaseSize(MemMap* mem_map) {
     return mem_map->base_size_;
@@ -45,7 +45,7 @@
                                       low_4gb,
                                       &error_msg);
     // Check its state and write to it.
-    byte* base0 = m0->Begin();
+    uint8_t* base0 = m0->Begin();
     ASSERT_TRUE(base0 != nullptr) << error_msg;
     size_t size0 = m0->Size();
     EXPECT_EQ(m0->Size(), 2 * page_size);
@@ -62,7 +62,7 @@
     EXPECT_EQ(m0->Size(), page_size);
     EXPECT_EQ(BaseBegin(m0), base0);
     EXPECT_EQ(BaseSize(m0), page_size);
-    byte* base1 = m1->Begin();
+    uint8_t* base1 = m1->Begin();
     size_t size1 = m1->Size();
     EXPECT_EQ(base1, base0 + page_size);
     EXPECT_EQ(size1, page_size);
@@ -160,7 +160,7 @@
   std::string error_msg;
   // Map at an address that should work, which should succeed.
   std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
-                                              reinterpret_cast<byte*>(ART_BASE_ADDRESS),
+                                              reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS),
                                               kPageSize,
                                               PROT_READ | PROT_WRITE,
                                               false,
@@ -180,7 +180,7 @@
   ASSERT_TRUE(map1->BaseBegin() != nullptr);
   // Attempt to map at the same address, which should fail.
   std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
-                                              reinterpret_cast<byte*>(map1->BaseBegin()),
+                                              reinterpret_cast<uint8_t*>(map1->BaseBegin()),
                                               kPageSize,
                                               PROT_READ | PROT_WRITE,
                                               false,
@@ -205,7 +205,7 @@
     uintptr_t start_addr = ART_BASE_ADDRESS + 0x1000000;
     std::string error_msg;
     std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
-                                                     reinterpret_cast<byte*>(start_addr),
+                                                     reinterpret_cast<uint8_t*>(start_addr),
                                                      0x21000000,
                                                      PROT_READ | PROT_WRITE,
                                                      true,
@@ -221,7 +221,7 @@
   uintptr_t ptr = 0;
   ptr -= kPageSize;  // Now it's close to the top.
   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
-                                             reinterpret_cast<byte*>(ptr),
+                                             reinterpret_cast<uint8_t*>(ptr),
                                              2 * kPageSize,  // brings it over the top.
                                              PROT_READ | PROT_WRITE,
                                              false,
@@ -234,7 +234,7 @@
 TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
   std::string error_msg;
   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
-                                             reinterpret_cast<byte*>(UINT64_C(0x100000000)),
+                                             reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
                                              kPageSize,
                                              PROT_READ | PROT_WRITE,
                                              true,
@@ -246,7 +246,7 @@
 TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
   std::string error_msg;
   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
-                                             reinterpret_cast<byte*>(0xF0000000),
+                                             reinterpret_cast<uint8_t*>(0xF0000000),
                                              0x20000000,
                                              PROT_READ | PROT_WRITE,
                                              true,
@@ -269,7 +269,7 @@
   ASSERT_TRUE(map.get() != nullptr) << error_msg;
   ASSERT_TRUE(error_msg.empty());
   // Record the base address.
-  byte* map_base = reinterpret_cast<byte*>(map->BaseBegin());
+  uint8_t* map_base = reinterpret_cast<uint8_t*>(map->BaseBegin());
   // Unmap it.
   map.reset();
 
diff --git a/runtime/memory_region.h b/runtime/memory_region.h
index bab2e86..6459963 100644
--- a/runtime/memory_region.h
+++ b/runtime/memory_region.h
@@ -31,7 +31,7 @@
 class MemoryRegion {
  public:
   MemoryRegion() : pointer_(NULL), size_(0) {}
-  MemoryRegion(void* pointer, uword size) : pointer_(pointer), size_(size) {}
+  MemoryRegion(void* pointer, uintptr_t size) : pointer_(pointer), size_(size) {}
 
   void* pointer() const { return pointer_; }
   size_t size() const { return size_; }
@@ -41,8 +41,8 @@
     return OFFSETOF_MEMBER(MemoryRegion, pointer_);
   }
 
-  byte* start() const { return reinterpret_cast<byte*>(pointer_); }
-  byte* end() const { return start() + size_; }
+  uint8_t* start() const { return reinterpret_cast<uint8_t*>(pointer_); }
+  uint8_t* end() const { return start() + size_; }
 
   template<typename T> T Load(uintptr_t offset) const {
     return *ComputeInternalPointer<T>(offset);
@@ -98,11 +98,11 @@
 
   // Locate the bit with the given offset. Returns a pointer to the byte
   // containing the bit, and sets bit_mask to the bit within that byte.
-  byte* ComputeBitPointer(uintptr_t bit_offset, byte* bit_mask) const {
+  uint8_t* ComputeBitPointer(uintptr_t bit_offset, uint8_t* bit_mask) const {
     uintptr_t bit_remainder = (bit_offset & (kBitsPerByte - 1));
     *bit_mask = (1U << bit_remainder);
     uintptr_t byte_offset = (bit_offset >> kBitsPerByteLog2);
-    return ComputeInternalPointer<byte>(byte_offset);
+    return ComputeInternalPointer<uint8_t>(byte_offset);
   }
 
   void* pointer_;
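
ComputeBitPointer decomposes a bit offset into a byte index (offset >> 3) and an in-byte mask (1 << (offset & 7)). A worked check of the decomposition:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kBitsPerByteLog2 = 3;
  uintptr_t bit_offset = 13;
  uintptr_t bit_remainder = bit_offset & 7;  // kBitsPerByte - 1
  uint8_t bit_mask = static_cast<uint8_t>(1u << bit_remainder);
  uintptr_t byte_offset = bit_offset >> kBitsPerByteLog2;
  assert(bit_mask == 0x20);  // bit 13 is bit 5 of its byte
  assert(byte_offset == 1);  // ...and that byte is byte 1
  return 0;
}
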
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 13b5a8b..7e1ad78 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -136,10 +136,10 @@
     // DCHECK(array->IsArrayInstance());
     int32_t length = (usable_size - header_size_) >> component_size_shift_;
     DCHECK_GE(length, minimum_length_);
-    byte* old_end = reinterpret_cast<byte*>(array->GetRawData(1U << component_size_shift_,
-                                                              minimum_length_));
-    byte* new_end = reinterpret_cast<byte*>(array->GetRawData(1U << component_size_shift_,
-                                                              length));
+    uint8_t* old_end = reinterpret_cast<uint8_t*>(array->GetRawData(1U << component_size_shift_,
+                                                                    minimum_length_));
+    uint8_t* new_end = reinterpret_cast<uint8_t*>(array->GetRawData(1U << component_size_shift_,
+                                                                    length));
     // Ensure space beyond original allocation is zeroed.
     memset(old_end, 0, new_end - old_end);
     array->SetLength(length);
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 8447616..1a65d99 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -393,7 +393,7 @@
 
     // Callee saves + handle scope + method ref + alignment
     size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() + scope_size
-                                - kPointerSize  // callee-save frame stores a whole method pointer
+                                - sizeof(void*)  // callee-save frame stores a whole method pointer
                                 + sizeof(StackReference<mirror::ArtMethod>),
                                 kStackAlignment);
 
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index de6ec05..939d856 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -385,11 +385,11 @@
   size_t GetReturnPcOffsetInBytes(uint32_t frame_size_in_bytes)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK_EQ(frame_size_in_bytes, GetFrameSizeInBytes());
-    return frame_size_in_bytes - kPointerSize;
+    return frame_size_in_bytes - sizeof(void*);
   }
 
   size_t GetHandleScopeOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return kPointerSize;
+    return sizeof(void*);
   }
 
   void RegisterNative(Thread* self, const void* native_method, bool is_fast)
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 4d5f621..b89da9d 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -141,7 +141,7 @@
 #ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
   DCHECK(kUseBakerOrBrooksReadBarrier);
   MemberOffset offset = OFFSET_OF_OBJECT_MEMBER(Object, x_rb_ptr_);
-  byte* raw_addr = reinterpret_cast<byte*>(this) + offset.SizeValue();
+  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + offset.SizeValue();
   Atomic<uint32_t>* atomic_rb_ptr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
   HeapReference<Object> expected_ref(HeapReference<Object>::FromMirrorPtr(expected_rb_ptr));
   HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(rb_ptr));
@@ -602,7 +602,7 @@
   if (kVerifyFlags & kVerifyThis) {
     VerifyObject(this);
   }
-  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
 
   return atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_value, new_value);
@@ -620,7 +620,7 @@
   if (kVerifyFlags & kVerifyThis) {
     VerifyObject(this);
   }
-  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
 
   return atomic_addr->CompareExchangeWeakRelaxed(old_value, new_value);
@@ -638,7 +638,7 @@
   if (kVerifyFlags & kVerifyThis) {
     VerifyObject(this);
   }
-  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
 
   return atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_value, new_value);
@@ -682,7 +682,7 @@
 
 template<typename kSize, bool kIsVolatile>
 inline void Object::SetField(MemberOffset field_offset, kSize new_value) {
-  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   kSize* addr = reinterpret_cast<kSize*>(raw_addr);
   if (kIsVolatile) {
     reinterpret_cast<Atomic<kSize>*>(addr)->StoreSequentiallyConsistent(new_value);
@@ -693,7 +693,7 @@
 
 template<typename kSize, bool kIsVolatile>
 inline kSize Object::GetField(MemberOffset field_offset) {
-  const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
+  const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
   const kSize* addr = reinterpret_cast<const kSize*>(raw_addr);
   if (kIsVolatile) {
     return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadSequentiallyConsistent();
@@ -714,7 +714,7 @@
   if (kVerifyFlags & kVerifyThis) {
     VerifyObject(this);
   }
-  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   Atomic<int64_t>* atomic_addr = reinterpret_cast<Atomic<int64_t>*>(raw_addr);
   return atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_value, new_value);
 }
@@ -731,7 +731,7 @@
   if (kVerifyFlags & kVerifyThis) {
     VerifyObject(this);
   }
-  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   Atomic<int64_t>* atomic_addr = reinterpret_cast<Atomic<int64_t>*>(raw_addr);
   return atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_value, new_value);
 }
@@ -742,7 +742,7 @@
   if (kVerifyFlags & kVerifyThis) {
     VerifyObject(this);
   }
-  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   HeapReference<T>* objref_addr = reinterpret_cast<HeapReference<T>*>(raw_addr);
   T* result = ReadBarrier::Barrier<T, kReadBarrierOption>(this, field_offset, objref_addr);
   if (kIsVolatile) {
@@ -782,7 +782,7 @@
   if (kVerifyFlags & kVerifyWrites) {
     VerifyObject(new_value);
   }
-  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   HeapReference<Object>* objref_addr = reinterpret_cast<HeapReference<Object>*>(raw_addr);
   if (kIsVolatile) {
     // TODO: Refactor to use a SequentiallyConsistent store instead.
@@ -818,7 +818,7 @@
   if (kVerifyFlags & kVerifyThis) {
     VerifyObject(this);
   }
-  return reinterpret_cast<HeapReference<Object>*>(reinterpret_cast<byte*>(this) +
+  return reinterpret_cast<HeapReference<Object>*>(reinterpret_cast<uint8_t*>(this) +
       field_offset.Int32Value());
 }
 
@@ -842,7 +842,7 @@
   }
   HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
   HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
-  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
 
   bool success = atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_ref.reference_,
@@ -874,7 +874,7 @@
   }
   HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
   HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
-  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
 
   bool success = atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_ref.reference_,
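
Every accessor in this file reduces to the same two steps: compute a byte address this + field_offset, then reinterpret it at the field's real type (a plain scalar, an Atomic<T>, or a HeapReference). A reduced sketch with a toy struct in place of mirror::Object:

#include <cassert>
#include <cstddef>
#include <cstdint>

struct ToyObject {
  int32_t a;
  int32_t b;
  // Mirrors Object::SetField/GetField: fields addressed by byte offset.
  template <typename T>
  void SetField(size_t field_offset, T new_value) {
    uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset;
    *reinterpret_cast<T*>(raw_addr) = new_value;
  }
  template <typename T>
  T GetField(size_t field_offset) {
    const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset;
    return *reinterpret_cast<const T*>(raw_addr);
  }
};

int main() {
  ToyObject o{0, 0};
  o.SetField<int32_t>(offsetof(ToyObject, b), 7);
  assert(o.GetField<int32_t>(offsetof(ToyObject, b)) == 7);
  assert(o.b == 7);
  return 0;
}
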
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 57069ab..9578c97 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -69,8 +69,8 @@
                            size_t num_bytes) {
   // Copy instance data.  We assume memcpy copies by words.
   // TODO: expose and use move32.
-  byte* src_bytes = reinterpret_cast<byte*>(src);
-  byte* dst_bytes = reinterpret_cast<byte*>(dest);
+  uint8_t* src_bytes = reinterpret_cast<uint8_t*>(src);
+  uint8_t* dst_bytes = reinterpret_cast<uint8_t*>(dest);
   size_t offset = sizeof(Object);
   memcpy(dst_bytes + offset, src_bytes + offset, num_bytes - offset);
   if (kUseBakerOrBrooksReadBarrier) {
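
The offset = sizeof(Object) starting point is the important detail: the copy skips the object header (class pointer, lock word, and, with Baker/Brooks, the read-barrier state handled just below), touching only instance data. A toy version:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

struct Header { uint32_t klass; uint32_t monitor; };  // stand-in for Object
struct Obj { Header header; uint8_t payload[24]; };

// Copies instance data but leaves dest's header alone, as CopyObject does.
void CopyPayload(Obj* dest, const Obj* src, size_t num_bytes) {
  const uint8_t* src_bytes = reinterpret_cast<const uint8_t*>(src);
  uint8_t* dst_bytes = reinterpret_cast<uint8_t*>(dest);
  size_t offset = sizeof(Header);  // sizeof(Object) in the real code
  std::memcpy(dst_bytes + offset, src_bytes + offset, num_bytes - offset);
}

int main() {
  Obj src{{1, 2}, {}}, dest{{9, 9}, {}};
  src.payload[0] = 42;
  CopyPayload(&dest, &src, sizeof(Obj));
  assert(dest.payload[0] == 42);   // payload copied
  assert(dest.header.klass == 9);  // header untouched
  return 0;
}
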
diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h
index cb45162..5b92093 100644
--- a/runtime/monitor_pool.h
+++ b/runtime/monitor_pool.h
@@ -172,7 +172,7 @@
   // To avoid race issues when resizing, we keep all the previous arrays.
   std::vector<uintptr_t*> old_chunk_arrays_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
 
-  typedef TrackingAllocator<byte, kAllocatorTagMonitorPool> Allocator;
+  typedef TrackingAllocator<uint8_t, kAllocatorTagMonitorPool> Allocator;
   Allocator allocator_;
 
   // Start of free list of monitors.
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 23f46f4..ec7d82d 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -464,7 +464,7 @@
            class_def_index < dex_file->NumClassDefs();
            class_def_index++) {
         const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
-        const byte* class_data = dex_file->GetClassData(class_def);
+        const uint8_t* class_data = dex_file->GetClassData(class_def);
         if (class_data == NULL) {
           continue;
         }
diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h
index 9570bb5..6237767 100644
--- a/runtime/oat_file-inl.h
+++ b/runtime/oat_file-inl.h
@@ -35,7 +35,7 @@
   if (method_header == nullptr) {
     return 0u;
   }
-  return reinterpret_cast<const byte*>(method_header) - begin_;
+  return reinterpret_cast<const uint8_t*>(method_header) - begin_;
 }
 
 inline uint32_t OatFile::OatMethod::GetQuickCodeSize() const {
@@ -51,7 +51,7 @@
   if (method_header == nullptr) {
     return 0u;
   }
-  return reinterpret_cast<const byte*>(&method_header->code_size_) - begin_;
+  return reinterpret_cast<const uint8_t*>(&method_header->code_size_) - begin_;
 }
 
 inline size_t OatFile::OatMethod::GetFrameSizeInBytes() const {
@@ -88,7 +88,7 @@
   if (method_header == nullptr) {
     return 0u;
   }
-  return reinterpret_cast<const byte*>(&method_header->mapping_table_offset_) - begin_;
+  return reinterpret_cast<const uint8_t*>(&method_header->mapping_table_offset_) - begin_;
 }
 
 inline uint32_t OatFile::OatMethod::GetVmapTableOffset() const {
@@ -101,7 +101,7 @@
   if (method_header == nullptr) {
     return 0u;
   }
-  return reinterpret_cast<const byte*>(&method_header->vmap_table_offset_) - begin_;
+  return reinterpret_cast<const uint8_t*>(&method_header->vmap_table_offset_) - begin_;
 }
 
 inline const uint8_t* OatFile::OatMethod::GetMappingTable() const {
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index a8a8307..03a398e 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -68,7 +68,7 @@
 
 OatFile* OatFile::Open(const std::string& filename,
                        const std::string& location,
-                       byte* requested_base,
+                       uint8_t* requested_base,
                        bool executable,
                        std::string* error_msg) {
   CHECK(!filename.empty()) << location;
@@ -114,7 +114,7 @@
 
 OatFile* OatFile::OpenDlopen(const std::string& elf_filename,
                              const std::string& location,
-                             byte* requested_base,
+                             uint8_t* requested_base,
                              std::string* error_msg) {
   std::unique_ptr<OatFile> oat_file(new OatFile(location, true));
   bool success = oat_file->Dlopen(elf_filename, requested_base, error_msg);
@@ -126,7 +126,7 @@
 
 OatFile* OatFile::OpenElfFile(File* file,
                               const std::string& location,
-                              byte* requested_base,
+                              uint8_t* requested_base,
                               bool writable,
                               bool executable,
                               std::string* error_msg) {
@@ -153,7 +153,7 @@
   }
 }
 
-bool OatFile::Dlopen(const std::string& elf_filename, byte* requested_base,
+bool OatFile::Dlopen(const std::string& elf_filename, uint8_t* requested_base,
                      std::string* error_msg) {
   char* absolute_path = realpath(elf_filename.c_str(), NULL);
   if (absolute_path == NULL) {
@@ -166,7 +166,7 @@
     *error_msg = StringPrintf("Failed to dlopen '%s': %s", elf_filename.c_str(), dlerror());
     return false;
   }
-  begin_ = reinterpret_cast<byte*>(dlsym(dlopen_handle_, "oatdata"));
+  begin_ = reinterpret_cast<uint8_t*>(dlsym(dlopen_handle_, "oatdata"));
   if (begin_ == NULL) {
     *error_msg = StringPrintf("Failed to find oatdata symbol in '%s': %s", elf_filename.c_str(),
                               dlerror());
@@ -179,7 +179,7 @@
     ReadFileToString("/proc/self/maps", error_msg);
     return false;
   }
-  end_ = reinterpret_cast<byte*>(dlsym(dlopen_handle_, "oatlastword"));
+  end_ = reinterpret_cast<uint8_t*>(dlsym(dlopen_handle_, "oatlastword"));
   if (end_ == NULL) {
     *error_msg = StringPrintf("Failed to find oatlastword symbol in '%s': %s", elf_filename.c_str(),
                               dlerror());
@@ -190,7 +190,7 @@
   return Setup(error_msg);
 }
 
-bool OatFile::ElfFileOpen(File* file, byte* requested_base, bool writable, bool executable,
+bool OatFile::ElfFileOpen(File* file, uint8_t* requested_base, bool writable, bool executable,
                           std::string* error_msg) {
   elf_file_.reset(ElfFile::Open(file, writable, true, error_msg));
   if (elf_file_.get() == nullptr) {
@@ -229,7 +229,7 @@
     *error_msg = StringPrintf("Invalid oat magic for '%s'", GetLocation().c_str());
     return false;
   }
-  const byte* oat = Begin();
+  const uint8_t* oat = Begin();
   oat += sizeof(OatHeader);
   if (oat > End()) {
     *error_msg = StringPrintf("In oat file '%s' found truncated OatHeader", GetLocation().c_str());
@@ -350,12 +350,12 @@
   return *reinterpret_cast<const OatHeader*>(Begin());
 }
 
-const byte* OatFile::Begin() const {
+const uint8_t* OatFile::Begin() const {
   CHECK(begin_ != NULL);
   return begin_;
 }
 
-const byte* OatFile::End() const {
+const uint8_t* OatFile::End() const {
   CHECK(end_ != NULL);
   return end_;
 }
@@ -436,7 +436,7 @@
                                 const std::string& dex_file_location,
                                 const std::string& canonical_dex_file_location,
                                 uint32_t dex_file_location_checksum,
-                                const byte* dex_file_pointer,
+                                const uint8_t* dex_file_pointer,
                                 const uint32_t* oat_class_offsets_pointer)
     : oat_file_(oat_file),
       dex_file_location_(dex_file_location),
@@ -463,26 +463,26 @@
 OatFile::OatClass OatFile::OatDexFile::GetOatClass(uint16_t class_def_index) const {
   uint32_t oat_class_offset = GetOatClassOffset(class_def_index);
 
-  const byte* oat_class_pointer = oat_file_->Begin() + oat_class_offset;
+  const uint8_t* oat_class_pointer = oat_file_->Begin() + oat_class_offset;
   CHECK_LT(oat_class_pointer, oat_file_->End()) << oat_file_->GetLocation();
 
-  const byte* status_pointer = oat_class_pointer;
+  const uint8_t* status_pointer = oat_class_pointer;
   CHECK_LT(status_pointer, oat_file_->End()) << oat_file_->GetLocation();
   mirror::Class::Status status =
       static_cast<mirror::Class::Status>(*reinterpret_cast<const int16_t*>(status_pointer));
   CHECK_LT(status, mirror::Class::kStatusMax);
 
-  const byte* type_pointer = status_pointer + sizeof(uint16_t);
+  const uint8_t* type_pointer = status_pointer + sizeof(uint16_t);
   CHECK_LT(type_pointer, oat_file_->End()) << oat_file_->GetLocation();
   OatClassType type = static_cast<OatClassType>(*reinterpret_cast<const uint16_t*>(type_pointer));
   CHECK_LT(type, kOatClassMax);
 
-  const byte* after_type_pointer = type_pointer + sizeof(int16_t);
+  const uint8_t* after_type_pointer = type_pointer + sizeof(int16_t);
   CHECK_LE(after_type_pointer, oat_file_->End()) << oat_file_->GetLocation();
 
   uint32_t bitmap_size = 0;
-  const byte* bitmap_pointer = nullptr;
-  const byte* methods_pointer = nullptr;
+  const uint8_t* bitmap_pointer = nullptr;
+  const uint8_t* methods_pointer = nullptr;
   if (type != kOatClassNoneCompiled) {
     if (type == kOatClassSomeCompiled) {
       bitmap_size = static_cast<uint32_t>(*reinterpret_cast<const uint32_t*>(after_type_pointer));
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index b9d5702..734b9b3 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -48,7 +48,7 @@
   // optionally be used to request where the file should be loaded.
   static OatFile* Open(const std::string& filename,
                        const std::string& location,
-                       byte* requested_base,
+                       uint8_t* requested_base,
                        bool executable,
                        std::string* error_msg);
 
@@ -148,7 +148,7 @@
     uint32_t GetVmapTableOffsetOffset() const;
 
     // Create an OatMethod with offsets relative to the given base address
-    OatMethod(const byte* base, const uint32_t code_offset, const uint32_t gc_map_offset)
+    OatMethod(const uint8_t* base, const uint32_t code_offset, const uint32_t gc_map_offset)
       : begin_(base),
         code_offset_(code_offset),
         native_gc_map_offset_(gc_map_offset) {
@@ -170,7 +170,7 @@
       return reinterpret_cast<T>(begin_ + offset);
     }
 
-    const byte* const begin_;
+    const uint8_t* const begin_;
 
     const uint32_t code_offset_;
     const uint32_t native_gc_map_offset_;
@@ -272,14 +272,14 @@
                const std::string& dex_file_location,
                const std::string& canonical_dex_file_location,
                uint32_t dex_file_checksum,
-               const byte* dex_file_pointer,
+               const uint8_t* dex_file_pointer,
                const uint32_t* oat_class_offsets_pointer);
 
     const OatFile* const oat_file_;
     const std::string dex_file_location_;
     const std::string canonical_dex_file_location_;
     const uint32_t dex_file_location_checksum_;
-    const byte* const dex_file_pointer_;
+    const uint8_t* const dex_file_pointer_;
     const uint32_t* const oat_class_offsets_pointer_;
 
     friend class OatFile;
@@ -299,27 +299,27 @@
     return End() - Begin();
   }
 
-  const byte* Begin() const;
-  const byte* End() const;
+  const uint8_t* Begin() const;
+  const uint8_t* End() const;
 
  private:
   static void CheckLocation(const std::string& location);
 
   static OatFile* OpenDlopen(const std::string& elf_filename,
                              const std::string& location,
-                             byte* requested_base,
+                             uint8_t* requested_base,
                              std::string* error_msg);
 
   static OatFile* OpenElfFile(File* file,
                               const std::string& location,
-                              byte* requested_base,
+                              uint8_t* requested_base,
                               bool writable,
                               bool executable,
                               std::string* error_msg);
 
   explicit OatFile(const std::string& filename, bool executable);
-  bool Dlopen(const std::string& elf_filename, byte* requested_base, std::string* error_msg);
-  bool ElfFileOpen(File* file, byte* requested_base, bool writable, bool executable,
+  bool Dlopen(const std::string& elf_filename, uint8_t* requested_base, std::string* error_msg);
+  bool ElfFileOpen(File* file, uint8_t* requested_base, bool writable, bool executable,
                    std::string* error_msg);
   bool Setup(std::string* error_msg);
 
@@ -329,10 +329,10 @@
   const std::string location_;
 
   // Pointer to OatHeader.
-  const byte* begin_;
+  const uint8_t* begin_;
 
   // Pointer to end of oat region for bounds checking.
-  const byte* end_;
+  const uint8_t* end_;
 
   // Was this oat_file loaded executable?
   const bool is_executable_;
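
The OatMethod pieces above store uint32_t offsets and materialize typed pointers from a uint8_t* base on demand; the same pattern in isolation (a sketch, with T assumed to be a pointer type):

    #include <cstdint>

    // Sketch of the offset->pointer pattern used by OatMethod above: offsets
    // are kept as uint32_t relative to a uint8_t* base and turned into typed
    // pointers on demand.
    template <typename T>
    T OffsetToPointer(const uint8_t* base, uint32_t offset) {
      return reinterpret_cast<T>(base + offset);
    }

    // usage (illustrative):
    //   const void* code = OffsetToPointer<const void*>(begin, code_offset);
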
diff --git a/runtime/stack.cc b/runtime/stack.cc
index b8b10d2..008941f 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -392,16 +392,16 @@
 }
 
 uintptr_t StackVisitor::GetReturnPc() const {
-  byte* sp = reinterpret_cast<byte*>(GetCurrentQuickFrame());
+  uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
   DCHECK(sp != NULL);
-  byte* pc_addr = sp + GetMethod()->GetReturnPcOffsetInBytes();
+  uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffsetInBytes();
   return *reinterpret_cast<uintptr_t*>(pc_addr);
 }
 
 void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
-  byte* sp = reinterpret_cast<byte*>(GetCurrentQuickFrame());
+  uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
   CHECK(sp != NULL);
-  byte* pc_addr = sp + GetMethod()->GetReturnPcOffsetInBytes();
+  uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffsetInBytes();
   *reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
 }
 
@@ -544,7 +544,7 @@
         size_t frame_size = method->GetFrameSizeInBytes();
         // Compute PC for next stack frame from return PC.
         size_t return_pc_offset = method->GetReturnPcOffsetInBytes(frame_size);
-        byte* return_pc_addr = reinterpret_cast<byte*>(cur_quick_frame_) + return_pc_offset;
+        uint8_t* return_pc_addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + return_pc_offset;
         uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);
         if (UNLIKELY(exit_stubs_installed)) {
           // While profiling, the return pc is restored from the side stack, except when walking
@@ -574,7 +574,7 @@
           }
         }
         cur_quick_frame_pc_ = return_pc;
-        byte* next_frame = reinterpret_cast<byte*>(cur_quick_frame_) + frame_size;
+        uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
         cur_quick_frame_ = reinterpret_cast<StackReference<mirror::ArtMethod>*>(next_frame);
         cur_depth_++;
         method = cur_quick_frame_->AsMirrorPtr();
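
GetReturnPc/SetReturnPc above reduce to byte-offset addressing into the quick frame, which is exactly what uint8_t* buys over the old byte typedef: well-defined one-byte pointer arithmetic. A standalone sketch of the pair (the parameters are illustrative stand-ins):

    #include <cstddef>
    #include <cstdint>

    // The saved PC lives at a method-specific byte offset into the frame.
    uintptr_t ReadReturnPc(void* frame, size_t return_pc_offset) {
      uint8_t* pc_addr = reinterpret_cast<uint8_t*>(frame) + return_pc_offset;
      return *reinterpret_cast<uintptr_t*>(pc_addr);
    }

    void WriteReturnPc(void* frame, size_t return_pc_offset, uintptr_t new_pc) {
      uint8_t* pc_addr = reinterpret_cast<uint8_t*>(frame) + return_pc_offset;
      *reinterpret_cast<uintptr_t*>(pc_addr) = new_pc;
    }
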
diff --git a/runtime/stack.h b/runtime/stack.h
index 44e36c4..25e50a1 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -484,10 +484,10 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     // Callee saves are held at the top of the frame
     DCHECK(GetMethod() != nullptr);
-    byte* save_addr =
-        reinterpret_cast<byte*>(cur_quick_frame_) + frame_size - ((num + 1) * kPointerSize);
+    uint8_t* save_addr =
+        reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size - ((num + 1) * sizeof(void*));
 #if defined(__i386__) || defined(__x86_64__)
-    save_addr -= kPointerSize;  // account for return address
+    save_addr -= sizeof(void*);  // account for return address
 #endif
     return reinterpret_cast<uintptr_t*>(save_addr);
   }
@@ -557,7 +557,7 @@
                         uint16_t vreg) const {
     int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg, kRuntimeISA);
     DCHECK_EQ(cur_quick_frame, GetCurrentQuickFrame());
-    byte* vreg_addr = reinterpret_cast<byte*>(cur_quick_frame) + offset;
+    uint8_t* vreg_addr = reinterpret_cast<uint8_t*>(cur_quick_frame) + offset;
     return reinterpret_cast<uint32_t*>(vreg_addr);
   }
 
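sizeof(void*) is the pointer size of the target this header is compiled for, which is what kPointerSize meant here: callee saves occupy pointer-sized slots counted down from the top of the frame, with one extra slot skipped on x86/x86-64 for the pushed return address. The same computation as a standalone sketch:

    #include <cstddef>
    #include <cstdint>

    // Save slot 'num' sits (num + 1) pointer-sized slots below the top of a
    // 'frame_size'-byte frame.
    uintptr_t* CalleeSaveAddress(void* frame, size_t frame_size, int num) {
      uint8_t* save_addr =
          reinterpret_cast<uint8_t*>(frame) + frame_size - ((num + 1) * sizeof(void*));
    #if defined(__i386__) || defined(__x86_64__)
      save_addr -= sizeof(void*);  // skip the return address pushed by 'call'
    #endif
      return reinterpret_cast<uintptr_t*>(save_addr);
    }
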
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 170cec6..e1b5b91 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -198,9 +198,9 @@
   DCHECK_LE(tlsPtr_.thread_local_alloc_stack_top, tlsPtr_.thread_local_alloc_stack_end);
   if (tlsPtr_.thread_local_alloc_stack_top < tlsPtr_.thread_local_alloc_stack_end) {
     // There's room.
-    DCHECK_LE(reinterpret_cast<byte*>(tlsPtr_.thread_local_alloc_stack_top) +
+    DCHECK_LE(reinterpret_cast<uint8_t*>(tlsPtr_.thread_local_alloc_stack_top) +
                   sizeof(mirror::Object*),
-              reinterpret_cast<byte*>(tlsPtr_.thread_local_alloc_stack_end));
+              reinterpret_cast<uint8_t*>(tlsPtr_.thread_local_alloc_stack_end));
     DCHECK(*tlsPtr_.thread_local_alloc_stack_top == nullptr);
     *tlsPtr_.thread_local_alloc_stack_top = obj;
     ++tlsPtr_.thread_local_alloc_stack_top;
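
The DCHECK pair above states the push precondition in byte terms; the push itself is a bounded bump of the top cursor. A standalone sketch (ObjSketch stands in for mirror::Object):

    struct ObjSketch;  // stand-in for mirror::Object

    // Room is checked before the store; the top cursor advances one slot per
    // pushed object.
    struct AllocStackSketch {
      ObjSketch** top;
      ObjSketch** end;

      bool TryPush(ObjSketch* obj) {
        if (top < end) {  // there is room for one more slot
          *top = obj;
          ++top;
          return true;
        }
        return false;  // caller must flush or grow the stack
      }
    };
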
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 07657d1..b0c8fe1 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -235,7 +235,7 @@
 }
 
 // Global variable to prevent the compiler optimizing away the page reads for the stack.
-byte dont_optimize_this;
+uint8_t dont_optimize_this;
 
 // Install a protected region in the stack.  This is used to trigger a SIGSEGV if a stack
 // overflow is detected.  It is located right below the stack_begin_.
@@ -249,9 +249,9 @@
 // this by reading every page from the stack bottom (highest address) to the stack top.
 // We then madvise this away.
 void Thread::InstallImplicitProtection() {
-  byte* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
-  byte* stack_himem = tlsPtr_.stack_end;
-  byte* stack_top = reinterpret_cast<byte*>(reinterpret_cast<uintptr_t>(&stack_himem) &
+  uint8_t* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
+  uint8_t* stack_himem = tlsPtr_.stack_end;
+  uint8_t* stack_top = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(&stack_himem) &
       ~(kPageSize - 1));    // Page containing current top of stack.
 
   // First remove the protection on the protected region as we will want to read and
@@ -265,7 +265,7 @@
   // a segv.
 
   // Read every page from the high address to the low.
-  for (byte* p = stack_top; p >= pregion; p -= kPageSize) {
+  for (uint8_t* p = stack_top; p >= pregion; p -= kPageSize) {
     dont_optimize_this = *p;
   }
 
@@ -496,7 +496,7 @@
                                 PrettySize(read_stack_size).c_str(),
                                 PrettySize(read_guard_size).c_str());
 
-  tlsPtr_.stack_begin = reinterpret_cast<byte*>(read_stack_base);
+  tlsPtr_.stack_begin = reinterpret_cast<uint8_t*>(read_stack_base);
   tlsPtr_.stack_size = read_stack_size;
 
   // The minimum stack size we can cope with is the overflow reserved bytes (typically
@@ -2264,7 +2264,7 @@
   }
 }
 
-void Thread::SetTlab(byte* start, byte* end) {
+void Thread::SetTlab(uint8_t* start, uint8_t* end) {
   DCHECK_LE(start, end);
   tlsPtr_.thread_local_start = start;
   tlsPtr_.thread_local_pos  = tlsPtr_.thread_local_start;
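
The page-touch loop above only works if the reads survive optimization, hence the global sink. The same idiom in isolation (this sketch uses volatile to make the intent explicit; ART relies on the global's external visibility):

    #include <cstddef>
    #include <cstdint>

    volatile uint8_t page_touch_sink;

    // Read one byte per page from 'high' down to 'low' so every page is
    // actually mapped before the guard region below the stack is protected.
    void TouchPages(uint8_t* low, uint8_t* high, size_t page_size) {
      for (uint8_t* p = high; p >= low; p -= page_size) {
        page_touch_sink = *p;
      }
    }
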
diff --git a/runtime/thread.h b/runtime/thread.h
index 6c427b8..998e472 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -584,7 +584,7 @@
     return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
   }
 
-  byte* GetStackEndForInterpreter(bool implicit_overflow_check) const {
+  uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const {
     if (implicit_overflow_check) {
       // The interpreter needs the extra overflow bytes that stack_end does
       // not include.
@@ -594,7 +594,7 @@
     }
   }
 
-  byte* GetStackEnd() const {
+  uint8_t* GetStackEnd() const {
     return tlsPtr_.stack_end;
   }
 
@@ -790,7 +790,7 @@
   size_t TlabSize() const;
   // Doesn't check that there is room.
   mirror::Object* AllocTlab(size_t bytes);
-  void SetTlab(byte* start, byte* end);
+  void SetTlab(uint8_t* start, uint8_t* end);
   bool HasTlab() const;
 
   // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
@@ -1043,14 +1043,14 @@
     }
 
     // The biased card table, see CardTable for details.
-    byte* card_table;
+    uint8_t* card_table;
 
     // The pending exception or NULL.
     mirror::Throwable* exception;
 
     // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
     // We leave extra space so there's room for the code that throws StackOverflowError.
-    byte* stack_end;
+    uint8_t* stack_end;
 
     // The top of the managed stack often manipulated directly by compiler generated code.
     ManagedStack managed_stack;
@@ -1073,7 +1073,7 @@
     jobject jpeer;
 
     // The "lowest addressable byte" of the stack.
-    byte* stack_begin;
+    uint8_t* stack_begin;
 
     // Size of the stack.
     size_t stack_size;
@@ -1137,9 +1137,9 @@
     QuickEntryPoints quick_entrypoints;
 
     // Thread-local allocation pointer.
-    byte* thread_local_start;
-    byte* thread_local_pos;
-    byte* thread_local_end;
+    uint8_t* thread_local_start;
+    uint8_t* thread_local_pos;
+    uint8_t* thread_local_end;
     size_t thread_local_objects;
 
     // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
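
thread_local_start/pos/end above are a classic bump-pointer TLAB triple, and AllocTlab is documented above as not checking for room itself. A sketch of that shape (not ART's exact code):

    #include <cstddef>
    #include <cstdint>

    // Allocation bumps 'pos' toward 'end'; like ART's AllocTlab, Alloc assumes
    // the caller already checked there is room.
    struct TlabSketch {
      uint8_t* start;
      uint8_t* pos;
      uint8_t* end;

      void* Alloc(size_t bytes) {
        uint8_t* result = pos;
        pos += bytes;
        return result;
      }

      size_t BytesLeft() const { return static_cast<size_t>(end - pos); }
    };
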
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 0496d97..0688c1a 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -92,7 +92,7 @@
   // (On Mac OS 10.7, it's the end.)
   int stack_variable;
   if (stack_addr > &stack_variable) {
-    *stack_base = reinterpret_cast<byte*>(stack_addr) - *stack_size;
+    *stack_base = reinterpret_cast<uint8_t*>(stack_addr) - *stack_size;
   } else {
     *stack_base = stack_addr;
   }
@@ -1369,11 +1369,11 @@
 }
 
 bool IsDexMagic(uint32_t magic) {
-  return DexFile::IsMagicValid(reinterpret_cast<const byte*>(&magic));
+  return DexFile::IsMagicValid(reinterpret_cast<const uint8_t*>(&magic));
 }
 
 bool IsOatMagic(uint32_t magic) {
-  return (memcmp(reinterpret_cast<const byte*>(magic),
+  return (memcmp(reinterpret_cast<const uint8_t*>(&magic),
                  OatHeader::kOatMagic,
                  sizeof(OatHeader::kOatMagic)) == 0);
 }
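
Both helpers above compare the four bytes at the start of a file, so the comparison has to go through the address of the uint32_t, as IsDexMagic does with &magic. A sketch of the shared idea (the 4-byte array type for the expected magic is an assumption, not OatHeader's exact declaration):

    #include <cstdint>
    #include <cstring>

    // 'magic' holds a file's first four bytes, so compare its object
    // representation byte-for-byte.
    bool MagicMatches(uint32_t magic, const uint8_t (&expected)[4]) {
      return std::memcmp(reinterpret_cast<const uint8_t*>(&magic), expected,
                         sizeof(expected)) == 0;
    }
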
diff --git a/runtime/utils.h b/runtime/utils.h
index 4fcd380..53b49c8 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -108,23 +108,23 @@
   DCHECK(::art::IsAlignedParam(value, alignment)) << reinterpret_cast<const void*>(value)
 
 // Check whether an N-bit two's-complement representation can hold value.
-static inline bool IsInt(int N, word value) {
+static inline bool IsInt(int N, intptr_t value) {
   CHECK_LT(0, N);
-  CHECK_LT(N, kBitsPerWord);
-  word limit = static_cast<word>(1) << (N - 1);
+  CHECK_LT(N, kBitsPerIntPtrT);
+  intptr_t limit = static_cast<intptr_t>(1) << (N - 1);
   return (-limit <= value) && (value < limit);
 }
 
-static inline bool IsUint(int N, word value) {
+static inline bool IsUint(int N, intptr_t value) {
   CHECK_LT(0, N);
-  CHECK_LT(N, kBitsPerWord);
-  word limit = static_cast<word>(1) << N;
+  CHECK_LT(N, kBitsPerIntPtrT);
+  intptr_t limit = static_cast<intptr_t>(1) << N;
   return (0 <= value) && (value < limit);
 }
 
-static inline bool IsAbsoluteUint(int N, word value) {
+static inline bool IsAbsoluteUint(int N, intptr_t value) {
   CHECK_LT(0, N);
-  CHECK_LT(N, kBitsPerWord);
+  CHECK_LT(N, kBitsPerIntPtrT);
   if (value < 0) value = -value;
   return IsUint(N, value);
 }
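
Concretely, IsInt(N, v) asks whether v fits an N-bit two's-complement integer, i.e. -2^(N-1) <= v < 2^(N-1), and IsUint(N, v) whether 0 <= v < 2^N. A standalone restatement with assert standing in for the CHECK macros:

    #include <cassert>
    #include <cstdint>

    static bool SketchIsInt(int n, intptr_t value) {
      assert(0 < n && n < static_cast<int>(sizeof(intptr_t) * 8));
      intptr_t limit = static_cast<intptr_t>(1) << (n - 1);
      return (-limit <= value) && (value < limit);
    }

    int main() {
      assert(SketchIsInt(8, 127));    // 2^7 - 1 is the largest int8_t
      assert(!SketchIsInt(8, 128));   // needs 9 bits
      assert(SketchIsInt(8, -128));   // -2^7 still fits
      assert(!SketchIsInt(8, -129));  // one past the negative limit
      return 0;
    }
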
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 9747b4e..fb07ba0 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -133,7 +133,7 @@
                                                         bool allow_soft_failures,
                                                         std::string* error) {
   DCHECK(class_def != nullptr);
-  const byte* class_data = dex_file->GetClassData(*class_def);
+  const uint8_t* class_data = dex_file->GetClassData(*class_def);
   if (class_data == nullptr) {
     // empty class, probably a marker interface
     return kNoFailure;
@@ -659,7 +659,7 @@
     }
   }
   // Iterate over each of the handlers to verify target addresses.
-  const byte* handlers_ptr = DexFile::GetCatchHandlerData(*code_item_, 0);
+  const uint8_t* handlers_ptr = DexFile::GetCatchHandlerData(*code_item_, 0);
   uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
   ClassLinker* linker = Runtime::Current()->GetClassLinker();
   for (uint32_t idx = 0; idx < handlers_size; idx++) {
@@ -3012,7 +3012,7 @@
 const RegType& MethodVerifier::GetCaughtExceptionType() {
   const RegType* common_super = nullptr;
   if (code_item_->tries_size_ != 0) {
-    const byte* handlers_ptr = DexFile::GetCatchHandlerData(*code_item_, 0);
+    const uint8_t* handlers_ptr = DexFile::GetCatchHandlerData(*code_item_, 0);
     uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
     for (uint32_t i = 0; i < handlers_size; i++) {
       CatchHandlerIterator iterator(handlers_ptr);
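
Every handler walk in this file leans on DecodeUnsignedLeb128 advancing the cursor it is handed; dex encodes these counts as ULEB128, seven payload bits per byte with the high bit flagging continuation. A minimal decoder sketch with the same cursor convention (not ART's exact implementation):

    #include <cstdint>

    uint32_t DecodeUleb128Sketch(const uint8_t** cursor) {
      const uint8_t* p = *cursor;
      uint32_t result = 0;
      int shift = 0;
      uint8_t byte;
      do {
        byte = *p++;
        result |= static_cast<uint32_t>(byte & 0x7f) << shift;
        shift += 7;
      } while ((byte & 0x80) != 0 && shift < 32);  // well-formed dex uses <= 5 bytes
      *cursor = p;  // leave the cursor just past the encoded value
      return result;
    }
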