Remove Frame; merge the shadow and quick frame representations.
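
Replace the Frame class and Thread::StackVisitor/Thread::WalkStack with a
single StackVisitor that walks both the quick (compiled) frames and the
shadow frames held on a thread's ManagedStack. Callers now construct a
visitor from the thread's managed stack and trace stack and drive the walk
themselves. The debugger, monitor lock sampling and allocation tracking
record dex pcs instead of native pcs, JDWP frame ids come from
StackVisitor::GetFrameId() rather than raw stack pointers, the compiler
driver's Context is renamed CompilationContext so it no longer collides
with the register-context Context used during stack walking, and
Thread::StackReferencesContain becomes SirtContains.

As an illustrative sketch of the new pattern (taken from the
GetStackDepth() change in debugger.cc in this patch), a visitor now
subclasses StackVisitor and calls WalkStack() itself:

    struct CountStackDepthVisitor : public StackVisitor {
      CountStackDepthVisitor(const ManagedStack* stack,
                             const std::vector<TraceStackFrame>* trace_stack)
          : StackVisitor(stack, trace_stack), depth(0) {}

      bool VisitFrame() {
        // Runtime (callee-save) methods no longer appear as Frame objects
        // without a Method*; they are filtered explicitly.
        if (!GetMethod()->IsRuntimeMethod()) {
          ++depth;
        }
        return true;  // Keep walking.
      }

      size_t depth;
    };

    CountStackDepthVisitor visitor(thread->GetManagedStack(),
                                   thread->GetTraceStack());
    visitor.WalkStack();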

Change-Id: I5ae03a5e52111792d2df7e83cbd89ab25777844b
diff --git a/src/asm_support.h b/src/asm_support.h
index e776e53..415747e 100644
--- a/src/asm_support.h
+++ b/src/asm_support.h
@@ -29,15 +29,15 @@
 #define rSELF r9
 #define rLR r14
 // Offset of field Thread::suspend_count_ verified in InitCpu
-#define THREAD_SUSPEND_COUNT_OFFSET 124
+#define THREAD_SUSPEND_COUNT_OFFSET 0
 // Offset of field Thread::exception_ verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 120
+#define THREAD_EXCEPTION_OFFSET 8
 
 #elif defined(__i386__)
 // Offset of field Thread::self_ verified in InitCpu
-#define THREAD_SELF_OFFSET 112
+#define THREAD_SELF_OFFSET 36
 // Offset of field Thread::exception_ verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 120
+#define THREAD_EXCEPTION_OFFSET 8
 #endif
 
 #endif  // ART_SRC_ASM_SUPPORT_H_
diff --git a/src/check_jni.cc b/src/check_jni.cc
index 4da4b37..0fd5f6e 100644
--- a/src/check_jni.cc
+++ b/src/check_jni.cc
@@ -80,7 +80,7 @@
 
 static bool IsSirtLocalRef(JNIEnv* env, jobject localRef) {
   return GetIndirectRefKind(localRef) == kSirtOrInvalid &&
-      reinterpret_cast<JNIEnvExt*>(env)->self->StackReferencesContain(localRef);
+      reinterpret_cast<JNIEnvExt*>(env)->self->SirtContains(localRef);
 }
 
 template<typename T>
diff --git a/src/compiler.cc b/src/compiler.cc
index 8e24fc2..2633b78 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -923,9 +923,9 @@
   return true;
 }
 
-class Context {
+class CompilationContext {
  public:
-  Context(ClassLinker* class_linker,
+  CompilationContext(ClassLinker* class_linker,
           const ClassLoader* class_loader,
           Compiler* compiler,
           DexCache* dex_cache,
@@ -964,11 +964,11 @@
   const DexFile* dex_file_;
 };
 
-typedef void Callback(Context* context, size_t index);
+typedef void Callback(CompilationContext* context, size_t index);
 
 class WorkerThread {
  public:
-  WorkerThread(Context* context, size_t begin, size_t end, Callback callback, size_t stripe, bool spawn)
+  WorkerThread(CompilationContext* context, size_t begin, size_t end, Callback callback, size_t stripe, bool spawn)
       : spawn_(spawn), context_(context), begin_(begin), end_(end), callback_(callback), stripe_(stripe) {
     if (spawn_) {
       // Mac OS stacks are only 512KiB. Make sure we have the same stack size on all platforms.
@@ -1017,16 +1017,16 @@
   pthread_t pthread_;
   bool spawn_;
 
-  Context* context_;
+  CompilationContext* context_;
   size_t begin_;
   size_t end_;
   Callback* callback_;
   size_t stripe_;
 
-  friend void ForAll(Context*, size_t, size_t, Callback, size_t);
+  friend void ForAll(CompilationContext*, size_t, size_t, Callback, size_t);
 };
 
-void ForAll(Context* context, size_t begin, size_t end, Callback callback, size_t thread_count) {
+void ForAll(CompilationContext* context, size_t begin, size_t end, Callback callback, size_t thread_count) {
   Thread* self = Thread::Current();
   CHECK(!self->IsExceptionPending()) << PrettyTypeOf(self->GetException());
   CHECK_GT(thread_count, 0U);
@@ -1042,7 +1042,7 @@
   STLDeleteElements(&threads);
 }
 
-static void ResolveClassFieldsAndMethods(Context* context, size_t class_def_index) {
+static void ResolveClassFieldsAndMethods(CompilationContext* context, size_t class_def_index) {
   const DexFile& dex_file = *context->GetDexFile();
 
   // Method and Field are the worst. We can't resolve without either
@@ -1108,7 +1108,7 @@
   DCHECK(!it.HasNext());
 }
 
-static void ResolveType(Context* context, size_t type_idx) {
+static void ResolveType(CompilationContext* context, size_t type_idx) {
   // Class derived values are more complicated, they require the linker and loader.
   Thread* self = Thread::Current();
   Class* klass = context->GetClassLinker()->ResolveType(*context->GetDexFile(),
@@ -1135,7 +1135,7 @@
     timings.AddSplit("Resolve " + dex_file.GetLocation() + " Strings");
   }
 
-  Context context(class_linker, class_loader, this, dex_cache, &dex_file);
+  CompilationContext context(class_linker, class_loader, this, dex_cache, &dex_file);
   ForAll(&context, 0, dex_cache->NumResolvedTypes(), ResolveType, thread_count_);
   timings.AddSplit("Resolve " + dex_file.GetLocation() + " Types");
 
@@ -1152,7 +1152,7 @@
   }
 }
 
-static void VerifyClass(Context* context, size_t class_def_index) {
+static void VerifyClass(CompilationContext* context, size_t class_def_index) {
   const DexFile::ClassDef& class_def = context->GetDexFile()->GetClassDef(class_def_index);
   const char* descriptor = context->GetDexFile()->GetClassDescriptor(class_def);
   Class* klass = context->GetClassLinker()->FindClass(descriptor, context->GetClassLoader());
@@ -1194,7 +1194,7 @@
   dex_file.ChangePermissions(PROT_READ | PROT_WRITE);
 
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  Context context(class_linker, class_loader, this, class_linker->FindDexCache(dex_file), &dex_file);
+  CompilationContext context(class_linker, class_loader, this, class_linker->FindDexCache(dex_file), &dex_file);
   ForAll(&context, 0, dex_file.NumClassDefs(), VerifyClass, thread_count_);
 
   dex_file.ChangePermissions(PROT_READ);
@@ -1255,7 +1255,7 @@
 // the future.
 class DexFilesWorkerThread {
  public:
-  DexFilesWorkerThread(Context *worker_context, Callback class_callback,
+  DexFilesWorkerThread(CompilationContext *worker_context, Callback class_callback,
                        const std::vector<const DexFile*>& dex_files,
                        volatile int32_t* shared_class_index, bool spawn)
       : spawn_(spawn), worker_context_(worker_context),
@@ -1308,10 +1308,10 @@
 
     // TODO: Add a callback to let the client specify the class_linker and
     //       dex_cache in the context for the current working dex file.
-    context_ = new Context(/* class_linker */NULL,
-                           worker_context_->GetClassLoader(),
-                           worker_context_->GetCompiler(),
-                           /* dex_cache */NULL, dex_file);
+    context_ = new CompilationContext(/* class_linker */NULL,
+                                      worker_context_->GetClassLoader(),
+                                      worker_context_->GetCompiler(),
+                                      /* dex_cache */NULL, dex_file);
 
     CHECK(context_ != NULL);
   }
@@ -1356,19 +1356,19 @@
   pthread_t pthread_;
   bool spawn_;
 
-  Context* worker_context_;
+  CompilationContext* worker_context_;
   Callback* class_callback_;
   const std::vector<const DexFile*>& dex_files_;
 
-  Context* context_;
+  CompilationContext* context_;
   volatile int32_t* shared_class_index_;
 
-  friend void ForClassesInAllDexFiles(Context*,
+  friend void ForClassesInAllDexFiles(CompilationContext*,
                                       const std::vector<const DexFile*>&,
                                       Callback, size_t);
 };
 
-void ForClassesInAllDexFiles(Context* worker_context,
+void ForClassesInAllDexFiles(CompilationContext* worker_context,
                              const std::vector<const DexFile*>& dex_files,
                              Callback class_callback, size_t thread_count) {
   Thread* self = Thread::Current();
@@ -1396,7 +1396,7 @@
   if (dex_files.size() <= 0) {
     return;  // No dex file
   }
-  Context context(NULL, class_loader, this, NULL, NULL);
+  CompilationContext context(NULL, class_loader, this, NULL, NULL);
   ForClassesInAllDexFiles(&context, dex_files, Compiler::CompileClass, thread_count_);
 #else
   for (size_t i = 0; i != dex_files.size(); ++i) {
@@ -1407,7 +1407,7 @@
 #endif
 }
 
-void Compiler::CompileClass(Context* context, size_t class_def_index) {
+void Compiler::CompileClass(CompilationContext* context, size_t class_def_index) {
   const ClassLoader* class_loader = context->GetClassLoader();
   const DexFile& dex_file = *context->GetDexFile();
   const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
@@ -1466,7 +1466,7 @@
 }
 
 void Compiler::CompileDexFile(const ClassLoader* class_loader, const DexFile& dex_file) {
-  Context context(NULL, class_loader, this, NULL, &dex_file);
+  CompilationContext context(NULL, class_loader, this, NULL, &dex_file);
   ForAll(&context, 0, dex_file.NumClassDefs(), Compiler::CompileClass, thread_count_);
 }
 
diff --git a/src/compiler.h b/src/compiler.h
index b36d807..8f5d5b4 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -36,7 +36,7 @@
 namespace art {
 
 class AOTCompilationStats;
-class Context;
+class CompilationContext;
 class OatCompilationUnit;
 class TimingLogger;
 
@@ -278,7 +278,7 @@
   void CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags, uint32_t method_idx,
                      const ClassLoader* class_loader, const DexFile& dex_file);
 
-  static void CompileClass(Context* context, size_t class_def_index);
+  static void CompileClass(CompilationContext* context, size_t class_def_index);
 
   void SetGcMaps(const ClassLoader* class_loader, const std::vector<const DexFile*>& dex_files);
   void SetGcMapsDexFile(const ClassLoader* class_loader, const DexFile& dex_file);
diff --git a/src/compiler/codegen/RallocUtil.cc b/src/compiler/codegen/RallocUtil.cc
index affb545..2088cdc 100644
--- a/src/compiler/codegen/RallocUtil.cc
+++ b/src/compiler/codegen/RallocUtil.cc
@@ -1244,8 +1244,8 @@
 /* Returns sp-relative offset in bytes for a VReg */
 extern int oatVRegOffset(CompilationUnit* cUnit, int vReg)
 {
-  return Frame::GetVRegOffset(cUnit->code_item, cUnit->coreSpillMask,
-                              cUnit->fpSpillMask, cUnit->frameSize, vReg);
+  return StackVisitor::GetVRegOffset(cUnit->code_item, cUnit->coreSpillMask,
+                                     cUnit->fpSpillMask, cUnit->frameSize, vReg);
 }
 
 /* Returns sp-relative offset in bytes for a SReg */
diff --git a/src/compiler_llvm/jni_compiler.cc b/src/compiler_llvm/jni_compiler.cc
index bb45fac..37ca82f 100644
--- a/src/compiler_llvm/jni_compiler.cc
+++ b/src/compiler_llvm/jni_compiler.cc
@@ -27,7 +27,6 @@
 #include "object.h"
 #include "runtime.h"
 #include "runtime_support_func.h"
-#include "shadow_frame.h"
 #include "utils_llvm.h"
 
 #include <llvm/BasicBlock.h>
@@ -115,7 +114,7 @@
   // Store the dex pc
   irb_.StoreToObjectOffset(shadow_frame_,
                            ShadowFrame::DexPCOffset(),
-                           irb_.getInt32(0),
+                           irb_.getInt32(DexFile::kDexNoIndex),
                            kTBAAShadowFrame);
 
   // Push the shadow frame
diff --git a/src/compiler_llvm/method_compiler.cc b/src/compiler_llvm/method_compiler.cc
index 754f3ba..eef71db 100644
--- a/src/compiler_llvm/method_compiler.cc
+++ b/src/compiler_llvm/method_compiler.cc
@@ -28,7 +28,6 @@
 #include "object_utils.h"
 #include "runtime_support_func.h"
 #include "runtime_support_llvm.h"
-#include "shadow_frame.h"
 #include "stl_util.h"
 #include "stringprintf.h"
 #include "utils_llvm.h"
diff --git a/src/compiler_llvm/runtime_support_builder.cc b/src/compiler_llvm/runtime_support_builder.cc
index 00a8efa..8bbac94 100644
--- a/src/compiler_llvm/runtime_support_builder.cc
+++ b/src/compiler_llvm/runtime_support_builder.cc
@@ -20,7 +20,6 @@
 #include "ir_builder.h"
 #include "monitor.h"
 #include "object.h"
-#include "shadow_frame.h"
 #include "thread.h"
 #include "utils_llvm.h"
 
diff --git a/src/compiler_llvm/runtime_support_llvm.cc b/src/compiler_llvm/runtime_support_llvm.cc
index 155d824..04073e0 100644
--- a/src/compiler_llvm/runtime_support_llvm.cc
+++ b/src/compiler_llvm/runtime_support_llvm.cc
@@ -26,7 +26,6 @@
 #include "runtime_support_func_list.h"
 #include "runtime_support_llvm.h"
 #include "ScopedLocalRef.h"
-#include "shadow_frame.h"
 #include "thread.h"
 #include "thread_list.h"
 #include "utils_llvm.h"
@@ -124,9 +123,7 @@
 void art_throw_no_such_method_from_code(int32_t method_idx) {
   Thread* thread = art_get_current_thread_from_code();
   // We need the calling method as context for the method_idx
-  Frame frame = thread->GetTopOfStack();
-  frame.Next();
-  Method* method = frame.GetMethod();
+  Method* method = thread->GetCurrentMethod();
   thread->ThrowNewException("Ljava/lang/NoSuchMethodError;",
                             MethodNameFromIndex(method,
                                                 method_idx,
@@ -136,8 +133,8 @@
 
 void art_throw_null_pointer_exception_from_code(uint32_t dex_pc) {
   Thread* thread = art_get_current_thread_from_code();
-  NthCallerVisitor visitor(0);
-  thread->WalkStack(&visitor);
+  NthCallerVisitor visitor(thread->GetManagedStack(), 0);
+  visitor.WalkStack();
   Method* throw_method = visitor.caller;
   ThrowNullPointerExceptionFromDexPC(thread, throw_method, dex_pc);
 }
diff --git a/src/debugger.cc b/src/debugger.cc
index 604560e..e3c30f8 100644
--- a/src/debugger.cc
+++ b/src/debugger.cc
@@ -96,10 +96,10 @@
 
 struct AllocRecordStackTraceElement {
   Method* method;
-  uintptr_t raw_pc;
+  uint32_t dex_pc;
 
   int32_t LineNumber() const {
-    return MethodHelper(method).GetLineNumFromNativePC(raw_pc);
+    return MethodHelper(method).GetLineNumFromDexPC(dex_pc);
   }
 };
 
@@ -947,7 +947,7 @@
 #endif
 }
 
-static void SetLocation(JDWP::JdwpLocation& location, Method* m, uintptr_t native_pc) {
+static void SetLocation(JDWP::JdwpLocation& location, Method* m, uint32_t dex_pc) {
   if (m == NULL) {
     memset(&location, 0, sizeof(location));
   } else {
@@ -955,7 +955,7 @@
     location.type_tag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
     location.class_id = gRegistry->Add(c);
     location.method_id = ToMethodId(m);
-    location.dex_pc = m->IsNative() ? -1 : m->ToDexPC(native_pc);
+    location.dex_pc = dex_pc;
   }
 }
 
@@ -1467,18 +1467,21 @@
 }
 
 static int GetStackDepth(Thread* thread) {
-  struct CountStackDepthVisitor : public Thread::StackVisitor {
-    CountStackDepthVisitor() : depth(0) {}
-    bool VisitFrame(const Frame& f, uintptr_t) {
-      if (f.HasMethod()) {
+  struct CountStackDepthVisitor : public StackVisitor {
+    CountStackDepthVisitor(const ManagedStack* stack,
+                           const std::vector<TraceStackFrame>* trace_stack) :
+                             StackVisitor(stack, trace_stack), depth(0) {}
+
+    bool VisitFrame() {
+      if (!GetMethod()->IsRuntimeMethod()) {
         ++depth;
       }
       return true;
     }
     size_t depth;
   };
-  CountStackDepthVisitor visitor;
-  thread->WalkStack(&visitor);
+  CountStackDepthVisitor visitor(thread->GetManagedStack(), thread->GetTraceStack());
+  visitor.WalkStack();
   return visitor.depth;
 }
 
@@ -1487,32 +1490,37 @@
   return GetStackDepth(DecodeThread(threadId));
 }
 
-void Dbg::GetThreadFrame(JDWP::ObjectId threadId, int desired_frame_number, JDWP::FrameId* pFrameId, JDWP::JdwpLocation* pLoc) {
+void Dbg::GetThreadFrame(JDWP::ObjectId threadId, int desired_frame_number, JDWP::FrameId* pFrameId,
+                         JDWP::JdwpLocation* pLoc) {
   ScopedThreadListLock thread_list_lock;
-  struct GetFrameVisitor : public Thread::StackVisitor {
-    GetFrameVisitor(int desired_frame_number, JDWP::FrameId* pFrameId, JDWP::JdwpLocation* pLoc)
-        : depth(0), desired_frame_number(desired_frame_number), pFrameId(pFrameId), pLoc(pLoc) {
+  struct GetFrameVisitor : public StackVisitor {
+    GetFrameVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack,
+                    int desired_frame_number, JDWP::FrameId* pFrameId, JDWP::JdwpLocation* pLoc) :
+                      StackVisitor(stack, trace_stack), depth(0),
+                      desired_frame_number(desired_frame_number), pFrameId(pFrameId), pLoc(pLoc) {
     }
-    bool VisitFrame(const Frame& f, uintptr_t pc) {
-      if (!f.HasMethod()) {
+
+    bool VisitFrame() {
+      if (GetMethod()->IsRuntimeMethod()) {
         return true; // The debugger can't do anything useful with a frame that has no Method*.
       }
       if (depth == desired_frame_number) {
-        *pFrameId = reinterpret_cast<JDWP::FrameId>(f.GetSP());
-        SetLocation(*pLoc, f.GetMethod(), pc);
+        *pFrameId = GetFrameId();
+        SetLocation(*pLoc, GetMethod(), GetDexPc());
         return false;
       }
       ++depth;
       return true;
     }
     int depth;
-    int desired_frame_number;
-    JDWP::FrameId* pFrameId;
-    JDWP::JdwpLocation* pLoc;
+    const int desired_frame_number;
+    JDWP::FrameId* const pFrameId;
+    JDWP::JdwpLocation* const pLoc;
   };
-  GetFrameVisitor visitor(desired_frame_number, pFrameId, pLoc);
-  visitor.desired_frame_number = desired_frame_number;
-  DecodeThread(threadId)->WalkStack(&visitor);
+  Thread* thread = DecodeThread(threadId);
+  GetFrameVisitor visitor(thread->GetManagedStack(), thread->GetTraceStack(), desired_frame_number,
+                          pFrameId, pLoc);
+  visitor.WalkStack();
 }
 
 JDWP::ObjectId Dbg::GetThreadSelfId() {
@@ -1556,180 +1564,252 @@
   Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
 }
 
-static Object* GetThis(Frame& f) {
-  Method* m = f.GetMethod();
-  Object* o = NULL;
-  if (!m->IsNative() && !m->IsStatic()) {
-    uint16_t reg = DemangleSlot(0, m);
-    o = reinterpret_cast<Object*>(f.GetVReg(m, reg));
+struct GetThisVisitor : public StackVisitor {
+  GetThisVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack,
+                 Context* context, JDWP::FrameId frameId) :
+    StackVisitor(stack, trace_stack, context), thisObject_(NULL), frameId_(frameId) {}
+
+
+  virtual bool VisitFrame() {
+    if (frameId_ != GetFrameId()) {
+      return true;  // continue
+    }
+    Method* m = GetMethod();
+    if (m->IsNative() || m->IsStatic()) {
+      thisObject_ = NULL;
+    } else {
+      uint16_t reg = DemangleSlot(0, m);
+      thisObject_ = reinterpret_cast<Object*>(GetVReg(m, reg));
+    }
+    return false;
   }
-  return o;
+
+  Object* thisObject_;
+  JDWP::FrameId frameId_;
+};
+
+static Object* GetThis(Method** quickFrame) {
+  struct FrameIdVisitor : public StackVisitor {
+    FrameIdVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack,
+                   Method** m) : StackVisitor(stack, trace_stack),
+        quick_frame_to_find_(m) , frame_id_(0) {}
+
+    virtual bool VisitFrame() {
+      if (quick_frame_to_find_ != GetCurrentQuickFrame()) {
+        return true;  // Continue.
+      }
+      frame_id_ = GetFrameId();
+      return false;  // Stop.
+    }
+
+    Method** const quick_frame_to_find_;
+    JDWP::FrameId frame_id_;
+  };
+
+  Method* m = *quickFrame;
+  if (m->IsNative() || m->IsStatic()) {
+    return NULL;
+  }
+  Thread* self = Thread::Current();
+  const ManagedStack* stack = self->GetManagedStack();
+  const std::vector<TraceStackFrame>* trace_stack = self->GetTraceStack();
+  FrameIdVisitor frameIdVisitor(stack, trace_stack, quickFrame);
+  frameIdVisitor.WalkStack();
+  UniquePtr<Context> context(Context::Create());
+  GetThisVisitor getThisVisitor(stack, trace_stack, context.get(), frameIdVisitor.frame_id_);
+  getThisVisitor.WalkStack();
+  return getThisVisitor.thisObject_;
 }
 
 void Dbg::GetThisObject(JDWP::FrameId frameId, JDWP::ObjectId* pThisId) {
-  Method** sp = reinterpret_cast<Method**>(frameId);
-  Frame f(sp);
-  Object* o = GetThis(f);
-  *pThisId = gRegistry->Add(o);
+  UniquePtr<Context> context(Context::Create());
+  Thread* self = Thread::Current();
+  GetThisVisitor visitor(self->GetManagedStack(), self->GetTraceStack(), context.get(), frameId);
+  visitor.WalkStack();
+  *pThisId = gRegistry->Add(visitor.thisObject_);
 }
 
-void Dbg::GetLocalValue(JDWP::ObjectId /*threadId*/, JDWP::FrameId frameId, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
-  Method** sp = reinterpret_cast<Method**>(frameId);
-  Frame f(sp);
-  Method* m = f.GetMethod();
-  uint16_t reg = DemangleSlot(slot, m);
-
-#if defined(ART_USE_LLVM_COMPILER)
-  UNIMPLEMENTED(FATAL);
-#else
-  const VmapTable vmap_table(m->GetVmapTableRaw());
-  uint32_t vmap_offset;
-  if (vmap_table.IsInContext(reg, vmap_offset)) {
-    UNIMPLEMENTED(FATAL) << "Don't know how to pull locals from callee save frames: " << vmap_offset;
-  }
-#endif
-
-  // TODO: check that the tag is compatible with the actual type of the slot!
-
-  switch (tag) {
-  case JDWP::JT_BOOLEAN:
-    {
-      CHECK_EQ(width, 1U);
-      uint32_t intVal = f.GetVReg(m, reg);
-      VLOG(jdwp) << "get boolean local " << reg << " = " << intVal;
-      JDWP::Set1(buf+1, intVal != 0);
+void Dbg::GetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) { 
+  struct GetLocalVisitor : public StackVisitor {
+    GetLocalVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack,
+                    Context* context, JDWP::FrameId frameId, int slot, JDWP::JdwpTag tag,
+                    uint8_t* buf, size_t width) :
+      StackVisitor(stack, trace_stack, context), frame_id_(frameId), slot_(slot), tag_(tag),
+      buf_(buf), width_(width) {
     }
-    break;
-  case JDWP::JT_BYTE:
-    {
-      CHECK_EQ(width, 1U);
-      uint32_t intVal = f.GetVReg(m, reg);
-      VLOG(jdwp) << "get byte local " << reg << " = " << intVal;
-      JDWP::Set1(buf+1, intVal);
-    }
-    break;
-  case JDWP::JT_SHORT:
-  case JDWP::JT_CHAR:
-    {
-      CHECK_EQ(width, 2U);
-      uint32_t intVal = f.GetVReg(m, reg);
-      VLOG(jdwp) << "get short/char local " << reg << " = " << intVal;
-      JDWP::Set2BE(buf+1, intVal);
-    }
-    break;
-  case JDWP::JT_INT:
-  case JDWP::JT_FLOAT:
-    {
-      CHECK_EQ(width, 4U);
-      uint32_t intVal = f.GetVReg(m, reg);
-      VLOG(jdwp) << "get int/float local " << reg << " = " << intVal;
-      JDWP::Set4BE(buf+1, intVal);
-    }
-    break;
-  case JDWP::JT_ARRAY:
-    {
-      CHECK_EQ(width, sizeof(JDWP::ObjectId));
-      Object* o = reinterpret_cast<Object*>(f.GetVReg(m, reg));
-      VLOG(jdwp) << "get array local " << reg << " = " << o;
-      if (!Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
-        LOG(FATAL) << "Register " << reg << " expected to hold array: " << o;
+    bool VisitFrame() {
+      if (GetFrameId() != frame_id_) {
+        return true;  // Not our frame, carry on.
       }
-      JDWP::SetObjectId(buf+1, gRegistry->Add(o));
-    }
-    break;
-  case JDWP::JT_CLASS_LOADER:
-  case JDWP::JT_CLASS_OBJECT:
-  case JDWP::JT_OBJECT:
-  case JDWP::JT_STRING:
-  case JDWP::JT_THREAD:
-  case JDWP::JT_THREAD_GROUP:
-    {
-      CHECK_EQ(width, sizeof(JDWP::ObjectId));
-      Object* o = reinterpret_cast<Object*>(f.GetVReg(m, reg));
-      VLOG(jdwp) << "get object local " << reg << " = " << o;
-      if (!Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
-        LOG(FATAL) << "Register " << reg << " expected to hold object: " << o;
-      }
-      tag = TagFromObject(o);
-      JDWP::SetObjectId(buf+1, gRegistry->Add(o));
-    }
-    break;
-  case JDWP::JT_DOUBLE:
-  case JDWP::JT_LONG:
-    {
-      CHECK_EQ(width, 8U);
-      uint32_t lo = f.GetVReg(m, reg);
-      uint64_t hi = f.GetVReg(m, reg + 1);
-      uint64_t longVal = (hi << 32) | lo;
-      VLOG(jdwp) << "get double/long local " << hi << ":" << lo << " = " << longVal;
-      JDWP::Set8BE(buf+1, longVal);
-    }
-    break;
-  default:
-    LOG(FATAL) << "Unknown tag " << tag;
-    break;
-  }
+      // TODO: check that the tag is compatible with the actual type of the slot!
+      Method* m = GetMethod();
+      uint16_t reg = DemangleSlot(slot_, m);
 
-  // Prepend tag, which may have been updated.
-  JDWP::Set1(buf, tag);
+      switch (tag_) {
+      case JDWP::JT_BOOLEAN:
+        {
+          CHECK_EQ(width_, 1U);
+          uint32_t intVal = GetVReg(m, reg);
+          VLOG(jdwp) << "get boolean local " << reg << " = " << intVal;
+          JDWP::Set1(buf_+1, intVal != 0);
+        }
+        break;
+      case JDWP::JT_BYTE:
+        {
+          CHECK_EQ(width_, 1U);
+          uint32_t intVal = GetVReg(m, reg);
+          VLOG(jdwp) << "get byte local " << reg << " = " << intVal;
+          JDWP::Set1(buf_+1, intVal);
+        }
+        break;
+      case JDWP::JT_SHORT:
+      case JDWP::JT_CHAR:
+        {
+          CHECK_EQ(width_, 2U);
+          uint32_t intVal = GetVReg(m, reg);
+          VLOG(jdwp) << "get short/char local " << reg << " = " << intVal;
+          JDWP::Set2BE(buf_+1, intVal);
+        }
+        break;
+      case JDWP::JT_INT:
+      case JDWP::JT_FLOAT:
+        {
+          CHECK_EQ(width_, 4U);
+          uint32_t intVal = GetVReg(m, reg);
+          VLOG(jdwp) << "get int/float local " << reg << " = " << intVal;
+          JDWP::Set4BE(buf_+1, intVal);
+        }
+        break;
+      case JDWP::JT_ARRAY:
+        {
+          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
+          Object* o = reinterpret_cast<Object*>(GetVReg(m, reg));
+          VLOG(jdwp) << "get array local " << reg << " = " << o;
+          if (!Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
+            LOG(FATAL) << "Register " << reg << " expected to hold array: " << o;
+          }
+          JDWP::SetObjectId(buf_+1, gRegistry->Add(o));
+        }
+        break;
+      case JDWP::JT_CLASS_LOADER:
+      case JDWP::JT_CLASS_OBJECT:
+      case JDWP::JT_OBJECT:
+      case JDWP::JT_STRING:
+      case JDWP::JT_THREAD:
+      case JDWP::JT_THREAD_GROUP:
+        {
+          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
+          Object* o = reinterpret_cast<Object*>(GetVReg(m, reg));
+          VLOG(jdwp) << "get object local " << reg << " = " << o;
+          if (!Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
+            LOG(FATAL) << "Register " << reg << " expected to hold object: " << o;
+          }
+          tag_ = TagFromObject(o);
+          JDWP::SetObjectId(buf_+1, gRegistry->Add(o));
+        }
+        break;
+      case JDWP::JT_DOUBLE:
+      case JDWP::JT_LONG:
+        {
+          CHECK_EQ(width_, 8U);
+          uint32_t lo = GetVReg(m, reg);
+          uint64_t hi = GetVReg(m, reg + 1);
+          uint64_t longVal = (hi << 32) | lo;
+          VLOG(jdwp) << "get double/long local " << hi << ":" << lo << " = " << longVal;
+          JDWP::Set8BE(buf_+1, longVal);
+        }
+        break;
+      default:
+        LOG(FATAL) << "Unknown tag " << tag_;
+        break;
+      }
+
+      // Prepend tag, which may have been updated.
+      JDWP::Set1(buf_, tag_);
+      return false;
+    }
+
+    const JDWP::FrameId frame_id_;
+    const int slot_;
+    JDWP::JdwpTag tag_;
+    uint8_t* const buf_;
+    const size_t width_;
+  };
+  Thread* thread = DecodeThread(threadId);
+  UniquePtr<Context> context(Context::Create());
+  GetLocalVisitor visitor(thread->GetManagedStack(), thread->GetTraceStack(), context.get(),
+                          frameId, slot, tag, buf, width);
+  visitor.WalkStack();
 }
 
-void Dbg::SetLocalValue(JDWP::ObjectId /*threadId*/, JDWP::FrameId frameId, int slot, JDWP::JdwpTag tag, uint64_t value, size_t width) {
-  Method** sp = reinterpret_cast<Method**>(frameId);
-  Frame f(sp);
-  Method* m = f.GetMethod();
-  uint16_t reg = DemangleSlot(slot, m);
-
-#if defined(ART_USE_LLVM_COMPILER)
-  UNIMPLEMENTED(FATAL);
-#else
-  const VmapTable vmap_table(m->GetVmapTableRaw());
-  uint32_t vmap_offset;
-  if (vmap_table.IsInContext(reg, vmap_offset)) {
-    UNIMPLEMENTED(FATAL) << "Don't know how to pull locals from callee save frames: " << vmap_offset;
-  }
-#endif
-
-  // TODO: check that the tag is compatible with the actual type of the slot!
-
-  switch (tag) {
-  case JDWP::JT_BOOLEAN:
-  case JDWP::JT_BYTE:
-    CHECK_EQ(width, 1U);
-    f.SetVReg(m, reg, static_cast<uint32_t>(value));
-    break;
-  case JDWP::JT_SHORT:
-  case JDWP::JT_CHAR:
-    CHECK_EQ(width, 2U);
-    f.SetVReg(m, reg, static_cast<uint32_t>(value));
-    break;
-  case JDWP::JT_INT:
-  case JDWP::JT_FLOAT:
-    CHECK_EQ(width, 4U);
-    f.SetVReg(m, reg, static_cast<uint32_t>(value));
-    break;
-  case JDWP::JT_ARRAY:
-  case JDWP::JT_OBJECT:
-  case JDWP::JT_STRING:
-    {
-      CHECK_EQ(width, sizeof(JDWP::ObjectId));
-      Object* o = gRegistry->Get<Object*>(static_cast<JDWP::ObjectId>(value));
-      if (o == kInvalidObject) {
-        UNIMPLEMENTED(FATAL) << "return an error code when given an invalid object to store";
-      }
-      f.SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)));
+void Dbg::SetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot, JDWP::JdwpTag tag,
+                        uint64_t value, size_t width) {
+  struct SetLocalVisitor : public StackVisitor {
+    SetLocalVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack,
+                    JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint64_t value,
+                    size_t width) :
+                      StackVisitor(stack, trace_stack), frame_id_(frame_id), slot_(slot), tag_(tag),
+                      value_(value), width_(width) {
     }
-    break;
-  case JDWP::JT_DOUBLE:
-  case JDWP::JT_LONG:
-    CHECK_EQ(width, 8U);
-    f.SetVReg(m, reg, static_cast<uint32_t>(value));
-    f.SetVReg(m, reg + 1, static_cast<uint32_t>(value >> 32));
-    break;
-  default:
-    LOG(FATAL) << "Unknown tag " << tag;
-    break;
-  }
+    bool VisitFrame() {
+      if (GetFrameId() != frame_id_) {
+        return true;  // Not our frame, carry on.
+      }
+      // TODO: check that the tag is compatible with the actual type of the slot!
+      Method* m = GetMethod();
+      uint16_t reg = DemangleSlot(slot_, m);
+
+      switch (tag_) {
+        case JDWP::JT_BOOLEAN:
+        case JDWP::JT_BYTE:
+          CHECK_EQ(width_, 1U);
+          SetVReg(m, reg, static_cast<uint32_t>(value_));
+          break;
+        case JDWP::JT_SHORT:
+        case JDWP::JT_CHAR:
+          CHECK_EQ(width_, 2U);
+          SetVReg(m, reg, static_cast<uint32_t>(value_));
+          break;
+        case JDWP::JT_INT:
+        case JDWP::JT_FLOAT:
+          CHECK_EQ(width_, 4U);
+          SetVReg(m, reg, static_cast<uint32_t>(value_));
+          break;
+        case JDWP::JT_ARRAY:
+        case JDWP::JT_OBJECT:
+        case JDWP::JT_STRING:
+        {
+          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
+          Object* o = gRegistry->Get<Object*>(static_cast<JDWP::ObjectId>(value_));
+          if (o == kInvalidObject) {
+            UNIMPLEMENTED(FATAL) << "return an error code when given an invalid object to store";
+          }
+          SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)));
+        }
+        break;
+        case JDWP::JT_DOUBLE:
+        case JDWP::JT_LONG:
+          CHECK_EQ(width_, 8U);
+          SetVReg(m, reg, static_cast<uint32_t>(value_));
+          SetVReg(m, reg + 1, static_cast<uint32_t>(value_ >> 32));
+          break;
+        default:
+          LOG(FATAL) << "Unknown tag " << tag_;
+          break;
+      }
+      return false;
+    }
+
+    const JDWP::FrameId frame_id_;
+    const int slot_;
+    const JDWP::JdwpTag tag_;
+    const uint64_t value_;
+    const size_t width_;
+  };
+  Thread* thread = DecodeThread(threadId);
+  SetLocalVisitor visitor(thread->GetManagedStack(), thread->GetTraceStack(), frameId, slot, tag,
+                          value, width);
+  visitor.WalkStack();
 }
 
 void Dbg::PostLocationEvent(const Method* m, int dex_pc, Object* this_object, int event_flags) {
@@ -1753,19 +1833,20 @@
   }
 }
 
-void Dbg::PostException(Method** sp, Method* throwMethod, uintptr_t throwNativePc, Method* catchMethod, uintptr_t catchNativePc, Object* exception) {
+void Dbg::PostException(JDWP::FrameId throwFrameId, Method* throwMethod, uint32_t throwDexPc,
+                        Method* catchMethod, uint32_t catchDexPc, Throwable* exception) {
   if (!IsDebuggerActive()) {
     return;
   }
 
   JDWP::JdwpLocation throw_location;
-  SetLocation(throw_location, throwMethod, throwNativePc);
+  SetLocation(throw_location, throwMethod, throwDexPc);
   JDWP::JdwpLocation catch_location;
-  SetLocation(catch_location, catchMethod, catchNativePc);
+  SetLocation(catch_location, catchMethod, catchDexPc);
 
   // We need 'this' for InstanceOnly filters.
   JDWP::ObjectId this_id;
-  GetThisObject(reinterpret_cast<JDWP::FrameId>(sp), &this_id);
+  GetThisObject(throwFrameId, &this_id);
 
   /*
    * Hand the event to the JDWP exception handler.  Note we're using the
@@ -1800,15 +1881,13 @@
     return;
   }
 
-  Frame f(sp);
-  f.Next(); // Skip callee save frame.
-  Method* m = f.GetMethod();
+  Method* m = self->GetCurrentMethod();
 
   if (dex_pc == -1) {
     // We use a pc of -1 to represent method entry, since we might branch back to pc 0 later.
     // This means that for this special notification, there can't be anything else interesting
     // going on, so we're done already.
-    Dbg::PostLocationEvent(m, 0, GetThis(f), kMethodEntry);
+    Dbg::PostLocationEvent(m, 0, GetThis(sp), kMethodEntry);
     return;
   }
 
@@ -1899,7 +1978,7 @@
   // If there's something interesting going on, see if it matches one
   // of the debugger filters.
   if (event_flags != 0) {
-    Dbg::PostLocationEvent(m, dex_pc, GetThis(f), event_flags);
+    Dbg::PostLocationEvent(m, dex_pc, GetThis(sp), event_flags);
   }
 }
 
@@ -1942,32 +2021,34 @@
   // is for step-out.
   //
 
-  struct SingleStepStackVisitor : public Thread::StackVisitor {
-    SingleStepStackVisitor() {
+  struct SingleStepStackVisitor : public StackVisitor {
+    SingleStepStackVisitor(const ManagedStack* stack,
+                           const std::vector<TraceStackFrame>* trace_stack) :
+                             StackVisitor(stack, trace_stack) {
       MutexLock mu(gBreakpointsLock); // Keep GCC happy.
       gSingleStepControl.method = NULL;
       gSingleStepControl.stack_depth = 0;
     }
-    bool VisitFrame(const Frame& f, uintptr_t pc) {
+    bool VisitFrame() {
       MutexLock mu(gBreakpointsLock); // Keep GCC happy.
-      if (f.HasMethod()) {
+      const Method* m = GetMethod();
+      if (!m->IsRuntimeMethod()) {
         ++gSingleStepControl.stack_depth;
         if (gSingleStepControl.method == NULL) {
-          const Method* m = f.GetMethod();
           const DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
           gSingleStepControl.method = m;
           gSingleStepControl.line_number = -1;
           if (dex_cache != NULL) {
             const DexFile& dex_file = Runtime::Current()->GetClassLinker()->FindDexFile(dex_cache);
-            gSingleStepControl.line_number = dex_file.GetLineNumFromPC(m, m->ToDexPC(pc));
+            gSingleStepControl.line_number = dex_file.GetLineNumFromPC(m, GetDexPc());
           }
         }
       }
       return true;
     }
   };
-  SingleStepStackVisitor visitor;
-  thread->WalkStack(&visitor);
+  SingleStepStackVisitor visitor(thread->GetManagedStack(), thread->GetTraceStack());
+  visitor.WalkStack();
 
   //
   // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
@@ -2835,17 +2916,20 @@
   }
 }
 
-struct AllocRecordStackVisitor : public Thread::StackVisitor {
-  explicit AllocRecordStackVisitor(AllocRecord* record) : record(record), depth(0) {
+struct AllocRecordStackVisitor : public StackVisitor {
+  AllocRecordStackVisitor(const ManagedStack* stack,
+                          const std::vector<TraceStackFrame>* trace_stack, AllocRecord* record) :
+    StackVisitor(stack, trace_stack), record(record), depth(0) {
   }
 
-  bool VisitFrame(const Frame& f, uintptr_t pc) {
+  bool VisitFrame() {
     if (depth >= kMaxAllocRecordStackDepth) {
       return false;
     }
-    if (f.HasMethod()) {
-      record->stack[depth].method = f.GetMethod();
-      record->stack[depth].raw_pc = pc;
+    Method* m = GetMethod();
+    if (!m->IsRuntimeMethod()) {
+      record->stack[depth].method = m;
+      record->stack[depth].dex_pc = GetDexPc();
       ++depth;
     }
     return true;
@@ -2855,7 +2939,7 @@
     // Clear out any unused stack trace elements.
     for (; depth < kMaxAllocRecordStackDepth; ++depth) {
       record->stack[depth].method = NULL;
-      record->stack[depth].raw_pc = 0;
+      record->stack[depth].dex_pc = 0;
     }
   }
 
@@ -2884,8 +2968,8 @@
   record->thin_lock_id = self->GetThinLockId();
 
   // Fill in the stack trace.
-  AllocRecordStackVisitor visitor(record);
-  self->WalkStack(&visitor);
+  AllocRecordStackVisitor visitor(self->GetManagedStack(), self->GetTraceStack(), record);
+  visitor.WalkStack();
 
   if (gAllocRecordCount < kNumAllocRecords) {
     ++gAllocRecordCount;
diff --git a/src/debugger.h b/src/debugger.h
index 9dffa7c..c6880f7 100644
--- a/src/debugger.h
+++ b/src/debugger.h
@@ -214,7 +214,7 @@
     kMethodExit     = 0x08,
   };
   static void PostLocationEvent(const Method* method, int pcOffset, Object* thisPtr, int eventFlags);
-  static void PostException(Method** sp, Method* throwMethod, uintptr_t throwNativePc, Method* catchMethod, uintptr_t catchNativePc, Object* exception);
+  static void PostException(JDWP::FrameId frameId, Method* throwMethod, uint32_t throwDexPc, Method* catchMethod, uint32_t catchDexPc, Throwable* exception);
   static void PostThreadStart(Thread* t);
   static void PostThreadDeath(Thread* t);
   static void PostClassPrepare(Class* c);
diff --git a/src/exception_test.cc b/src/exception_test.cc
index 2730954..90bcb7c 100644
--- a/src/exception_test.cc
+++ b/src/exception_test.cc
@@ -184,6 +184,9 @@
 
 #if !defined(ART_USE_LLVM_COMPILER)
   thread->SetTopOfStack(NULL, 0); // Disarm the assertion that no code is running when we detach.
+#else
+  thread->PopShadowFrame();
+  thread->PopShadowFrame();
 #endif
 }
 
diff --git a/src/heap.cc b/src/heap.cc
index 55a8afe..55d9c03 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -471,9 +471,10 @@
 Object* Heap::AllocateLocked(AllocSpace* space, size_t alloc_size) {
   lock_->AssertHeld();
 
-  // Since allocation can cause a GC which will need to SuspendAll,
-  // make sure all allocators are in the kRunnable state.
-  CHECK_EQ(Thread::Current()->GetState(), kRunnable);
+  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
+  // done in the runnable state where suspension is expected.
+  DCHECK_EQ(Thread::Current()->GetState(), kRunnable);
+  Thread::Current()->AssertThreadSuspensionIsAllowable();
 
   // Fail impossible allocations
   if (alloc_size > space->Capacity()) {
diff --git a/src/indirect_reference_table.cc b/src/indirect_reference_table.cc
index 69f171c..81b87ef 100644
--- a/src/indirect_reference_table.cc
+++ b/src/indirect_reference_table.cc
@@ -226,7 +226,7 @@
 
   JavaVMExt* vm = Runtime::Current()->GetJavaVM();
   if (GetIndirectRefKind(iref) == kSirtOrInvalid &&
-      Thread::Current()->StackReferencesContain(reinterpret_cast<jobject>(iref))) {
+      Thread::Current()->SirtContains(reinterpret_cast<jobject>(iref))) {
     LOG(WARNING) << "Attempt to remove local SIRT entry from IRT, ignoring";
     return true;
   }
diff --git a/src/jni_internal.cc b/src/jni_internal.cc
index 716bb85..fa79a01 100644
--- a/src/jni_internal.cc
+++ b/src/jni_internal.cc
@@ -2376,7 +2376,7 @@
       return JNIWeakGlobalRefType;
     case kSirtOrInvalid:
       // Is it in a stack IRT?
-      if (ts.Self()->StackReferencesContain(java_object)) {
+      if (ts.Self()->SirtContains(java_object)) {
         return JNILocalRefType;
       }
 
diff --git a/src/monitor.cc b/src/monitor.cc
index 30e0172..8bb7e45 100644
--- a/src/monitor.cc
+++ b/src/monitor.cc
@@ -118,7 +118,7 @@
       wait_set_(NULL),
       lock_("a monitor lock"),
       locking_method_(NULL),
-      locking_pc_(0) {
+      locking_dex_pc_(0) {
 }
 
 Monitor::~Monitor() {
@@ -188,14 +188,14 @@
   if (!lock_.TryLock()) {
     uint32_t wait_threshold = lock_profiling_threshold_;
     const Method* current_locking_method = NULL;
-    uintptr_t current_locking_pc = 0;
+    uint32_t current_locking_dex_pc = 0;
     {
       ScopedThreadStateChange tsc(self, kBlocked);
       if (wait_threshold != 0) {
         waitStart = NanoTime() / 1000;
       }
       current_locking_method = locking_method_;
-      current_locking_pc = locking_pc_;
+      current_locking_dex_pc = locking_dex_pc_;
 
       lock_.Lock();
       if (wait_threshold != 0) {
@@ -214,7 +214,7 @@
       if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
         const char* current_locking_filename;
         uint32_t current_locking_line_number;
-        TranslateLocation(current_locking_method, current_locking_pc,
+        TranslateLocation(current_locking_method, current_locking_dex_pc,
                           current_locking_filename, current_locking_line_number);
         LogContentionEvent(self, wait_ms, sample_percent, current_locking_filename, current_locking_line_number);
       }
@@ -226,7 +226,7 @@
   // When debugging, save the current monitor holder for future
   // acquisition failures to use in sampled logging.
   if (lock_profiling_threshold_ != 0) {
-    locking_method_ = self->GetCurrentMethod(&locking_pc_);
+    locking_method_ = self->GetCurrentMethod(&locking_dex_pc_);
   }
 }
 
@@ -323,7 +323,7 @@
     if (lock_count_ == 0) {
       owner_ = NULL;
       locking_method_ = NULL;
-      locking_pc_ = 0;
+      locking_dex_pc_ = 0;
       lock_.Unlock();
     } else {
       --lock_count_;
@@ -427,13 +427,13 @@
    * not order sensitive as we hold the pthread mutex.
    */
   AppendToWaitSet(self);
-  int prevLockCount = lock_count_;
+  int prev_lock_count = lock_count_;
   lock_count_ = 0;
   owner_ = NULL;
-  const Method* savedMethod = locking_method_;
+  const Method* saved_method = locking_method_;
   locking_method_ = NULL;
-  uintptr_t savedPc = locking_pc_;
-  locking_pc_ = 0;
+  uintptr_t saved_dex_pc = locking_dex_pc_;
+  locking_dex_pc_ = 0;
 
   /*
    * Update thread status.  If the GC wakes up, it'll ignore us, knowing
@@ -498,9 +498,9 @@
    * updates is not order sensitive as we hold the pthread mutex.
    */
   owner_ = self;
-  lock_count_ = prevLockCount;
-  locking_method_ = savedMethod;
-  locking_pc_ = savedPc;
+  lock_count_ = prev_lock_count;
+  locking_method_ = saved_method;
+  locking_dex_pc_ = saved_dex_pc;
   RemoveFromWaitSet(self);
 
   /* set self->status back to kRunnable, and self-suspend if needed */
@@ -857,7 +857,7 @@
   os << "\n";
 }
 
-void Monitor::TranslateLocation(const Method* method, uint32_t pc,
+void Monitor::TranslateLocation(const Method* method, uint32_t dex_pc,
                                 const char*& source_file, uint32_t& line_number) const {
   // If method is null, location is unknown
   if (method == NULL) {
@@ -870,7 +870,7 @@
   if (source_file == NULL) {
     source_file = "";
   }
-  line_number = mh.GetLineNumFromNativePC(pc);
+  line_number = mh.GetLineNumFromDexPC(dex_pc);
 }
 
 MonitorList::MonitorList() : lock_("MonitorList lock") {
diff --git a/src/monitor.h b/src/monitor.h
index e5aa01e..300e5a5 100644
--- a/src/monitor.h
+++ b/src/monitor.h
@@ -120,11 +120,11 @@
 
   Mutex lock_;
 
-  // Method and pc where the lock owner acquired the lock, used when lock
+  // Method and dex pc where the lock owner acquired the lock, used when lock
   // sampling is enabled. locking_method_ may be null if the lock is currently
   // unlocked, or if the lock is acquired by the system when the stack is empty.
   const Method* locking_method_;
-  uintptr_t locking_pc_;
+  uint32_t locking_dex_pc_;
 
   friend class MonitorList;
   friend class Object;
diff --git a/src/native/dalvik_system_DexFile.cc b/src/native/dalvik_system_DexFile.cc
index afa09b3..89d7130 100644
--- a/src/native/dalvik_system_DexFile.cc
+++ b/src/native/dalvik_system_DexFile.cc
@@ -24,6 +24,7 @@
 #include "logging.h"
 #include "os.h"
 #include "runtime.h"
+#include "scoped_jni_thread_state.h"
 #include "ScopedLocalRef.h"
 #include "ScopedUtfChars.h"
 #include "space.h"
@@ -125,7 +126,7 @@
 
 static jclass DexFile_defineClassNative(JNIEnv* env, jclass, jstring javaName, jobject javaLoader,
                                         jint cookie) {
-  ScopedThreadStateChange tsc(Thread::Current(), kRunnable);
+  ScopedJniThreadState tsc(env);
   const DexFile* dex_file = toDexFile(cookie);
   if (dex_file == NULL) {
     return NULL;
@@ -139,11 +140,10 @@
   if (dex_class_def == NULL) {
     return NULL;
   }
-
-  Object* class_loader_object = Decode<Object*>(env, javaLoader);
-  ClassLoader* class_loader = down_cast<ClassLoader*>(class_loader_object);
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   class_linker->RegisterDexFile(*dex_file);
+  Object* class_loader_object = Decode<Object*>(env, javaLoader);
+  ClassLoader* class_loader = down_cast<ClassLoader*>(class_loader_object);
   Class* result = class_linker->DefineClass(descriptor, class_loader, *dex_file, *dex_class_def);
   return AddLocalReference<jclass>(env, result);
 }
diff --git a/src/native/dalvik_system_VMStack.cc b/src/native/dalvik_system_VMStack.cc
index ab82694..e3ecbd9 100644
--- a/src/native/dalvik_system_VMStack.cc
+++ b/src/native/dalvik_system_VMStack.cc
@@ -19,6 +19,7 @@
 #include "nth_caller_visitor.h"
 #include "object.h"
 #include "scoped_heap_lock.h"
+#include "scoped_jni_thread_state.h"
 #include "scoped_thread_list_lock.h"
 #include "thread_list.h"
 
@@ -43,18 +44,22 @@
 
 // Returns the defining class loader of the caller's caller.
 static jobject VMStack_getCallingClassLoader(JNIEnv* env, jclass) {
-  NthCallerVisitor visitor(2);
-  Thread::Current()->WalkStack(&visitor);
+  ScopedJniThreadState ts(env, kNative);  // Not a state change out of native.
+  NthCallerVisitor visitor(ts.Self()->GetManagedStack(), ts.Self()->GetTraceStack(), 2);
+  visitor.WalkStack();
   return AddLocalReference<jobject>(env, visitor.caller->GetDeclaringClass()->GetClassLoader());
 }
 
 static jobject VMStack_getClosestUserClassLoader(JNIEnv* env, jclass, jobject javaBootstrap, jobject javaSystem) {
-  struct ClosestUserClassLoaderVisitor : public Thread::StackVisitor {
-    ClosestUserClassLoaderVisitor(Object* bootstrap, Object* system)
-      : bootstrap(bootstrap), system(system), class_loader(NULL) {}
-    bool VisitFrame(const Frame& f, uintptr_t) {
+  struct ClosestUserClassLoaderVisitor : public StackVisitor {
+    ClosestUserClassLoaderVisitor(const ManagedStack* stack,
+                                  const std::vector<TraceStackFrame>* trace_stack,
+                                  Object* bootstrap, Object* system)
+      : StackVisitor(stack, trace_stack), bootstrap(bootstrap), system(system),
+        class_loader(NULL) {}
+    bool VisitFrame() {
       DCHECK(class_loader == NULL);
-      Class* c = f.GetMethod()->GetDeclaringClass();
+      Class* c = GetMethod()->GetDeclaringClass();
       Object* cl = c->GetClassLoader();
       if (cl != NULL && cl != bootstrap && cl != system) {
         class_loader = cl;
@@ -66,17 +71,20 @@
     Object* system;
     Object* class_loader;
   };
+  ScopedJniThreadState ts(env);
   Object* bootstrap = Decode<Object*>(env, javaBootstrap);
   Object* system = Decode<Object*>(env, javaSystem);
-  ClosestUserClassLoaderVisitor visitor(bootstrap, system);
-  Thread::Current()->WalkStack(&visitor);
+  ClosestUserClassLoaderVisitor visitor(ts.Self()->GetManagedStack(), ts.Self()->GetTraceStack(),
+                                        bootstrap, system);
+  visitor.WalkStack();
   return AddLocalReference<jobject>(env, visitor.class_loader);
 }
 
 // Returns the class of the caller's caller's caller.
 static jclass VMStack_getStackClass2(JNIEnv* env, jclass) {
-  NthCallerVisitor visitor(3);
-  Thread::Current()->WalkStack(&visitor);
+  ScopedJniThreadState ts(env, kNative);  // Not a state change out of native.
+  NthCallerVisitor visitor(ts.Self()->GetManagedStack(), ts.Self()->GetTraceStack(), 3);
+  visitor.WalkStack();
   return AddLocalReference<jclass>(env, visitor.caller->GetDeclaringClass());
 }
 
diff --git a/src/native/java_lang_Class.cc b/src/native/java_lang_Class.cc
index 66c83c7..99e3a26 100644
--- a/src/native/java_lang_Class.cc
+++ b/src/native/java_lang_Class.cc
@@ -20,6 +20,7 @@
 #include "nth_caller_visitor.h"
 #include "object.h"
 #include "object_utils.h"
+#include "scoped_jni_thread_state.h"
 #include "ScopedLocalRef.h"
 #include "ScopedUtfChars.h"
 #include "well_known_classes.h"
@@ -307,7 +308,7 @@
 }
 
 static jobject Class_getDeclaredFieldNative(JNIEnv* env, jclass java_class, jobject jname) {
-  ScopedThreadStateChange tsc(Thread::Current(), kRunnable);
+  ScopedJniThreadState ts(env);
   Class* c = DecodeClass(env, java_class);
   if (c == NULL) {
     return NULL;
@@ -343,23 +344,23 @@
 }
 
 static jstring Class_getNameNative(JNIEnv* env, jobject javaThis) {
-  ScopedThreadStateChange tsc(Thread::Current(), kRunnable);
+  ScopedJniThreadState ts(env);
   Class* c = DecodeClass(env, javaThis);
   return AddLocalReference<jstring>(env, c->ComputeName());
 }
 
 static jobjectArray Class_getProxyInterfaces(JNIEnv* env, jobject javaThis) {
-  ScopedThreadStateChange tsc(Thread::Current(), kRunnable);
+  ScopedJniThreadState ts(env);
   SynthesizedProxyClass* c = down_cast<SynthesizedProxyClass*>(DecodeClass(env, javaThis));
   return AddLocalReference<jobjectArray>(env, c->GetInterfaces()->Clone());
 }
 
 static jboolean Class_isAssignableFrom(JNIEnv* env, jobject javaLhs, jclass javaRhs) {
-  ScopedThreadStateChange tsc(Thread::Current(), kRunnable);
+  ScopedJniThreadState ts(env);
   Class* lhs = DecodeClass(env, javaLhs);
   Class* rhs = Decode<Class*>(env, javaRhs); // Can be null.
   if (rhs == NULL) {
-    Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", "class == null");
+    ts.Self()->ThrowNewException("Ljava/lang/NullPointerException;", "class == null");
     return JNI_FALSE;
   }
   return lhs->IsAssignableFrom(rhs) ? JNI_TRUE : JNI_FALSE;
@@ -395,10 +396,10 @@
 }
 
 static jobject Class_newInstanceImpl(JNIEnv* env, jobject javaThis) {
-  ScopedThreadStateChange tsc(Thread::Current(), kRunnable);
+  ScopedJniThreadState ts(env);
   Class* c = DecodeClass(env, javaThis);
   if (c->IsPrimitive() || c->IsInterface() || c->IsArrayClass() || c->IsAbstract()) {
-    Thread::Current()->ThrowNewExceptionF("Ljava/lang/InstantiationException;",
+    ts.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;",
         "Class %s can not be instantiated", PrettyDescriptor(ClassHelper(c).GetDescriptor()).c_str());
     return NULL;
   }
@@ -409,7 +410,7 @@
 
   Method* init = c->FindDeclaredDirectMethod("<init>", "()V");
   if (init == NULL) {
-    Thread::Current()->ThrowNewExceptionF("Ljava/lang/InstantiationException;",
+    ts.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;",
         "Class %s has no default <init>()V constructor", PrettyDescriptor(ClassHelper(c).GetDescriptor()).c_str());
     return NULL;
   }
@@ -423,20 +424,20 @@
   // constructor must be public or, if the caller is in the same package,
   // have package scope.
 
-  NthCallerVisitor visitor(2);
-  Thread::Current()->WalkStack(&visitor);
+  NthCallerVisitor visitor(ts.Self()->GetManagedStack(), ts.Self()->GetTraceStack(), 2);
+  visitor.WalkStack();
   Class* caller_class = visitor.caller->GetDeclaringClass();
 
   ClassHelper caller_ch(caller_class);
   if (!caller_class->CanAccess(c)) {
-    Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalAccessException;",
+    ts.Self()->ThrowNewExceptionF("Ljava/lang/IllegalAccessException;",
         "Class %s is not accessible from class %s",
         PrettyDescriptor(ClassHelper(c).GetDescriptor()).c_str(),
         PrettyDescriptor(caller_ch.GetDescriptor()).c_str());
     return NULL;
   }
   if (!CheckMemberAccess(caller_class, init->GetDeclaringClass(), init->GetAccessFlags())) {
-    Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalAccessException;",
+    ts.Self()->ThrowNewExceptionF("Ljava/lang/IllegalAccessException;",
         "%s is not accessible from class %s",
         PrettyMethod(init).c_str(),
         PrettyDescriptor(caller_ch.GetDescriptor()).c_str());
@@ -445,7 +446,7 @@
 
   Object* new_obj = c->AllocObject();
   if (new_obj == NULL) {
-    DCHECK(Thread::Current()->IsExceptionPending());
+    DCHECK(ts.Self()->IsExceptionPending());
     return NULL;
   }
 
diff --git a/src/nth_caller_visitor.h b/src/nth_caller_visitor.h
index 84d2b3b..db4d28c 100644
--- a/src/nth_caller_visitor.h
+++ b/src/nth_caller_visitor.h
@@ -23,20 +23,19 @@
 namespace art {
 
 // Walks up the stack 'n' callers, when used with Thread::WalkStack.
-struct NthCallerVisitor : public Thread::StackVisitor {
-  NthCallerVisitor(size_t n) : n(n), count(0), pc(0), caller(NULL) {}
-  bool VisitFrame(const Frame& f, uintptr_t fpc) {
+struct NthCallerVisitor : public StackVisitor {
+  NthCallerVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack,
+                   size_t n) : StackVisitor(stack, trace_stack), n(n), count(0), caller(NULL) {}
+  bool VisitFrame() {
     DCHECK(caller == NULL);
     if (count++ == n) {
-      caller = f.GetMethod();
-      pc = fpc;
+      caller = GetMethod();
       return false;
     }
     return true;
   }
   size_t n;
   size_t count;
-  uintptr_t pc;
   Method* caller;
 };
 
diff --git a/src/oat/runtime/arm/context_arm.cc b/src/oat/runtime/arm/context_arm.cc
index 50c386f..2959ef6 100644
--- a/src/oat/runtime/arm/context_arm.cc
+++ b/src/oat/runtime/arm/context_arm.cc
@@ -33,18 +33,19 @@
 #endif
 }
 
-void ArmContext::FillCalleeSaves(const Frame& fr) {
+void ArmContext::FillCalleeSaves(const StackVisitor& fr) {
   Method* method = fr.GetMethod();
   uint32_t core_spills = method->GetCoreSpillMask();
   uint32_t fp_core_spills = method->GetFpSpillMask();
   size_t spill_count = __builtin_popcount(core_spills);
   size_t fp_spill_count = __builtin_popcount(fp_core_spills);
+  size_t frame_size = method->GetFrameSizeInBytes();
   if (spill_count > 0) {
     // Lowest number spill is furthest away, walk registers and fill into context
     int j = 1;
     for (int i = 0; i < 16; i++) {
       if (((core_spills >> i) & 1) != 0) {
-        gprs_[i] = fr.LoadCalleeSave(spill_count - j);
+        gprs_[i] = fr.LoadCalleeSave(spill_count - j, frame_size);
         j++;
       }
     }
@@ -54,7 +55,7 @@
     int j = 1;
     for (int i = 0; i < 32; i++) {
       if (((fp_core_spills >> i) & 1) != 0) {
-        fprs_[i] = fr.LoadCalleeSave(spill_count + fp_spill_count - j);
+        fprs_[i] = fr.LoadCalleeSave(spill_count + fp_spill_count - j, frame_size);
         j++;
       }
     }
diff --git a/src/oat/runtime/arm/context_arm.h b/src/oat/runtime/arm/context_arm.h
index 73cb50f..6f42cc3 100644
--- a/src/oat/runtime/arm/context_arm.h
+++ b/src/oat/runtime/arm/context_arm.h
@@ -28,7 +28,7 @@
   ArmContext();
   virtual ~ArmContext() {}
 
-  virtual void FillCalleeSaves(const Frame& fr);
+  virtual void FillCalleeSaves(const StackVisitor& fr);
 
   virtual void SetSP(uintptr_t new_sp) {
     gprs_[SP] = new_sp;
diff --git a/src/oat/runtime/context.h b/src/oat/runtime/context.h
index 6c7359b..7002f6a 100644
--- a/src/oat/runtime/context.h
+++ b/src/oat/runtime/context.h
@@ -22,7 +22,7 @@
 
 namespace art {
 
-class Frame;
+class StackVisitor;
 
 // Representation of a thread's context on the executing machine
 class Context {
@@ -34,7 +34,7 @@
 
   // Read values from callee saves in the given frame. The frame also holds
   // the method that holds the layout.
-  virtual void FillCalleeSaves(const Frame& fr) = 0;
+  virtual void FillCalleeSaves(const StackVisitor& fr) = 0;
 
   // Set the stack pointer value
   virtual void SetSP(uintptr_t new_sp) = 0;
diff --git a/src/oat/runtime/support_jni.cc b/src/oat/runtime/support_jni.cc
index ff19a4c..cfa1a11 100644
--- a/src/oat/runtime/support_jni.cc
+++ b/src/oat/runtime/support_jni.cc
@@ -90,7 +90,7 @@
   // | unused |
   // | unused |
   // | unused | <- sp
-  Method* jni_method = self->GetTopOfStack().GetMethod();
+  Method* jni_method = self->GetCurrentMethod();
   DCHECK(jni_method->IsNative()) << PrettyMethod(jni_method);
   intptr_t* arg_ptr = sp + 4;  // pointer to r1 on stack
   // Fix up this/jclass argument
diff --git a/src/oat/runtime/support_stubs.cc b/src/oat/runtime/support_stubs.cc
index 2a46c8b..522ccf2 100644
--- a/src/oat/runtime/support_stubs.cc
+++ b/src/oat/runtime/support_stubs.cc
@@ -221,9 +221,8 @@
 #else // ART_USE_LLVM_COMPILER
 const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** called_addr,
                                                      Thread* thread, Runtime::TrampolineType type) {
-  NthCallerVisitor visitor(0);
-  thread->WalkStack(&visitor);
-  Method* caller = visitor.caller;
+  uint32_t dex_pc;
+  Method* caller = thread->GetCurrentMethod(&dex_pc);
 
   ClassLinker* linker = Runtime::Current()->GetClassLinker();
   bool is_static;
@@ -231,8 +230,6 @@
   uint32_t dex_method_idx;
   if (type == Runtime::kUnknownMethod) {
     DCHECK(called->IsRuntimeMethod());
-    // less two as return address may span into next dex instruction
-    uint32_t dex_pc = static_cast<uint32_t>(visitor.pc);
     const DexFile::CodeItem* code = MethodHelper(caller).GetCodeItem();
     CHECK_LT(dex_pc, code->insns_size_in_code_units_);
     const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
diff --git a/src/oat/runtime/support_throw.cc b/src/oat/runtime/support_throw.cc
index 4293228..31cf7d9 100644
--- a/src/oat/runtime/support_throw.cc
+++ b/src/oat/runtime/support_throw.cc
@@ -49,11 +49,8 @@
 // Called by generated call to throw a NPE exception.
 extern "C" void artThrowNullPointerExceptionFromCode(Thread* self, Method** sp) {
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
-  Frame frame = self->GetTopOfStack();
-  uintptr_t throw_native_pc = frame.GetReturnPC();
-  frame.Next();
-  Method* throw_method = frame.GetMethod();
-  uint32_t dex_pc = throw_method->ToDexPC(throw_native_pc - 2);
+  uint32_t dex_pc;
+  Method* throw_method = self->GetCurrentMethod(&dex_pc);
   ThrowNullPointerExceptionFromDexPC(self, throw_method, dex_pc);
   self->DeliverException();
 }
@@ -88,9 +85,7 @@
 
 extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self, Method** sp) {
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
-  Frame frame = self->GetTopOfStack();  // We need the calling method as context for the method_idx
-  frame.Next();
-  Method* method = frame.GetMethod();
+  Method* method = self->GetCurrentMethod();
   self->ThrowNewException("Ljava/lang/NoSuchMethodError;",
       MethodNameFromIndex(method, method_idx, verifier::VERIFY_ERROR_REF_METHOD, false).c_str());
   self->DeliverException();
@@ -98,9 +93,7 @@
 
 extern "C" void artThrowVerificationErrorFromCode(int32_t kind, int32_t ref, Thread* self, Method** sp) {
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
-  Frame frame = self->GetTopOfStack();  // We need the calling method as context to interpret 'ref'
-  frame.Next();
-  Method* method = frame.GetMethod();
+  Method* method = self->GetCurrentMethod();
   ThrowVerificationError(self, method, kind, ref);
   self->DeliverException();
 }
diff --git a/src/oat/runtime/x86/context_x86.cc b/src/oat/runtime/x86/context_x86.cc
index 412b655..4d84f2b 100644
--- a/src/oat/runtime/x86/context_x86.cc
+++ b/src/oat/runtime/x86/context_x86.cc
@@ -31,17 +31,18 @@
 #endif
 }
 
-void X86Context::FillCalleeSaves(const Frame& fr) {
+void X86Context::FillCalleeSaves(const StackVisitor& fr) {
   Method* method = fr.GetMethod();
   uint32_t core_spills = method->GetCoreSpillMask();
   size_t spill_count = __builtin_popcount(core_spills);
-  CHECK_EQ(method->GetFpSpillMask(), 0u);
+  DCHECK_EQ(method->GetFpSpillMask(), 0u);
+  size_t frame_size = method->GetFrameSizeInBytes();
   if (spill_count > 0) {
     // Lowest number spill is furthest away, walk registers and fill into context.
     int j = 2;  // Offset j to skip return address spill.
     for (int i = 0; i < 8; i++) {
       if (((core_spills >> i) & 1) != 0) {
-        gprs_[i] = fr.LoadCalleeSave(spill_count - j);
+        gprs_[i] = fr.LoadCalleeSave(spill_count - j, frame_size);
         j++;
       }
     }
diff --git a/src/oat/runtime/x86/context_x86.h b/src/oat/runtime/x86/context_x86.h
index 845f6c3..3d6b1d9 100644
--- a/src/oat/runtime/x86/context_x86.h
+++ b/src/oat/runtime/x86/context_x86.h
@@ -29,7 +29,7 @@
   virtual ~X86Context() {}
 
   // No callee saves on X86
-  virtual void FillCalleeSaves(const Frame& fr);
+  virtual void FillCalleeSaves(const StackVisitor& fr);
 
   virtual void SetSP(uintptr_t new_sp) {
     gprs_[ESP] = new_sp;
diff --git a/src/object.cc b/src/object.cc
index 9724e42..b728e28 100644
--- a/src/object.cc
+++ b/src/object.cc
@@ -544,11 +544,10 @@
 void Method::Invoke(Thread* self, Object* receiver, JValue* args, JValue* result) const {
   // Push a transition back into managed code onto the linked list in thread.
   CHECK_EQ(kRunnable, self->GetState());
+  self->AssertThreadSuspensionIsAllowable();
 
-#if !defined(ART_USE_LLVM_COMPILER)
-  NativeToManagedRecord record;
-  self->PushNativeToManagedRecord(&record);
-#endif
+  ManagedStack fragment;
+  self->PushManagedStackFragment(&fragment);
 
   // Call the invoke stub associated with the method.
   // Pass everything as arguments.
@@ -584,10 +583,8 @@
     }
   }
 
-#if !defined(ART_USE_LLVM_COMPILER)
   // Pop transition.
-  self->PopNativeToManagedRecord(record);
-#endif
+  self->PopManagedStackFragment(fragment);
 }
 
 bool Method::IsRegistered() const {
@@ -1510,8 +1507,8 @@
     for (int32_t i = 0; i < depth; ++i) {
       Method* method = down_cast<Method*>(method_trace->Get(i));
       mh.ChangeMethod(method);
-      uint32_t native_pc = pc_trace->Get(i);
-      int32_t line_number = mh.GetLineNumFromNativePC(native_pc);
+      uint32_t dex_pc = pc_trace->Get(i);
+      int32_t line_number = mh.GetLineNumFromDexPC(dex_pc);
       const char* source_file = mh.GetDeclaringClassSourceFile();
       result += StringPrintf("  at %s (%s:%d)\n", PrettyMethod(method, true).c_str(),
                              source_file, line_number);
diff --git a/src/object_utils.h b/src/object_utils.h
index ca666bf..c8d50a1 100644
--- a/src/object_utils.h
+++ b/src/object_utils.h
@@ -505,9 +505,13 @@
     return dex_file.GetTypeDescriptor(dex_file.GetTypeId(return_type_idx));
   }
 
-  int32_t GetLineNumFromNativePC(uintptr_t raw_pc) {
-    const DexFile& dex_file = GetDexFile();
-    return dex_file.GetLineNumFromPC(method_, method_->ToDexPC(raw_pc));
+  int32_t GetLineNumFromDexPC(uint32_t dex_pc) {
+    if (dex_pc == DexFile::kDexNoIndex) {
+      return method_->IsNative() ? -2 : -1;
+    } else {
+      const DexFile& dex_file = GetDexFile();
+      return dex_file.GetLineNumFromPC(method_, dex_pc);
+    }
   }
 
   const char* GetDeclaringClassDescriptor() {
diff --git a/src/runtime.cc b/src/runtime.cc
index d7bef1e..b071ef4 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -862,7 +862,7 @@
   if (self == NULL) {
     LOG(FATAL) << "attempting to detach thread that is not attached";
   }
-  if (self->GetTopOfStack().GetSP() != NULL) {
+  if (self->HasManagedStack()) {
     LOG(FATAL) << *Thread::Current() << " attempting to detach while still running code";
   }
   thread_list_->Unregister();
diff --git a/src/scoped_jni_thread_state.h b/src/scoped_jni_thread_state.h
index 7ef92d4..9b3e63c 100644
--- a/src/scoped_jni_thread_state.h
+++ b/src/scoped_jni_thread_state.h
@@ -25,10 +25,10 @@
 // that are using a JNIEnv on the wrong thread.
 class ScopedJniThreadState {
  public:
-  explicit ScopedJniThreadState(JNIEnv* env)
+  explicit ScopedJniThreadState(JNIEnv* env, ThreadState new_state = kRunnable)
       : env_(reinterpret_cast<JNIEnvExt*>(env)) {
     self_ = ThreadForEnv(env);
-    old_thread_state_ = self_->SetState(kRunnable);
+    old_thread_state_ = self_->SetState(new_state);
     self_->VerifyStack();
   }
 
diff --git a/src/shadow_frame.h b/src/shadow_frame.h
deleted file mode 100644
index c4a633f..0000000
--- a/src/shadow_frame.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_SHADOW_FRAME_H_
-#define ART_SRC_SHADOW_FRAME_H_
-
-#include "logging.h"
-#include "macros.h"
-
-namespace art {
-
-class Object;
-class Method;
-
-class ShadowFrame {
- public:
-  // Number of references contained within this shadow frame
-  uint32_t NumberOfReferences() const {
-    return number_of_references_;
-  }
-
-  void SetNumberOfReferences(uint32_t number_of_references) {
-    number_of_references_ = number_of_references;
-  }
-
-  // Caller dex pc
-  uint32_t GetDexPC() const {
-    return dex_pc_;
-  }
-
-  void SetDexPC(uint32_t dex_pc) {
-    dex_pc_ = dex_pc;
-  }
-
-  // Link to previous shadow frame or NULL
-  ShadowFrame* GetLink() const {
-    return link_;
-  }
-
-  void SetLink(ShadowFrame* frame) {
-    DCHECK_NE(this, frame);
-    link_ = frame;
-  }
-
-  Object* GetReference(size_t i) const {
-    DCHECK_LT(i, number_of_references_);
-    return references_[i];
-  }
-
-  void SetReference(size_t i, Object* object) {
-    DCHECK_LT(i, number_of_references_);
-    references_[i] = object;
-  }
-
-  Method* GetMethod() const {
-    DCHECK_NE(method_, static_cast<void*>(NULL));
-    return method_;
-  }
-
-  void SetMethod(Method* method) {
-    DCHECK_NE(method, static_cast<void*>(NULL));
-    method_ = method;
-  }
-
-  bool Contains(Object** shadow_frame_entry) const {
-    // A ShadowFrame should at least contain a reference. Even if a
-    // native method has no argument, we put jobject or jclass as a
-    // reference. The former is "this", while the latter is for static
-    // method.
-    DCHECK_GT(number_of_references_, 0U);
-    return ((&references_[0] <= shadow_frame_entry)
-            && (shadow_frame_entry <= (&references_[number_of_references_ - 1])));
-  }
-
-  // Offset of link within shadow frame
-  static size_t LinkOffset() {
-    return OFFSETOF_MEMBER(ShadowFrame, link_);
-  }
-
-  // Offset of method within shadow frame
-  static size_t MethodOffset() {
-    return OFFSETOF_MEMBER(ShadowFrame, method_);
-  }
-
-  // Offset of dex pc within shadow frame
-  static size_t DexPCOffset() {
-    return OFFSETOF_MEMBER(ShadowFrame, dex_pc_);
-  }
-
-  // Offset of length within shadow frame
-  static size_t NumberOfReferencesOffset() {
-    return OFFSETOF_MEMBER(ShadowFrame, number_of_references_);
-  }
-
-  // Offset of references within shadow frame
-  static size_t ReferencesOffset() {
-    return OFFSETOF_MEMBER(ShadowFrame, references_);
-  }
-
- private:
-  // ShadowFrame should be allocated by the generated code directly.
-  // We should not create new shadow stack in the runtime support function.
-  ~ShadowFrame() {}
-
-  uint32_t number_of_references_;
-  ShadowFrame* link_;
-  Method* method_;
-  uint32_t dex_pc_;
-  Object* references_[];
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(ShadowFrame);
-};
-
-}  // namespace art
-
-#endif  // ART_SRC_SHADOW_FRAME_H_
diff --git a/src/stack.cc b/src/stack.cc
index d724a72..336f8ad 100644
--- a/src/stack.cc
+++ b/src/stack.cc
@@ -17,178 +17,13 @@
 #include "stack.h"
 
 #include "compiler.h"
+#include "oat/runtime/context.h"
 #include "object.h"
 #include "object_utils.h"
 #include "thread_list.h"
 
 namespace art {
 
-bool Frame::HasMethod() const {
-  return GetMethod() != NULL && (!GetMethod()->IsCalleeSaveMethod());
-}
-
-void Frame::Next() {
-#if defined(ART_USE_LLVM_COMPILER)
-  LOG(FATAL) << "LLVM compiler don't support this function";
-#else
-  size_t frame_size = GetMethod()->GetFrameSizeInBytes();
-  DCHECK_NE(frame_size, 0u);
-  DCHECK_LT(frame_size, 1024u);
-  byte* next_sp = reinterpret_cast<byte*>(sp_) + frame_size;
-  sp_ = reinterpret_cast<Method**>(next_sp);
-  if (*sp_ != NULL) {
-    DCHECK((*sp_)->GetClass() == Method::GetMethodClass() ||
-        (*sp_)->GetClass() == Method::GetConstructorClass());
-  }
-#endif
-}
-
-uintptr_t Frame::GetReturnPC() const {
-#if defined(ART_USE_LLVM_COMPILER)
-  LOG(FATAL) << "LLVM compiler don't support this function";
-  return 0;
-#else
-  byte* pc_addr = reinterpret_cast<byte*>(sp_) + GetMethod()->GetReturnPcOffsetInBytes();
-  return *reinterpret_cast<uintptr_t*>(pc_addr);
-#endif
-}
-
-void Frame::SetReturnPC(uintptr_t pc) {
-#if defined(ART_USE_LLVM_COMPILER)
-  LOG(FATAL) << "LLVM compiler don't support this function";
-#else
-  byte* pc_addr = reinterpret_cast<byte*>(sp_) + GetMethod()->GetReturnPcOffsetInBytes();
-  *reinterpret_cast<uintptr_t*>(pc_addr) = pc;
-#endif
-}
-
-/*
- * Return sp-relative offset for a Dalvik virtual register, compiler
- * spill or Method* in bytes using Method*.
- * Note that (reg >= 0) refers to a Dalvik register, (reg == -2)
- * denotes Method* and (reg <= -3) denotes a compiler temp.
- *
- *     +------------------------+
- *     | IN[ins-1]              |  {Note: resides in caller's frame}
- *     |       .                |
- *     | IN[0]                  |
- *     | caller's Method*       |
- *     +========================+  {Note: start of callee's frame}
- *     | core callee-save spill |  {variable sized}
- *     +------------------------+
- *     | fp callee-save spill   |
- *     +------------------------+
- *     | filler word            |  {For compatibility, if V[locals-1] used as wide
- *     +------------------------+
- *     | V[locals-1]            |
- *     | V[locals-2]            |
- *     |      .                 |
- *     |      .                 |  ... (reg == 2)
- *     | V[1]                   |  ... (reg == 1)
- *     | V[0]                   |  ... (reg == 0) <---- "locals_start"
- *     +------------------------+
- *     | Compiler temps         |  ... (reg == -2)
- *     |                        |  ... (reg == -3)
- *     |                        |  ... (reg == -4)
- *     +------------------------+
- *     | stack alignment padding|  {0 to (kStackAlignWords-1) of padding}
- *     +------------------------+
- *     | OUT[outs-1]            |
- *     | OUT[outs-2]            |
- *     |       .                |
- *     | OUT[0]                 |
- *     | curMethod*             |  ... (reg == -1) <<== sp, 16-byte aligned
- *     +========================+
- */
-int Frame::GetVRegOffset(const DexFile::CodeItem* code_item,
-                         uint32_t core_spills, uint32_t fp_spills,
-                         size_t frame_size, int reg) {
-  DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U);
-  int num_spills = __builtin_popcount(core_spills) + __builtin_popcount(fp_spills) + 1 /* filler */;
-  int num_ins = code_item->ins_size_;
-  int num_regs = code_item->registers_size_ - num_ins;
-  int locals_start = frame_size - ((num_spills + num_regs) * sizeof(uint32_t));
-  if (reg == -2) {
-    return 0;  // Method*
-  } else if (reg <= -3) {
-    return locals_start - ((reg + 1) * sizeof(uint32_t));  // Compiler temp
-  } else if (reg < num_regs) {
-    return locals_start + (reg * sizeof(uint32_t));        // Dalvik local reg
-  } else {
-    return frame_size + ((reg - num_regs) * sizeof(uint32_t)) + sizeof(uint32_t); // Dalvik in
-  }
-}
-
-uint32_t Frame::GetVReg(const DexFile::CodeItem* code_item, uint32_t core_spills,
-                        uint32_t fp_spills, size_t frame_size, int vreg) const {
-#if defined(ART_USE_LLVM_COMPILER)
-  LOG(FATAL) << "LLVM compiler don't support this function";
-  return 0;
-#else
-  int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg);
-  byte* vreg_addr = reinterpret_cast<byte*>(sp_) + offset;
-  return *reinterpret_cast<uint32_t*>(vreg_addr);
-#endif
-}
-
-uint32_t Frame::GetVReg(Method* m, int vreg) const {
-#if defined(ART_USE_LLVM_COMPILER)
-  LOG(FATAL) << "LLVM compiler don't support this function";
-  return 0;
-#else
-  DCHECK(m == GetMethod());
-  const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
-  DCHECK(code_item != NULL);  // can't be NULL or how would we compile its instructions?
-  uint32_t core_spills = m->GetCoreSpillMask();
-  uint32_t fp_spills = m->GetFpSpillMask();
-  size_t frame_size = m->GetFrameSizeInBytes();
-  return GetVReg(code_item, core_spills, fp_spills, frame_size, vreg);
-#endif
-}
-
-void Frame::SetVReg(Method* m, int vreg, uint32_t new_value) {
-#if defined(ART_USE_LLVM_COMPILER)
-  LOG(FATAL) << "LLVM compiler don't support this function";
-#else
-  DCHECK(m == GetMethod());
-  const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
-  DCHECK(code_item != NULL);  // can't be NULL or how would we compile its instructions?
-  uint32_t core_spills = m->GetCoreSpillMask();
-  uint32_t fp_spills = m->GetFpSpillMask();
-  size_t frame_size = m->GetFrameSizeInBytes();
-  int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg);
-  byte* vreg_addr = reinterpret_cast<byte*>(sp_) + offset;
-  *reinterpret_cast<uint32_t*>(vreg_addr) = new_value;
-#endif
-}
-
-uintptr_t Frame::LoadCalleeSave(int num) const {
-#if defined(ART_USE_LLVM_COMPILER)
-  LOG(FATAL) << "LLVM compiler don't support this function";
-  return 0;
-#else
-  // Callee saves are held at the top of the frame
-  Method* method = GetMethod();
-  DCHECK(method != NULL);
-  size_t frame_size = method->GetFrameSizeInBytes();
-  byte* save_addr = reinterpret_cast<byte*>(sp_) + frame_size - ((num + 1) * kPointerSize);
-#if defined(__i386__)
-  save_addr -= kPointerSize;  // account for return address
-#endif
-  return *reinterpret_cast<uintptr_t*>(save_addr);
-#endif
-}
-
-Method* Frame::NextMethod() const {
-#if defined(ART_USE_LLVM_COMPILER)
-  LOG(FATAL) << "LLVM compiler don't support this function";
-  return NULL;
-#else
-  byte* next_sp = reinterpret_cast<byte*>(sp_) + GetMethod()->GetFrameSizeInBytes();
-  return *reinterpret_cast<Method**>(next_sp);
-#endif
-}
-
 class StackGetter {
  public:
   StackGetter(JNIEnv* env, Thread* thread) : env_(env), thread_(thread), trace_(NULL) {
@@ -219,4 +54,220 @@
   return stack_getter.GetTrace();
 }
 
+void ManagedStack::PushManagedStackFragment(ManagedStack* fragment) {
+  // Copy this top fragment into given fragment.
+  memcpy(fragment, this, sizeof(ManagedStack));
+  // Clear this fragment, which has become the top.
+  memset(this, 0, sizeof(ManagedStack));
+  // Link our top fragment onto the given fragment.
+  link_ = fragment;
+}
+
+void ManagedStack::PopManagedStackFragment(const ManagedStack& fragment) {
+  DCHECK(&fragment == link_);
+  // Copy this given fragment back to the top.
+  memcpy(this, &fragment, sizeof(ManagedStack));
+}
+
+size_t ManagedStack::NumShadowFrameReferences() const {
+  size_t count = 0;
+  for (const ManagedStack* current_fragment = this; current_fragment != NULL;
+       current_fragment = current_fragment->GetLink()) {
+    for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != NULL;
+         current_frame = current_frame->GetLink()) {
+      count += current_frame->NumberOfReferences();
+    }
+  }
+  return count;
+}
+
+bool ManagedStack::ShadowFramesContain(Object** shadow_frame_entry) const {
+  for (const ManagedStack* current_fragment = this; current_fragment != NULL;
+       current_fragment = current_fragment->GetLink()) {
+    for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != NULL;
+         current_frame = current_frame->GetLink()) {
+      if (current_frame->Contains(shadow_frame_entry)) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+uint32_t StackVisitor::GetDexPc() const {
+  if (cur_shadow_frame_ != NULL) {
+    return cur_shadow_frame_->GetDexPC();
+  } else if (cur_quick_frame_ != NULL) {
+    return GetMethod()->ToDexPC(AdjustQuickFramePcForDexPcComputation(cur_quick_frame_pc_));
+  } else {
+    return 0;
+  }
+}
+
+uint32_t StackVisitor::GetVReg(Method* m, int vreg) const {
+  DCHECK(m == GetMethod());
+  uint32_t core_spills = m->GetCoreSpillMask();
+  const VmapTable vmap_table(m->GetVmapTableRaw());
+  uint32_t vmap_offset;
+  // TODO: IsInContext stops before spotting floating point registers.
+  if (vmap_table.IsInContext(vreg, vmap_offset)) {
+    // Compute the register we need to load from the context.
+    uint32_t spill_mask = core_spills;
+    CHECK_LT(vmap_offset, static_cast<uint32_t>(__builtin_popcount(spill_mask)));
+    uint32_t matches = 0;
+    uint32_t spill_shifts = 0;
+    while (matches != (vmap_offset + 1)) {
+      DCHECK_NE(spill_mask, 0u);
+      matches += spill_mask & 1;  // Add 1 if the low bit is set.
+      spill_mask >>= 1;
+      spill_shifts++;
+    }
+    spill_shifts--;  // Wind back one as we want the last match.
+    return GetGPR(spill_shifts);
+  } else {
+    const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
+    DCHECK(code_item != NULL);  // can't be NULL or how would we compile its instructions?
+    uint32_t fp_spills = m->GetFpSpillMask();
+    size_t frame_size = m->GetFrameSizeInBytes();
+    return GetVReg(code_item, core_spills, fp_spills, frame_size, vreg);
+  }
+}
+
+void StackVisitor::SetVReg(Method* m, int vreg, uint32_t new_value) {
+  DCHECK(m == GetMethod());
+  const VmapTable vmap_table(m->GetVmapTableRaw());
+  uint32_t vmap_offset;
+  // TODO: IsInContext stops before spotting floating point registers.
+  if (vmap_table.IsInContext(vreg, vmap_offset)) {
+    UNIMPLEMENTED(FATAL);
+  }
+  const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
+  DCHECK(code_item != NULL);  // can't be NULL or how would we compile its instructions?
+  uint32_t core_spills = m->GetCoreSpillMask();
+  uint32_t fp_spills = m->GetFpSpillMask();
+  size_t frame_size = m->GetFrameSizeInBytes();
+  int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg);
+  byte* vreg_addr = reinterpret_cast<byte*>(GetCurrentQuickFrame()) + offset;
+  *reinterpret_cast<uint32_t*>(vreg_addr) = new_value;
+}
+
+uintptr_t StackVisitor::GetGPR(uint32_t reg) const {
+  return context_->GetGPR(reg);
+}
+
+uintptr_t StackVisitor::GetReturnPc() const {
+  Method** sp = GetCurrentQuickFrame();
+  CHECK(sp != NULL);
+  byte* pc_addr = reinterpret_cast<byte*>(sp) + GetMethod()->GetReturnPcOffsetInBytes();
+  return *reinterpret_cast<uintptr_t*>(pc_addr);
+}
+
+void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
+  Method** sp = GetCurrentQuickFrame();
+  CHECK(sp != NULL);
+  byte* pc_addr = reinterpret_cast<byte*>(sp) + GetMethod()->GetReturnPcOffsetInBytes();
+  *reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
+}
+
+size_t StackVisitor::ComputeNumFrames() const {
+  struct NumFramesVisitor : public StackVisitor {
+    explicit NumFramesVisitor(const ManagedStack* stack,
+                              const std::vector<TraceStackFrame>* trace_stack) :
+                                StackVisitor(stack, trace_stack), frames(0) {}
+
+    virtual bool VisitFrame() {
+      frames++;
+      return true;
+    }
+    size_t frames;
+  };
+
+  NumFramesVisitor visitor(stack_start_, trace_stack_);
+  visitor.WalkStack(true);
+  return visitor.frames;
+}
+
+void StackVisitor::SanityCheckFrame() {
+#ifndef NDEBUG
+  Method* method = GetMethod();
+  CHECK(method->GetClass() == Method::GetMethodClass() ||
+        method->GetClass() == Method::GetConstructorClass());
+  if (cur_quick_frame_ != NULL) {
+    method->AssertPcIsWithinCode(AdjustQuickFramePcForDexPcComputation(cur_quick_frame_pc_));
+    // Frame sanity.
+    size_t frame_size = method->GetFrameSizeInBytes();
+    CHECK_NE(frame_size, 0u);
+    CHECK_LT(frame_size, 1024u);
+    size_t return_pc_offset = method->GetReturnPcOffsetInBytes();
+    CHECK_LT(return_pc_offset, frame_size);
+  }
+#endif
+}
+
+void StackVisitor::WalkStack(bool include_transitions) {
+  bool method_tracing_active = Runtime::Current()->IsMethodTracingActive();
+  uint32_t trace_stack_depth = 0;
+  for (const ManagedStack* current_fragment = stack_start_; current_fragment != NULL;
+       current_fragment = current_fragment->GetLink()) {
+    cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
+    cur_quick_frame_ = current_fragment->GetTopQuickFrame();
+    cur_quick_frame_pc_ = current_fragment->GetTopQuickFramePc();
+    if (cur_quick_frame_ != NULL) {  // Handle quick stack frames.
+      // Can't be both a shadow and a quick fragment.
+      DCHECK(current_fragment->GetTopShadowFrame() == NULL);
+      Method* method = *cur_quick_frame_;
+      do {
+        SanityCheckFrame();
+        bool should_continue = VisitFrame();
+        if (UNLIKELY(!should_continue)) {
+          return;
+        }
+        if (context_ != NULL) {
+          context_->FillCalleeSaves(*this);
+        }
+        size_t frame_size = method->GetFrameSizeInBytes();
+        // Compute PC for next stack frame from return PC.
+        size_t return_pc_offset = method->GetReturnPcOffsetInBytes();
+        byte* return_pc_addr = reinterpret_cast<byte*>(cur_quick_frame_) + return_pc_offset;
+        uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);
+        if (UNLIKELY(method_tracing_active)) {
+          // While profiling, the return pc is restored from the side stack, except when walking
+          // the stack for an exception, where the side stack will be unwound in VisitFrame.
+          // TODO: stop using include_transitions as a proxy for whether this is the catch block visitor.
+          if (IsTraceExitPc(return_pc) && !include_transitions) {
+            // TODO: unify trace and managed stack.
+            TraceStackFrame trace_frame = GetTraceStackFrame(trace_stack_depth);
+            trace_stack_depth++;
+        CHECK(trace_frame.method_ == GetMethod()) << "Expected: " << PrettyMethod(method)
+                << " Found: " << PrettyMethod(GetMethod());
+            return_pc = trace_frame.return_pc_;
+          }
+        }
+        cur_quick_frame_pc_ = return_pc;
+        byte* next_frame = reinterpret_cast<byte*>(cur_quick_frame_) + frame_size;
+        cur_quick_frame_ = reinterpret_cast<Method**>(next_frame);
+        cur_depth_++;
+        method = *cur_quick_frame_;
+      } while (method != NULL);
+    } else if (cur_shadow_frame_ != NULL) {
+      do {
+        SanityCheckFrame();
+        bool should_continue = VisitFrame();
+        if (UNLIKELY(!should_continue)) {
+          return;
+        }
+        cur_depth_++;
+        cur_shadow_frame_ = cur_shadow_frame_->GetLink();
+      } while (cur_shadow_frame_ != NULL);
+    }
+    cur_depth_++;
+    if (include_transitions) {
+      bool should_continue = VisitFrame();
+      if (!should_continue) {
+        return;
+      }
+    }
+  }
+}
+
 }  // namespace art
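
Note (illustrative sketch, outside the patch): Push/PopManagedStackFragment above copy the thread's resident stack record out into a caller-owned fragment and back, rather than relinking pointers, presumably so compiled code can keep addressing the thread's ManagedStack at fixed Thread offsets. A toy version of the same discipline, with made-up names:

// Standalone sketch only; not runtime code.
#include <cassert>
#include <cstring>

struct ToyManagedStack {
  ToyManagedStack* link = nullptr;
  void* top_quick_frame = nullptr;

  void PushFragment(ToyManagedStack* fragment) {
    std::memcpy(fragment, this, sizeof(ToyManagedStack));  // save the current top
    std::memset(this, 0, sizeof(ToyManagedStack));         // this object becomes a fresh top
    link = fragment;                                        // chain back to the saved state
  }

  void PopFragment(const ToyManagedStack& fragment) {
    assert(&fragment == link);                              // must pop the fragment we pushed
    std::memcpy(this, &fragment, sizeof(ToyManagedStack));  // restore the saved top
  }
};

int main() {
  int dummy_frame;
  ToyManagedStack thread_stack;
  thread_stack.top_quick_frame = &dummy_frame;

  ToyManagedStack fragment;              // lives on the native call stack, e.g. around an invoke
  thread_stack.PushFragment(&fragment);
  assert(thread_stack.top_quick_frame == nullptr);
  assert(fragment.top_quick_frame == &dummy_frame);

  thread_stack.PopFragment(fragment);
  assert(thread_stack.top_quick_frame == &dummy_frame);
  return 0;
}
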
diff --git a/src/stack.h b/src/stack.h
index a26b632..26afc0a 100644
--- a/src/stack.h
+++ b/src/stack.h
@@ -18,75 +18,377 @@
 #define ART_SRC_STACK_H_
 
 #include "dex_file.h"
+#include "heap.h"
+#include "jdwp/jdwp.h"
 #include "jni.h"
 #include "macros.h"
+#include "oat/runtime/context.h"
+#include "trace.h"
 
 #include <stdint.h>
 
 namespace art {
 
 class Method;
+class Object;
+class ShadowFrame;
 class Thread;
 
 jobject GetThreadStack(JNIEnv*, Thread*);
 
-struct NativeToManagedRecord {
-  NativeToManagedRecord* link_;
-  void* last_top_of_managed_stack_;
-  uintptr_t last_top_of_managed_stack_pc_;
-};
-
-// Iterator over managed frames up to the first native-to-managed transition.
-class PACKED Frame {
+class ShadowFrame {
  public:
-  Frame() : sp_(NULL) {}
+  // Number of references contained within this shadow frame
+  uint32_t NumberOfReferences() const {
+    return number_of_references_;
+  }
 
-  explicit Frame(Method** sp) : sp_(sp) {}
+  void SetNumberOfReferences(uint32_t number_of_references) {
+    number_of_references_ = number_of_references;
+  }
+
+  // Caller dex pc
+  uint32_t GetDexPC() const {
+    return dex_pc_;
+  }
+
+  void SetDexPC(uint32_t dex_pc) {
+    dex_pc_ = dex_pc;
+  }
+
+  // Link to previous shadow frame or NULL
+  ShadowFrame* GetLink() const {
+    return link_;
+  }
+
+  void SetLink(ShadowFrame* frame) {
+    DCHECK_NE(this, frame);
+    link_ = frame;
+  }
+
+  Object* GetReference(size_t i) const {
+    DCHECK_LT(i, number_of_references_);
+    return references_[i];
+  }
+
+  void SetReference(size_t i, Object* object) {
+    DCHECK_LT(i, number_of_references_);
+    references_[i] = object;
+  }
 
   Method* GetMethod() const {
-    return (sp_ != NULL) ? *sp_ : NULL;
+    DCHECK_NE(method_, static_cast<void*>(NULL));
+    return method_;
   }
 
-  bool HasNext() const {
-    return NextMethod() != NULL;
+  void SetMethod(Method* method) {
+    DCHECK_NE(method, static_cast<void*>(NULL));
+    method_ = method;
   }
 
-  void Next();
-
-  uintptr_t GetReturnPC() const;
-
-  void SetReturnPC(uintptr_t pc);
-
-  uintptr_t LoadCalleeSave(int num) const;
-
-  static int GetVRegOffset(const DexFile::CodeItem* code_item, uint32_t core_spills,
-                           uint32_t fp_spills, size_t frame_size, int reg);
-
-  uint32_t GetVReg(const DexFile::CodeItem* code_item, uint32_t core_spills, uint32_t fp_spills,
-                    size_t frame_size, int vreg) const;
-
-  uint32_t GetVReg(Method* m, int vreg) const;
-  void SetVReg(Method* method, int vreg, uint32_t new_value);
-
-  Method** GetSP() const {
-    return sp_;
+  bool Contains(Object** shadow_frame_entry) const {
+    return ((&references_[0] <= shadow_frame_entry) &&
+            (shadow_frame_entry <= (&references_[number_of_references_ - 1])));
   }
 
-  void SetSP(Method** sp) {
-    sp_ = sp;
+  void VisitRoots(Heap::RootVisitor* visitor, void* arg) {
+    size_t num_refs = NumberOfReferences();
+    for (size_t j = 0; j < num_refs; j++) {
+      Object* object = GetReference(j);
+      if (object != NULL) {
+        visitor(object, arg);
+      }
+    }
   }
 
-  // Is this a frame for a real method (native or with dex code)
-  bool HasMethod() const;
+  // Offset of link within shadow frame
+  static size_t LinkOffset() {
+    return OFFSETOF_MEMBER(ShadowFrame, link_);
+  }
+
+  // Offset of method within shadow frame
+  static size_t MethodOffset() {
+    return OFFSETOF_MEMBER(ShadowFrame, method_);
+  }
+
+  // Offset of dex pc within shadow frame
+  static size_t DexPCOffset() {
+    return OFFSETOF_MEMBER(ShadowFrame, dex_pc_);
+  }
+
+  // Offset of length within shadow frame
+  static size_t NumberOfReferencesOffset() {
+    return OFFSETOF_MEMBER(ShadowFrame, number_of_references_);
+  }
+
+  // Offset of references within shadow frame
+  static size_t ReferencesOffset() {
+    return OFFSETOF_MEMBER(ShadowFrame, references_);
+  }
 
  private:
-  Method* NextMethod() const;
+  // ShadowFrame should be allocated by the generated code directly.
+  // We should not create new shadow stack in the runtime support function.
+  ~ShadowFrame() {}
 
-  friend class Thread;
+  uint32_t number_of_references_;
+  ShadowFrame* link_;
+  Method* method_;
+  uint32_t dex_pc_;
+  Object* references_[];
 
-  Method** sp_;
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ShadowFrame);
 };
 
+// The managed stack is used to record fragments of managed code stacks. Managed code stacks
+// may either be shadow frames or lists of frames using fixed frame sizes. Transition records are
+// necessary for transitions between code using different frame layouts and transitions into native
+// code.
+class PACKED ManagedStack {
+ public:
+  ManagedStack() : link_(NULL), top_shadow_frame_(NULL),
+                   top_quick_frame_(NULL), top_quick_frame_pc_(0) {}
+  void PushManagedStackFragment(ManagedStack* fragment);
+  void PopManagedStackFragment(const ManagedStack& record);
+
+  ManagedStack* GetLink() const {
+    return link_;
+  }
+
+  Method** GetTopQuickFrame() const {
+    return top_quick_frame_;
+  }
+
+  void SetTopQuickFrame(Method** top) {
+    top_quick_frame_ = top;
+  }
+
+  uintptr_t GetTopQuickFramePc() const {
+    return top_quick_frame_pc_;
+  }
+
+  void SetTopQuickFramePc(uintptr_t pc) {
+    top_quick_frame_pc_ = pc;
+  }
+
+  static size_t TopQuickFrameOffset() {
+    return OFFSETOF_MEMBER(ManagedStack, top_quick_frame_);
+  }
+
+  static size_t TopQuickFramePcOffset() {
+    return OFFSETOF_MEMBER(ManagedStack, top_quick_frame_pc_);
+  }
+
+  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
+    ShadowFrame* old_frame = top_shadow_frame_;
+    top_shadow_frame_ = new_top_frame;
+    new_top_frame->SetLink(old_frame);
+    return old_frame;
+  }
+
+  ShadowFrame* PopShadowFrame() {
+    CHECK(top_shadow_frame_ != NULL);
+    ShadowFrame* frame = top_shadow_frame_;
+    top_shadow_frame_ = frame->GetLink();
+    return frame;
+  }
+
+  ShadowFrame* GetTopShadowFrame() const {
+    return top_shadow_frame_;
+  }
+
+  static size_t TopShadowFrameOffset() {
+    return OFFSETOF_MEMBER(ManagedStack, top_shadow_frame_);
+  }
+
+  size_t NumShadowFrameReferences() const;
+
+  bool ShadowFramesContain(Object** shadow_frame_entry) const;
+
+ private:
+  ManagedStack* link_;
+  ShadowFrame* top_shadow_frame_;
+  Method** top_quick_frame_;
+  uintptr_t top_quick_frame_pc_;
+};
+
+class StackVisitor {
+ protected:
+  StackVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack,
+               Context* context = NULL) :
+    stack_start_(stack), trace_stack_(trace_stack), cur_shadow_frame_(NULL), cur_quick_frame_(NULL),
+    cur_quick_frame_pc_(0), num_frames_(0), cur_depth_(0), context_(context) {}
+
+ public:
+  virtual ~StackVisitor() {}
+
+  // Return 'true' if we should continue to visit more frames, 'false' to stop.
+  virtual bool VisitFrame() = 0;
+
+  void WalkStack(bool include_transitions = false);
+
+  Method* GetMethod() const {
+    if (cur_shadow_frame_ != NULL) {
+      return cur_shadow_frame_->GetMethod();
+    } else if (cur_quick_frame_ != NULL) {
+      return *cur_quick_frame_;
+    } else {
+      return NULL;
+    }
+  }
+
+  bool IsShadowFrame() const {
+    return cur_shadow_frame_ != NULL;
+  }
+
+  uintptr_t LoadCalleeSave(int num, size_t frame_size) const {
+    // Callee saves are held at the top of the frame
+    Method* method = GetMethod();
+    DCHECK(method != NULL);
+    byte* save_addr =
+        reinterpret_cast<byte*>(cur_quick_frame_) + frame_size - ((num + 1) * kPointerSize);
+#if defined(__i386__)
+    save_addr -= kPointerSize;  // account for return address
+#endif
+    return *reinterpret_cast<uintptr_t*>(save_addr);
+  }
+
+  uint32_t GetDexPc() const;
+
+  // Gets the height of the stack in the managed stack frames, including transitions.
+  size_t GetFrameHeight() {
+    return GetNumFrames() - cur_depth_;
+  }
+
+  // Get a frame ID where 0 is a special value.
+  size_t GetFrameId() {
+    return GetFrameHeight() + 1;
+  }
+
+  size_t GetNumFrames() {
+    if (num_frames_ == 0) {
+      num_frames_ = ComputeNumFrames();
+    }
+    return num_frames_;
+  }
+
+  uint32_t GetVReg(Method* m, int vreg) const;
+
+  void SetVReg(Method* m, int vreg, uint32_t new_value);
+
+  uintptr_t GetGPR(uint32_t reg) const;
+
+  uint32_t GetVReg(const DexFile::CodeItem* code_item, uint32_t core_spills,
+                   uint32_t fp_spills, size_t frame_size, int vreg) const {
+    int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg);
+    byte* vreg_addr = reinterpret_cast<byte*>(GetCurrentQuickFrame()) + offset;
+    return *reinterpret_cast<uint32_t*>(vreg_addr);
+  }
+
+  uintptr_t GetReturnPc() const;
+
+  void SetReturnPc(uintptr_t new_ret_pc);
+
+  /*
+   * Return sp-relative offset for a Dalvik virtual register, compiler
+   * spill or Method* in bytes using Method*.
+   * Note that (reg >= 0) refers to a Dalvik register, (reg == -2)
+   * denotes Method* and (reg <= -3) denotes a compiler temp.
+   *
+   *     +------------------------+
+   *     | IN[ins-1]              |  {Note: resides in caller's frame}
+   *     |       .                |
+   *     | IN[0]                  |
+   *     | caller's Method*       |
+   *     +========================+  {Note: start of callee's frame}
+   *     | core callee-save spill |  {variable sized}
+   *     +------------------------+
+   *     | fp callee-save spill   |
+   *     +------------------------+
+   *     | filler word            |  {For compatibility, if V[locals-1] used as wide
+   *     +------------------------+
+   *     | V[locals-1]            |
+   *     | V[locals-2]            |
+   *     |      .                 |
+   *     |      .                 |  ... (reg == 2)
+   *     | V[1]                   |  ... (reg == 1)
+   *     | V[0]                   |  ... (reg == 0) <---- "locals_start"
+   *     +------------------------+
+   *     | Compiler temps         |  ... (reg == -2)
+   *     |                        |  ... (reg == -3)
+   *     |                        |  ... (reg == -4)
+   *     +------------------------+
+   *     | stack alignment padding|  {0 to (kStackAlignWords-1) of padding}
+   *     +------------------------+
+   *     | OUT[outs-1]            |
+   *     | OUT[outs-2]            |
+   *     |       .                |
+   *     | OUT[0]                 |
+   *     | curMethod*             |  ... (reg == -1) <<== sp, 16-byte aligned
+   *     +========================+
+   */
+  static int GetVRegOffset(const DexFile::CodeItem* code_item,
+                    uint32_t core_spills, uint32_t fp_spills,
+                    size_t frame_size, int reg) {
+    DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U);
+    int num_spills = __builtin_popcount(core_spills) + __builtin_popcount(fp_spills) + 1; // Filler.
+    int num_ins = code_item->ins_size_;
+    int num_regs = code_item->registers_size_ - num_ins;
+    int locals_start = frame_size - ((num_spills + num_regs) * sizeof(uint32_t));
+    if (reg == -2) {
+      return 0;  // Method*
+    } else if (reg <= -3) {
+      return locals_start - ((reg + 1) * sizeof(uint32_t));  // Compiler temp.
+    } else if (reg < num_regs) {
+      return locals_start + (reg * sizeof(uint32_t));        // Dalvik local reg.
+    } else {
+      return frame_size + ((reg - num_regs) * sizeof(uint32_t)) + sizeof(uint32_t); // Dalvik in.
+    }
+  }
+
+  uintptr_t GetCurrentQuickFramePc() const {
+    return cur_quick_frame_pc_;
+  }
+
+  Method** GetCurrentQuickFrame() const {
+    return cur_quick_frame_;
+  }
+
+  ShadowFrame* GetCurrentShadowFrame() const {
+    return cur_shadow_frame_;
+  }
+
+ private:
+  size_t ComputeNumFrames() const;
+
+  TraceStackFrame GetTraceStackFrame(uint32_t depth) const {
+    return trace_stack_->at(trace_stack_->size() - depth - 1);
+  }
+
+  void SanityCheckFrame();
+
+  const ManagedStack* const stack_start_;
+  const std::vector<TraceStackFrame>* const trace_stack_;
+  ShadowFrame* cur_shadow_frame_;
+  Method** cur_quick_frame_;
+  uintptr_t cur_quick_frame_pc_;
+  // Lazily computed, number of frames in the stack.
+  size_t num_frames_;
+  // Depth of the frame we're currently at.
+  size_t cur_depth_;
+ protected:
+  Context* const context_;
+};
+
+static inline uintptr_t AdjustQuickFramePcForDexPcComputation(uintptr_t pc) {
+  // Quick methods record a mapping from quick PCs to Dex PCs at the beginning of the code for
+  // each dex instruction. When walking the stack, the return PC will be set to the instruction
+  // following the call, which will likely be the start of the next dex instruction. Adjust the PC
+  // for these cases by 2 bytes in case the return PC also has the thumb bit set.
+  if (pc > 0) { pc -= 2; }
+  return pc;
+}
+
 }  // namespace art
 
 #endif  // ART_SRC_STACK_H_
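
Note (illustrative sketch, outside the patch): the frame-layout diagram and GetVRegOffset above can be replayed with concrete numbers. Everything below is made up (a 64-byte frame, two core spills, no fp spills, registers_size_ = 5 of which ins_size_ = 2) and only mirrors the arithmetic in the header.

// Standalone sketch only; not runtime code.
#include <cassert>
#include <cstdint>

static int VRegOffset(int frame_size, int core_spill_count, int fp_spill_count,
                      int registers_size, int ins_size, int reg) {
  const int kWordSize = static_cast<int>(sizeof(uint32_t));
  int num_spills = core_spill_count + fp_spill_count + 1;  // +1 for the filler word
  int num_regs = registers_size - ins_size;
  int locals_start = frame_size - (num_spills + num_regs) * kWordSize;
  if (reg == -2) {
    return 0;                                         // Method* slot at sp
  } else if (reg <= -3) {
    return locals_start - (reg + 1) * kWordSize;      // compiler temp
  } else if (reg < num_regs) {
    return locals_start + reg * kWordSize;            // Dalvik local
  } else {
    return frame_size + (reg - num_regs) * kWordSize + kWordSize;  // in, held in the caller's frame
  }
}

int main() {
  // num_spills = 3, num_regs = 3, locals_start = 64 - 6 * 4 = 40.
  assert(VRegOffset(64, 2, 0, 5, 2, -2) == 0);   // Method* at sp
  assert(VRegOffset(64, 2, 0, 5, 2, 0) == 40);   // v0 sits at locals_start
  assert(VRegOffset(64, 2, 0, 5, 2, 2) == 48);   // v2, last local
  assert(VRegOffset(64, 2, 0, 5, 2, 3) == 68);   // v3 is IN[0], just past the caller's Method*
  return 0;
}
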
diff --git a/src/thread.cc b/src/thread.cc
index 457de6a..0192313 100644
--- a/src/thread.cc
+++ b/src/thread.cc
@@ -42,7 +42,6 @@
 #include "runtime_support.h"
 #include "scoped_jni_thread_state.h"
 #include "ScopedLocalRef.h"
-#include "shadow_frame.h"
 #include "space.h"
 #include "stack.h"
 #include "stack_indirect_reference_table.h"
@@ -528,44 +527,10 @@
   Thread::DumpState(os, this, GetTid());
 }
 
-#if !defined(ART_USE_LLVM_COMPILER)
-void Thread::PushNativeToManagedRecord(NativeToManagedRecord* record) {
-  Method **sp = top_of_managed_stack_.GetSP();
-#ifndef NDEBUG
-  if (sp != NULL) {
-    Method* m = *sp;
-    Runtime::Current()->GetHeap()->VerifyObject(m);
-    DCHECK((m == NULL) || m->IsMethod());
-  }
-#endif
-  record->last_top_of_managed_stack_ = reinterpret_cast<void*>(sp);
-  record->last_top_of_managed_stack_pc_ = top_of_managed_stack_pc_;
-  record->link_ = native_to_managed_record_;
-  native_to_managed_record_ = record;
-  top_of_managed_stack_.SetSP(NULL);
-}
-#else
-void Thread::PushNativeToManagedRecord(NativeToManagedRecord*) {
-  LOG(FATAL) << "Called non-LLVM method with LLVM";
-}
-#endif
-
-#if !defined(ART_USE_LLVM_COMPILER)
-void Thread::PopNativeToManagedRecord(const NativeToManagedRecord& record) {
-  native_to_managed_record_ = record.link_;
-  top_of_managed_stack_.SetSP(reinterpret_cast<Method**>(record.last_top_of_managed_stack_));
-  top_of_managed_stack_pc_ = record.last_top_of_managed_stack_pc_;
-}
-#else
-void Thread::PopNativeToManagedRecord(const NativeToManagedRecord&) {
-  LOG(FATAL) << "Called non-LLVM method with LLVM";
-}
-#endif
-
-struct StackDumpVisitor : public Thread::StackVisitor {
-  StackDumpVisitor(std::ostream& os, const Thread* thread)
-      : last_method(NULL), last_line_number(0), repetition_count(0), os(os), thread(thread),
-        frame_count(0) {
+struct StackDumpVisitor : public StackVisitor {
+  StackDumpVisitor(std::ostream& os, const Thread* thread) :
+    StackVisitor(thread->GetManagedStack(), thread->GetTraceStack()), last_method(NULL),
+    last_line_number(0), repetition_count(0), os(os), thread(thread), frame_count(0) {
   }
 
   virtual ~StackDumpVisitor() {
@@ -574,19 +539,19 @@
     }
   }
 
-  bool VisitFrame(const Frame& frame, uintptr_t pc) {
-    if (!frame.HasMethod()) {
+  bool VisitFrame() {
+    Method* m = GetMethod();
+    if (m->IsRuntimeMethod()) {
       return true;
     }
     const int kMaxRepetition = 3;
-    Method* m = frame.GetMethod();
     Class* c = m->GetDeclaringClass();
     ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
     const DexCache* dex_cache = c->GetDexCache();
     int line_number = -1;
     if (dex_cache != NULL) {  // be tolerant of bad input
       const DexFile& dex_file = class_linker->FindDexFile(dex_cache);
-      line_number = dex_file.GetLineNumFromPC(m, m->ToDexPC(pc));
+      line_number = dex_file.GetLineNumFromPC(m, GetDexPc());
     }
     if (line_number == last_line_number && last_method == m) {
       repetition_count++;
@@ -632,7 +597,7 @@
     DumpNativeStack(os, GetTid(), "  native: ", false);
   }
   StackDumpVisitor dumper(os, this);
-  WalkStack(&dumper);
+  dumper.WalkStack();
 }
 
 void Thread::SetStateWithoutSuspendCheck(ThreadState new_state) {
@@ -643,6 +608,11 @@
 
 ThreadState Thread::SetState(ThreadState new_state) {
   ThreadState old_state = state_;
+  if (old_state == kRunnable) {
+    // Non-runnable states are points where we expect thread suspension can occur.
+    AssertThreadSuspensionIsAllowable();
+  }
+
   if (old_state == new_state) {
     return old_state;
   }
@@ -701,6 +671,7 @@
      * the thread is supposed to be suspended.  This is possibly faster
      * on SMP and slightly more correct, but less convenient.
      */
+    AssertThreadSuspensionIsAllowable();
     android_atomic_acquire_store(new_state, addr);
     ANNOTATE_IGNORE_READS_BEGIN();
     int suspend_count = suspend_count_;
@@ -813,35 +784,35 @@
 }
 
 Thread::Thread()
-    : thin_lock_id_(0),
-      tid_(0),
+    : suspend_count_(0),
+      card_table_(NULL),
+      exception_(NULL),
+      stack_end_(NULL),
+      managed_stack_(),
+      jni_env_(NULL),
+      self_(NULL),
+      state_(kNative),
       peer_(NULL),
-      top_of_managed_stack_(),
-      top_of_managed_stack_pc_(0),
+      stack_begin_(NULL),
+      stack_size_(0),
+      thin_lock_id_(0),
+      tid_(0),
       wait_mutex_(new Mutex("a thread wait mutex")),
       wait_cond_(new ConditionVariable("a thread wait condition variable")),
       wait_monitor_(NULL),
       interrupted_(false),
       wait_next_(NULL),
       monitor_enter_object_(NULL),
-      card_table_(0),
-      stack_end_(NULL),
-      native_to_managed_record_(NULL),
       top_sirt_(NULL),
-      top_shadow_frame_(NULL),
-      jni_env_(NULL),
-      state_(kNative),
-      self_(NULL),
       runtime_(NULL),
-      exception_(NULL),
-      suspend_count_(0),
-      debug_suspend_count_(0),
       class_loader_override_(NULL),
       long_jump_context_(NULL),
       throwing_OutOfMemoryError_(false),
+      debug_suspend_count_(0),
       debug_invoke_req_(new DebugInvokeReq),
       trace_stack_(new std::vector<TraceStackFrame>),
-      name_(new std::string(kThreadNameDuringStartup)) {
+      name_(new std::string(kThreadNameDuringStartup)),
+      no_thread_suspension_(0) {
   CHECK_EQ((sizeof(Thread) % 4), 0U) << sizeof(Thread);
   memset(&held_mutexes_[0], 0, sizeof(held_mutexes_));
 }
@@ -968,14 +939,6 @@
   return count;
 }
 
-size_t Thread::NumShadowFrameReferences() {
-  size_t count = 0;
-  for (ShadowFrame* cur = top_shadow_frame_; cur; cur = cur->GetLink()) {
-    count += cur->NumberOfReferences();
-  }
-  return count;
-}
-
 bool Thread::SirtContains(jobject obj) {
   Object** sirt_entry = reinterpret_cast<Object**>(obj);
   for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
@@ -983,21 +946,8 @@
       return true;
     }
   }
-  return false;
-}
-
-bool Thread::ShadowFrameContains(jobject obj) {
-  Object** shadow_frame_entry = reinterpret_cast<Object**>(obj);
-  for (ShadowFrame* cur = top_shadow_frame_; cur; cur = cur->GetLink()) {
-    if (cur->Contains(shadow_frame_entry)) {
-      return true;
-    }
-  }
-  return false;
-}
-
-bool Thread::StackReferencesContain(jobject obj) {
-  return SirtContains(obj) || ShadowFrameContains(obj);
+  // JNI code invoked from portable code uses shadow frames rather than the SIRT.
+  return managed_stack_.ShadowFramesContain(sirt_entry);
 }
 
 void Thread::SirtVisitRoots(Heap::RootVisitor* visitor, void* arg) {
@@ -1012,18 +962,6 @@
   }
 }
 
-void Thread::ShadowFrameVisitRoots(Heap::RootVisitor* visitor, void* arg) {
-  for (ShadowFrame* cur = top_shadow_frame_; cur; cur = cur->GetLink()) {
-    size_t num_refs = cur->NumberOfReferences();
-    for (size_t j = 0; j < num_refs; j++) {
-      Object* object = cur->GetReference(j);
-      if (object != NULL) {
-        visitor(object, arg);
-      }
-    }
-  }
-}
-
 Object* Thread::DecodeJObject(jobject obj) {
   DCHECK(CanAccessDirectReferences());
   if (obj == NULL) {
@@ -1063,7 +1001,7 @@
   default:
     // TODO: make stack indirect reference table lookup more efficient
     // Check if this is a local reference in the SIRT
-    if (StackReferencesContain(obj)) {
+    if (SirtContains(obj)) {
       result = *reinterpret_cast<Object**>(obj);  // Read from SIRT
     } else if (Runtime::Current()->GetJavaVM()->work_around_app_jni_bugs) {
       // Assume an invalid local reference is actually a direct pointer.
@@ -1083,20 +1021,24 @@
   return result;
 }
 
-class CountStackDepthVisitor : public Thread::StackVisitor {
+class CountStackDepthVisitor : public StackVisitor {
  public:
-  CountStackDepthVisitor() : depth_(0), skip_depth_(0), skipping_(true) {}
+  CountStackDepthVisitor(const ManagedStack* stack,
+                         const std::vector<TraceStackFrame>* trace_stack) :
+                           StackVisitor(stack, trace_stack), depth_(0), skip_depth_(0),
+                           skipping_(true) {}
 
-  bool VisitFrame(const Frame& frame, uintptr_t /*pc*/) {
+  bool VisitFrame() {
     // We want to skip frames up to and including the exception's constructor.
     // Note we also skip the frame if it doesn't have a method (namely the callee
     // save frame)
-    if (skipping_ && frame.HasMethod() &&
-        !Throwable::GetJavaLangThrowable()->IsAssignableFrom(frame.GetMethod()->GetDeclaringClass())) {
+    Method* m = GetMethod();
+    if (skipping_ && !m->IsRuntimeMethod() &&
+        !Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) {
       skipping_ = false;
     }
     if (!skipping_) {
-      if (frame.HasMethod()) {  // ignore callee save frames
+      if (!m->IsRuntimeMethod()) {  // Ignore runtime frames (in particular callee save).
         ++depth_;
       }
     } else {
@@ -1119,90 +1061,72 @@
   bool skipping_;
 };
 
-class BuildInternalStackTraceVisitor : public Thread::StackVisitor {
+class BuildInternalStackTraceVisitor : public StackVisitor {
  public:
-  explicit BuildInternalStackTraceVisitor(int skip_depth)
-      : skip_depth_(skip_depth), count_(0), pc_trace_(NULL), method_trace_(NULL), local_ref_(NULL) {
-  }
+  explicit BuildInternalStackTraceVisitor(const ManagedStack* stack,
+                                          const std::vector<TraceStackFrame>* trace_stack,
+                                          int skip_depth) :
+    StackVisitor(stack, trace_stack), skip_depth_(skip_depth), count_(0), dex_pc_trace_(NULL),
+    method_trace_(NULL) {}
 
   bool Init(int depth, ScopedJniThreadState& ts) {
     // Allocate method trace with an extra slot that will hold the PC trace
-    method_trace_ = Runtime::Current()->GetClassLinker()->AllocObjectArray<Object>(depth + 1);
-    if (method_trace_ == NULL) {
+    SirtRef<ObjectArray<Object> >
+      method_trace(Runtime::Current()->GetClassLinker()->AllocObjectArray<Object>(depth + 1));
+    if (method_trace.get() == NULL) {
       return false;
     }
-    // Register a local reference as IntArray::Alloc may trigger GC
-    local_ref_ = AddLocalReference<jobject>(ts.Env(), method_trace_);
-    pc_trace_ = IntArray::Alloc(depth);
-    if (pc_trace_ == NULL) {
+    IntArray* dex_pc_trace = IntArray::Alloc(depth);
+    if (dex_pc_trace == NULL) {
       return false;
     }
-#ifdef MOVING_GARBAGE_COLLECTOR
-    // Re-read after potential GC
-    method_trace_ = Decode<ObjectArray<Object>*>(ts.Env(), local_ref_);
-#endif
     // Save PC trace in last element of method trace, also places it into the
     // object graph.
-    method_trace_->Set(depth, pc_trace_);
+    method_trace->Set(depth, dex_pc_trace);
+    // Set the Object*s and assert that no thread suspension is now possible.
+    ts.Self()->StartAssertNoThreadSuspension();
+    method_trace_ = method_trace.get();
+    dex_pc_trace_ = dex_pc_trace;
     return true;
   }
 
-  virtual ~BuildInternalStackTraceVisitor() {}
+  virtual ~BuildInternalStackTraceVisitor() {
+    Thread::Current()->EndAssertNoThreadSuspension();
+  }
 
-  bool VisitFrame(const Frame& frame, uintptr_t pc) {
-    if (method_trace_ == NULL || pc_trace_ == NULL) {
+  bool VisitFrame() {
+    if (method_trace_ == NULL || dex_pc_trace_ == NULL) {
       return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError.
     }
     if (skip_depth_ > 0) {
       skip_depth_--;
       return true;
     }
-    if (!frame.HasMethod()) {
-      return true;  // ignore callee save frames
+    Method* m = GetMethod();
+    if (m->IsRuntimeMethod()) {
+      return true;  // Ignore runtime frames (in particular callee save).
     }
-    method_trace_->Set(count_, frame.GetMethod());
-    pc_trace_->Set(count_, pc);
+    method_trace_->Set(count_, m);
+    dex_pc_trace_->Set(count_, GetDexPc());
     ++count_;
     return true;
   }
 
-  jobject GetInternalStackTrace() const {
-    return local_ref_;
+  ObjectArray<Object>* GetInternalStackTrace() const {
+    return method_trace_;
   }
 
  private:
   // How many more frames to skip.
   int32_t skip_depth_;
-  // Current position down stack trace
+  // Current position down stack trace.
   uint32_t count_;
-  // Array of return PC values
-  IntArray* pc_trace_;
-  // An array of the methods on the stack, the last entry is a reference to the
-  // PC trace
+  // Array of dex PC values.
+  IntArray* dex_pc_trace_;
+  // An array of the methods on the stack, the last entry is a reference to the PC trace.
   ObjectArray<Object>* method_trace_;
-  // Local indirect reference table entry for method trace
-  jobject local_ref_;
 };
 
-#if !defined(ART_USE_LLVM_COMPILER)
-// TODO: remove this.
-static uintptr_t ManglePc(uintptr_t pc) {
-  // Move the PC back 2 bytes as a call will frequently terminate the
-  // decoding of a particular instruction and we want to make sure we
-  // get the Dex PC of the instruction with the call and not the
-  // instruction following.
-  if (pc > 0) { pc -= 2; }
-  return pc;
-}
-#endif
-
-// TODO: remove this.
-static uintptr_t DemanglePc(uintptr_t pc) {
-  // Revert mangling for the case where we need the PC to return to the upcall
-  if (pc > 0) { pc +=  2; }
-  return pc;
-}
-
 void Thread::PushSirt(StackIndirectReferenceTable* sirt) {
   sirt->SetLink(top_sirt_);
   top_sirt_ = sirt;
@@ -1215,112 +1139,10 @@
   return sirt;
 }
 
-#if !defined(ART_USE_LLVM_COMPILER) // LLVM use ShadowFrame
-
-void Thread::WalkStack(StackVisitor* visitor, bool include_upcalls) const {
-  Frame frame = GetTopOfStack();
-  uintptr_t pc = ManglePc(top_of_managed_stack_pc_);
-  uint32_t trace_stack_depth = 0;
-  // TODO: enable this CHECK after native_to_managed_record_ is initialized during startup.
-  // CHECK(native_to_managed_record_ != NULL);
-  NativeToManagedRecord* record = native_to_managed_record_;
-  bool method_tracing_active = Runtime::Current()->IsMethodTracingActive();
-  while (frame.GetSP() != NULL) {
-    for ( ; frame.GetMethod() != NULL; frame.Next()) {
-      frame.GetMethod()->AssertPcIsWithinCode(pc);
-      bool should_continue = visitor->VisitFrame(frame, pc);
-      if (UNLIKELY(!should_continue)) {
-        return;
-      }
-      uintptr_t return_pc = frame.GetReturnPC();
-      if (LIKELY(!method_tracing_active)) {
-        pc = ManglePc(return_pc);
-      } else {
-        // While profiling, the return pc is restored from the side stack, except when walking
-        // the stack for an exception where the side stack will be unwound in VisitFrame.
-        if (IsTraceExitPc(return_pc) && !include_upcalls) {
-          TraceStackFrame trace_frame = GetTraceStackFrame(trace_stack_depth++);
-          CHECK(trace_frame.method_ == frame.GetMethod());
-          pc = ManglePc(trace_frame.return_pc_);
-        } else {
-          pc = ManglePc(return_pc);
-        }
-      }
-    }
-    if (include_upcalls) {
-      bool should_continue = visitor->VisitFrame(frame, pc);
-      if (!should_continue) {
-        return;
-      }
-    }
-    if (record == NULL) {
-      return;
-    }
-    // last_tos should return Frame instead of sp?
-    frame.SetSP(reinterpret_cast<Method**>(record->last_top_of_managed_stack_));
-    pc = ManglePc(record->last_top_of_managed_stack_pc_);
-    record = record->link_;
-  }
-}
-
-#else // defined(ART_USE_LLVM_COMPILER) // LLVM uses ShadowFrame
-
-void Thread::WalkStack(StackVisitor* visitor, bool /*include_upcalls*/) const {
-  for (ShadowFrame* cur = top_shadow_frame_; cur; cur = cur->GetLink()) {
-    Frame frame;
-    frame.SetSP(reinterpret_cast<Method**>(reinterpret_cast<byte*>(cur) +
-                                           ShadowFrame::MethodOffset()));
-    bool should_continue = visitor->VisitFrame(frame, cur->GetDexPC());
-    if (!should_continue) {
-      return;
-    }
-  }
-}
-
-/*
- *                                |                        |
- *                                |                        |
- *                                |                        |
- *                                |      .                 |
- *                                |      .                 |
- *                                |      .                 |
- *                                |      .                 |
- *                                | Method*                |
- *                                |      .                 |
- *                                |      .                 | <-- top_shadow_frame_   (ShadowFrame*)
- *                              / +------------------------+
- *                              ->|      .                 |
- *                              . |      .                 |
- *                              . |      .                 |
- *                               /+------------------------+
- *                              / |      .                 |
- *                             /  |      .                 |
- *     ---                     |  |      .                 |
- *      |                      |  |      .                 |
- *                             |  | Method*                | <-- frame.GetSP() (Method**)
- *  ShadowFrame                \  |      .                 |
- *      |                       ->|      .                 | <-- cur           (ShadowFrame*)
- *     ---                       /+------------------------+
- *                              / |      .                 |
- *                             /  |      .                 |
- *     ---                     |  |      .                 |
- *      |       cur->GetLink() |  |      .                 |
- *                             |  | Method*                |
- *   ShadowFrame               \  |      .                 |
- *      |                       ->|      .                 |
- *     ---                        +------------------------+
- *                                |      .                 |
- *                                |      .                 |
- *                                |      .                 |
- *                                +========================+
- */
-
-#endif
-
 jobject Thread::CreateInternalStackTrace(JNIEnv* env) const {
   // Compute depth of stack
-  CountStackDepthVisitor count_visitor;
-  WalkStack(&count_visitor);
+  CountStackDepthVisitor count_visitor(GetManagedStack(), GetTraceStack());
+  count_visitor.WalkStack();
   int32_t depth = count_visitor.GetDepth();
   int32_t skip_depth = count_visitor.GetSkipDepth();
 
@@ -1328,12 +1150,13 @@
   ScopedJniThreadState ts(env);
 
   // Build internal stack trace
-  BuildInternalStackTraceVisitor build_trace_visitor(skip_depth);
+  BuildInternalStackTraceVisitor build_trace_visitor(GetManagedStack(), GetTraceStack(),
+                                                     skip_depth);
   if (!build_trace_visitor.Init(depth, ts)) {
     return NULL;  // Allocation failed
   }
-  WalkStack(&build_trace_visitor);
-  return build_trace_visitor.GetInternalStackTrace();
+  build_trace_visitor.WalkStack();
+  return AddLocalReference<jobjectArray>(ts.Env(), build_trace_visitor.GetInternalStackTrace());
 }
 
 jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
@@ -1375,8 +1198,8 @@
     // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
     Method* method = down_cast<Method*>(method_trace->Get(i));
     mh.ChangeMethod(method);
-    uint32_t native_pc = pc_trace->Get(i);
-    int32_t line_number = mh.GetLineNumFromNativePC(native_pc);
+    uint32_t dex_pc = pc_trace->Get(i);
+    int32_t line_number = mh.GetLineNumFromDexPC(dex_pc);
     // Allocate element, potentially triggering GC
     // TODO: reuse class_name_object via Class::name_?
     const char* descriptor = mh.GetDeclaringClassDescriptor();
@@ -1609,8 +1432,8 @@
   DO_THREAD_OFFSET(state_);
   DO_THREAD_OFFSET(suspend_count_);
   DO_THREAD_OFFSET(thin_lock_id_);
-  DO_THREAD_OFFSET(top_of_managed_stack_);
-  DO_THREAD_OFFSET(top_of_managed_stack_pc_);
+  //DO_THREAD_OFFSET(top_of_managed_stack_);
+  //DO_THREAD_OFFSET(top_of_managed_stack_pc_);
   DO_THREAD_OFFSET(top_sirt_);
 #undef DO_THREAD_OFFSET
 
@@ -1628,68 +1451,100 @@
   os << offset;
 }
 
-class CatchBlockStackVisitor : public Thread::StackVisitor {
+static const bool kDebugExceptionDelivery = false;
+class CatchBlockStackVisitor : public StackVisitor {
  public:
-  CatchBlockStackVisitor(Class* to_find, Context* ljc)
-      : to_find_(to_find), long_jump_context_(ljc), native_method_count_(0),
+  CatchBlockStackVisitor(Thread* self, Throwable* exception)
+      : StackVisitor(self->GetManagedStack(), self->GetTraceStack(), self->GetLongJumpContext()),
+        self_(self), exception_(exception), to_find_(exception->GetClass()), throw_method_(NULL),
+        throw_frame_id_(0), throw_dex_pc_(0), handler_quick_frame_(NULL),
+        handler_quick_frame_pc_(0), handler_dex_pc_(0), native_method_count_(0),
         method_tracing_active_(Runtime::Current()->IsMethodTracingActive()) {
-#ifndef NDEBUG
-    handler_pc_ = 0xEBADC0DE;
-    handler_frame_.SetSP(reinterpret_cast<Method**>(0xEBADF00D));
-#endif
+    self->StartAssertNoThreadSuspension();  // Exception not in root sets, can't allow GC.
   }
 
-  bool VisitFrame(const Frame& fr, uintptr_t pc) {
-    Method* method = fr.GetMethod();
+  bool VisitFrame() {
+    Method* method = GetMethod();
     if (method == NULL) {
-      // This is the upcall, we remember the frame and last_pc so that we may
-      // long jump to them
-      handler_pc_ = DemanglePc(pc);
-      handler_frame_ = fr;
+      // This is the upcall; remember the frame and last PC so that we may long jump to them.
+      handler_quick_frame_pc_ = GetCurrentQuickFramePc();
+      handler_quick_frame_ = GetCurrentQuickFrame();
       return false;  // End stack walk.
     }
     uint32_t dex_pc = DexFile::kDexNoIndex;
     if (method->IsRuntimeMethod()) {
       // ignore callee save method
       DCHECK(method->IsCalleeSaveMethod());
-    } else if (method->IsNative()) {
-      native_method_count_++;
     } else {
-      // Unwind stack when an exception occurs during method tracing
-      if (UNLIKELY(method_tracing_active_)) {
-#if !defined(ART_USE_LLVM_COMPILER)
-        if (IsTraceExitPc(DemanglePc(pc))) {
-          pc = ManglePc(TraceMethodUnwindFromCode(Thread::Current()));
-        }
-#else
-        UNIMPLEMENTED(FATAL);
-#endif
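+      // Remember the first non-runtime frame as the throw location for Dbg::PostException.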
+      if (throw_method_ == NULL) {
+        throw_method_ = method;
+        throw_frame_id_ = GetFrameId();
+        throw_dex_pc_ = GetDexPc();
       }
-      dex_pc = method->ToDexPC(pc);
+      if (method->IsNative()) {
+        native_method_count_++;
+      } else {
+        // Unwind stack when an exception occurs during method tracing
+        if (UNLIKELY(method_tracing_active_ && IsTraceExitPc(GetCurrentQuickFramePc()))) {
+          uintptr_t pc = AdjustQuickFramePcForDexPcComputation(TraceMethodUnwindFromCode(Thread::Current()));
+          dex_pc = method->ToDexPC(pc);
+        } else {
+          dex_pc = GetDexPc();
+        }
+      }
     }
     if (dex_pc != DexFile::kDexNoIndex) {
       uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc);
       if (found_dex_pc != DexFile::kDexNoIndex) {
-        handler_pc_ = method->ToNativePC(found_dex_pc);
-        handler_frame_ = fr;
+        handler_dex_pc_ = found_dex_pc;
+        handler_quick_frame_pc_ = method->ToNativePC(found_dex_pc);
+        handler_quick_frame_ = GetCurrentQuickFrame();
         return false;  // End stack walk.
       }
     }
-#if !defined(ART_USE_LLVM_COMPILER)
-    // Caller may be handler, fill in callee saves in context
-    long_jump_context_->FillCalleeSaves(fr);
-#endif
     return true;  // Continue stack walk.
   }
 
-  // The type of the exception catch block to find
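+  // Called once the walk is complete: report the exception to the debugger, reinstall it on the
+  // thread and long jump to the handler's quick frame. Does not return.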
+  void DoLongJump() {
+    Method* catch_method = *handler_quick_frame_;
+    Dbg::PostException(throw_frame_id_, throw_method_, throw_dex_pc_,
+                       catch_method, handler_dex_pc_, exception_);
+    if (kDebugExceptionDelivery) {
+      if (catch_method == NULL) {
+        LOG(INFO) << "Handler is upcall";
+      } else {
+        ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+        const DexFile& dex_file =
+            class_linker->FindDexFile(catch_method->GetDeclaringClass()->GetDexCache());
+        int line_number = dex_file.GetLineNumFromPC(catch_method, handler_dex_pc_);
+        LOG(INFO) << "Handler: " << PrettyMethod(catch_method) << " (line: " << line_number << ")";
+      }
+    }
+    self_->SetException(exception_);
+    self_->EndAssertNoThreadSuspension();  // Exception back in root set.
+    // Place context back on thread so it will be available when we continue.
+    self_->ReleaseLongJumpContext(context_);
+    context_->SetSP(reinterpret_cast<uintptr_t>(handler_quick_frame_));
+    CHECK_NE(handler_quick_frame_pc_, 0u);
+    context_->SetPC(handler_quick_frame_pc_);
+    context_->SmashCallerSaves();
+    context_->DoLongJump();
+  }
+
+ private:
+  Thread* self_;
+  Throwable* exception_;
+  // The type of the exception catch block to find.
   Class* to_find_;
-  // Frame with found handler or last frame if no handler found
-  Frame handler_frame_;
-  // PC to branch to for the handler
-  uintptr_t handler_pc_;
-  // Context that will be the target of the long jump
-  Context* long_jump_context_;
+  Method* throw_method_;
+  JDWP::FrameId throw_frame_id_;
+  uint32_t throw_dex_pc_;
+  // Quick frame with found handler or last frame if no handler found.
+  Method** handler_quick_frame_;
+  // PC to branch to for the handler.
+  uintptr_t handler_quick_frame_pc_;
+  // Associated dex PC.
+  uint32_t handler_dex_pc_;
   // Number of native methods passed in crawl (equates to number of SIRTs to pop)
   uint32_t native_method_count_;
   // Is method tracing active?
@@ -1697,8 +1552,6 @@
 };
 
 void Thread::DeliverException() {
-#if !defined(ART_USE_LLVM_COMPILER)
-  const bool kDebugExceptionDelivery = false;
   Throwable* exception = GetException();  // Get exception from thread
   CHECK(exception != NULL);
   // Don't leave exception visible while we try to find the handler, which may cause class
@@ -1710,83 +1563,54 @@
     DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception)
                         << ": " << str_msg << "\n");
   }
-
-  Context* long_jump_context = GetLongJumpContext();
-  CatchBlockStackVisitor catch_finder(exception->GetClass(), long_jump_context);
-  WalkStack(&catch_finder, true);
-
-  Method** sp;
-  uintptr_t throw_native_pc;
-  Method* throw_method = GetCurrentMethod(&throw_native_pc, &sp);
-  uintptr_t catch_native_pc = catch_finder.handler_pc_;
-  Method* catch_method = catch_finder.handler_frame_.GetMethod();
-  Dbg::PostException(sp, throw_method, throw_native_pc, catch_method, catch_native_pc, exception);
-
-  if (kDebugExceptionDelivery) {
-    if (catch_method == NULL) {
-      LOG(INFO) << "Handler is upcall";
-    } else {
-      ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-      const DexFile& dex_file =
-          class_linker->FindDexFile(catch_method->GetDeclaringClass()->GetDexCache());
-      int line_number = dex_file.GetLineNumFromPC(catch_method,
-          catch_method->ToDexPC(catch_finder.handler_pc_));
-      LOG(INFO) << "Handler: " << PrettyMethod(catch_method) << " (line: " << line_number << ")";
-    }
-  }
-  SetException(exception);
-  CHECK_NE(catch_native_pc, 0u);
-  long_jump_context->SetSP(reinterpret_cast<uintptr_t>(catch_finder.handler_frame_.GetSP()));
-  long_jump_context->SetPC(catch_native_pc);
-  long_jump_context->SmashCallerSaves();
-  long_jump_context->DoLongJump();
-#endif
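+  // Find the handler (or upcall) frame by walking the stack, then long jump to it.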
+  CatchBlockStackVisitor catch_finder(this, exception);
+  catch_finder.WalkStack(true);
+  catch_finder.DoLongJump();
   LOG(FATAL) << "UNREACHABLE";
 }
 
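+// Hands out the thread's cached Context (creating one if necessary) and clears the cache so the
+// context is never shared; callers return it via ReleaseLongJumpContext.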
 Context* Thread::GetLongJumpContext() {
   Context* result = long_jump_context_;
-#if !defined(ART_USE_LLVM_COMPILER)
   if (result == NULL) {
     result = Context::Create();
-    long_jump_context_ = result;
+  } else {
+    long_jump_context_ = NULL;  // Avoid context being shared.
   }
-#endif
   return result;
 }
 
-#if !defined(ART_USE_LLVM_COMPILER)
-Method* Thread::GetCurrentMethod(uintptr_t* pc, Method*** sp) const {
-  Frame f = top_of_managed_stack_;
-  Method* m = f.GetMethod();
-  uintptr_t native_pc = top_of_managed_stack_pc_;
+Method* Thread::GetCurrentMethod(uint32_t* dex_pc, size_t* frame_id) const {
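+  // Internal JNI use (e.g. exception throwing) can leave runtime (callee-save) frames on top of
+  // the stack; skip them so the method returned is the one the managed code is executing.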
+  struct CurrentMethodVisitor : public StackVisitor {
+    CurrentMethodVisitor(const ManagedStack* stack,
+                         const std::vector<TraceStackFrame>* trace_stack) :
+      StackVisitor(stack, trace_stack), method_(NULL), dex_pc_(0), frame_id_(0) {}
 
-  // We use JNI internally for exception throwing, so it's possible to arrive
-  // here via a "FromCode" function, in which case there's a synthetic
-  // callee-save method at the top of the stack. These shouldn't be user-visible,
-  // so if we find one, skip it and return the compiled method underneath.
-  if (m != NULL && m->IsCalleeSaveMethod()) {
-    native_pc = f.GetReturnPC();
-    f.Next();
-    m = f.GetMethod();
+    virtual bool VisitFrame() {
+      Method* m = GetMethod();
+      if (m->IsRuntimeMethod()) {
+        // Continue if this is a runtime method.
+        return true;
+      }
+      method_ = m;
+      dex_pc_ = GetDexPc();
+      frame_id_ = GetFrameId();
+      return false;
+    }
+    Method* method_;
+    uint32_t dex_pc_;
+    size_t frame_id_;
+  };
+
+  CurrentMethodVisitor visitor(GetManagedStack(), GetTraceStack());
+  visitor.WalkStack(false);
+  if (dex_pc != NULL) {
+    *dex_pc = visitor.dex_pc_;
   }
-  if (pc != NULL) {
-    *pc = (m != NULL) ? ManglePc(native_pc) : 0;
+  if (frame_id != NULL) {
+    *frame_id = visitor.frame_id_;
   }
-  if (sp != NULL) {
-    *sp = f.GetSP();
-  }
-  return m;
+  return visitor.method_;
 }
-#else
-Method* Thread::GetCurrentMethod(uintptr_t*, Method***) const {
-  ShadowFrame* frame = top_shadow_frame_;
-  if (frame == NULL) {
-    return NULL;
-  }
-  return frame->GetMethod();
-}
-#endif
 
 bool Thread::HoldsLock(Object* object) {
   if (object == NULL) {
@@ -1799,65 +1623,72 @@
   return DecodeField(WellKnownClasses::java_lang_Thread_daemon)->GetBoolean(peer_);
 }
 
-#if !defined(ART_USE_LLVM_COMPILER)
-class ReferenceMapVisitor : public Thread::StackVisitor {
+class ReferenceMapVisitor : public StackVisitor {
  public:
-  ReferenceMapVisitor(Context* context, Heap::RootVisitor* root_visitor, void* arg) :
-    context_(context), root_visitor_(root_visitor), arg_(arg) {
+  ReferenceMapVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack,
+                      Context* context, Heap::RootVisitor* root_visitor,
+                      void* arg) : StackVisitor(stack, trace_stack, context),
+                      root_visitor_(root_visitor), arg_(arg) {
   }
 
-  bool VisitFrame(const Frame& frame, uintptr_t pc) {
-    Method* m = frame.GetMethod();
+  bool VisitFrame() {
     if (false) {
-      LOG(INFO) << "Visiting stack roots in " << PrettyMethod(m)
-                << StringPrintf("@ PC:%04x", m->ToDexPC(pc));
+      LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
+          << StringPrintf("@ PC:%04x", GetDexPc());
     }
-    // Process register map (which native and callee save methods don't have)
-    if (!m->IsNative() && !m->IsCalleeSaveMethod() && !m->IsProxyMethod()) {
-      CHECK(m->GetGcMap() != NULL) << PrettyMethod(m);
-      CHECK_NE(0U, m->GetGcMapLength()) << PrettyMethod(m);
-      verifier::PcToReferenceMap map(m->GetGcMap(), m->GetGcMapLength());
-      const uint8_t* reg_bitmap = map.FindBitMap(m->ToDexPC(pc));
-      CHECK(reg_bitmap != NULL);
-      const VmapTable vmap_table(m->GetVmapTableRaw());
-      const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
-      DCHECK(code_item != NULL);  // can't be NULL or how would we compile its instructions?
-      uint32_t core_spills = m->GetCoreSpillMask();
-      uint32_t fp_spills = m->GetFpSpillMask();
-      size_t frame_size = m->GetFrameSizeInBytes();
-      // For all dex registers in the bitmap
-      size_t num_regs = std::min(map.RegWidth() * 8,
-                                 static_cast<size_t>(code_item->registers_size_));
-      for (size_t reg = 0; reg < num_regs; ++reg) {
-        // Does this register hold a reference?
-        if (TestBitmap(reg, reg_bitmap)) {
-          uint32_t vmap_offset;
-          Object* ref;
-          if (vmap_table.IsInContext(reg, vmap_offset)) {
-            // Compute the register we need to load from the context
-            uint32_t spill_mask = m->GetCoreSpillMask();
-            CHECK_LT(vmap_offset, static_cast<uint32_t>(__builtin_popcount(spill_mask)));
-            uint32_t matches = 0;
-            uint32_t spill_shifts = 0;
-            while (matches != (vmap_offset + 1)) {
-              DCHECK_NE(spill_mask, 0u);
-              matches += spill_mask & 1;  // Add 1 if the low bit is set
-              spill_mask >>= 1;
-              spill_shifts++;
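+    // Shadow frames keep references in explicit slots and visit them directly; quick frames need
+    // the GC map and vmap table to locate references in spilled registers and dex registers.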
+    ShadowFrame* shadow_frame = GetCurrentShadowFrame();
+    if (shadow_frame != NULL) {
+      shadow_frame->VisitRoots(root_visitor_, arg_);
+    } else {
+      Method* m = GetMethod();
+      // Process register map (which native and runtime methods don't have)
+      if (!m->IsNative() && !m->IsRuntimeMethod()) {
+        const uint8_t* gc_map = m->GetGcMap();
+        CHECK(gc_map != NULL) << PrettyMethod(m);
+        uint32_t gc_map_length = m->GetGcMapLength();
+        CHECK_NE(0U, gc_map_length) << PrettyMethod(m);
+        verifier::PcToReferenceMap map(gc_map, gc_map_length);
+        const uint8_t* reg_bitmap = map.FindBitMap(GetDexPc());
+        CHECK(reg_bitmap != NULL);
+        const VmapTable vmap_table(m->GetVmapTableRaw());
+        const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
+        DCHECK(code_item != NULL);  // can't be NULL or how would we compile its instructions?
+        uint32_t core_spills = m->GetCoreSpillMask();
+        uint32_t fp_spills = m->GetFpSpillMask();
+        size_t frame_size = m->GetFrameSizeInBytes();
+        // For all dex registers in the bitmap
+        size_t num_regs = std::min(map.RegWidth() * 8,
+                                   static_cast<size_t>(code_item->registers_size_));
+        for (size_t reg = 0; reg < num_regs; ++reg) {
+          // Does this register hold a reference?
+          if (TestBitmap(reg, reg_bitmap)) {
+            uint32_t vmap_offset;
+            Object* ref;
+            if (vmap_table.IsInContext(reg, vmap_offset)) {
+              // Compute the register we need to load from the context
+              uint32_t spill_mask = core_spills;
+              CHECK_LT(vmap_offset, static_cast<uint32_t>(__builtin_popcount(spill_mask)));
+              uint32_t matches = 0;
+              uint32_t spill_shifts = 0;
+              while (matches != (vmap_offset + 1)) {
+                DCHECK_NE(spill_mask, 0u);
+                matches += spill_mask & 1;  // Add 1 if the low bit is set
+                spill_mask >>= 1;
+                spill_shifts++;
+              }
+              spill_shifts--;  // wind back one as we want the last match
+              ref = reinterpret_cast<Object*>(GetGPR(spill_shifts));
+            } else {
+              ref = reinterpret_cast<Object*>(GetVReg(code_item, core_spills, fp_spills,
+                                                      frame_size, reg));
             }
-            spill_shifts--;  // wind back one as we want the last match
-            ref = reinterpret_cast<Object*>(context_->GetGPR(spill_shifts));
-          } else {
-            ref = reinterpret_cast<Object*>(frame.GetVReg(code_item, core_spills, fp_spills,
-                                                          frame_size, reg));
-          }
-          if (ref != NULL) {
-            root_visitor_(ref, arg_);
+            if (ref != NULL) {
+              root_visitor_(ref, arg_);
+            }
           }
         }
       }
     }
-    context_->FillCalleeSaves(frame);
     return true;
   }
 
@@ -1866,14 +1697,11 @@
     return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0;
   }
 
-  // Context used to build up picture of callee saves
-  Context* context_;
   // Call-back when we visit a root
   Heap::RootVisitor* root_visitor_;
   // Argument to call-back
   void* arg_;
 };
-#endif
 
 void Thread::VisitRoots(Heap::RootVisitor* visitor, void* arg) {
   if (exception_ != NULL) {
@@ -1889,29 +1717,25 @@
   jni_env_->monitors.VisitRoots(visitor, arg);
 
   SirtVisitRoots(visitor, arg);
-  ShadowFrameVisitRoots(visitor, arg);
 
-#if !defined(ART_USE_LLVM_COMPILER)
-  // Cheat and steal the long jump context. Assume that we are not doing a GC during exception
-  // delivery.
-  Context* context = GetLongJumpContext();
   // Visit roots on this thread's stack
-  ReferenceMapVisitor mapper(context, visitor, arg);
-  WalkStack(&mapper);
-#endif
+  Context* context = GetLongJumpContext();
+  ReferenceMapVisitor mapper(GetManagedStack(), GetTraceStack(), context, visitor, arg);
+  mapper.WalkStack();
+  ReleaseLongJumpContext(context);
 }
 
 #if VERIFY_OBJECT_ENABLED
-static void VerifyObject(const Object* obj, void*) {
-  Runtime::Current()->GetHeap()->VerifyObject(obj);
+static void VerifyObject(const Object* obj, void* arg) {
+  Heap* heap = reinterpret_cast<Heap*>(arg);
+  heap->VerifyObject(obj);
 }
 
 void Thread::VerifyStack() {
-#if !defined(ART_USE_LLVM_COMPILER)
   UniquePtr<Context> context(Context::Create());
-  ReferenceMapVisitor mapper(context.get(), VerifyObject, NULL);
-  WalkStack(&mapper);
-#endif
+  ReferenceMapVisitor mapper(GetManagedStack(), GetTraceStack(), context.get(), VerifyObject,
+                             Runtime::Current()->GetHeap());
+  mapper.WalkStack();
 }
 #endif
 
diff --git a/src/thread.h b/src/thread.h
index ad07498..e48f444 100644
--- a/src/thread.h
+++ b/src/thread.h
@@ -35,7 +35,6 @@
 #include "oat/runtime/oat_support_entrypoints.h"
 #include "offsets.h"
 #include "runtime_stats.h"
-#include "shadow_frame.h"
 #include "stack.h"
 #include "trace.h"
 #include "UniquePtr.h"
@@ -93,13 +92,6 @@
   static const size_t kStackOverflowReservedBytes = 8 * KB;
 #endif
 
-  class StackVisitor {
-   public:
-    virtual ~StackVisitor() {}
-    // Return 'true' if we should continue to visit more frames, 'false' to stop.
-    virtual bool VisitFrame(const Frame& frame, uintptr_t pc) = 0;
-  };
-
   // Creates a new native thread corresponding to the given managed peer.
   // Used to implement Thread.start.
   static void CreateNativeThread(Object* peer, size_t stack_size);
@@ -111,7 +103,7 @@
   // Reset internal state of child thread after fork.
   void InitAfterFork();
 
-  static Thread* Current() {
+  static Thread* Current() __attribute__ ((pure)) {
     // We rely on Thread::Current returning NULL for a detached thread, so it's not obvious
     // that we can replace this with a direct %fs access on x86.
     void* thread = pthread_getspecific(Thread::pthread_key_self_);
@@ -145,6 +137,31 @@
 
   void WaitUntilSuspended();
 
+  // Once called, thread suspension will cause an assertion failure.
+  void StartAssertNoThreadSuspension() {
+#ifndef NDEBUG
+    no_thread_suspension_++;
+#endif
+  }
+  // End region where no thread suspension is expected.
+  void EndAssertNoThreadSuspension() {
+#ifndef NDEBUG
+    DCHECK_GT(no_thread_suspension_, 0U);
+    no_thread_suspension_--;
+#endif
+  }
+
+  void AssertThreadSuspensionIsAllowable() const {
+    DCHECK_EQ(0u, no_thread_suspension_);
+  }
+
+  bool CanAccessDirectReferences() const {
+#ifdef MOVING_GARBAGE_COLLECTOR
+    // TODO: when we have a moving collector, we'll need: return state_ == kRunnable;
+#endif
+    return true;
+  }
+
   bool HoldsLock(Object*);
 
   /*
@@ -168,13 +185,6 @@
   // Returns the "system" ThreadGroup, used when attaching our internal threads.
   static Object* GetSystemThreadGroup();
 
-  bool CanAccessDirectReferences() const {
-#ifdef MOVING_GARBAGE_COLLECTOR
-    // TODO: when we have a moving collector, we'll need: return state_ == kRunnable;
-#endif
-    return true;
-  }
-
   uint32_t GetThinLockId() const {
     return thin_lock_id_;
   }
@@ -209,9 +219,6 @@
 
   bool IsStillStarting() const;
 
-  // Returns the current Method* and native PC (not dex PC) for this thread.
-  Method* GetCurrentMethod(uintptr_t* pc = NULL, Method*** sp = NULL) const;
-
   bool IsExceptionPending() const {
     return exception_ != NULL;
   }
@@ -236,20 +243,21 @@
   void DeliverException();
 
   Context* GetLongJumpContext();
-
-  Frame GetTopOfStack() const {
-    return top_of_managed_stack_;
+  void ReleaseLongJumpContext(Context* context) {
+    DCHECK(long_jump_context_ == NULL);
+    long_jump_context_ = context;
   }
 
-  // TODO: this is here for testing, remove when we have exception unit tests
-  // that use the real stack
+  Method* GetCurrentMethod(uint32_t* dex_pc = NULL, size_t* frame_id = NULL) const;
+
   void SetTopOfStack(void* stack, uintptr_t pc) {
-    top_of_managed_stack_.SetSP(reinterpret_cast<Method**>(stack));
-    top_of_managed_stack_pc_ = pc;
+    Method** top_method = reinterpret_cast<Method**>(stack);
+    managed_stack_.SetTopQuickFrame(top_method);
+    managed_stack_.SetTopQuickFramePc(pc);
   }
 
-  void SetTopOfStackPC(uintptr_t pc) {
-    top_of_managed_stack_pc_ = pc;
+  bool HasManagedStack() const {
+    return managed_stack_.GetTopQuickFrame() != NULL || managed_stack_.GetTopShadowFrame() != NULL;
   }
 
   // If 'msg' is NULL, no detail message is set.
@@ -268,7 +276,7 @@
   // Only the GC should call this.
   void ThrowOutOfMemoryError(const char* msg);
 
-  Frame FindExceptionHandler(void* throw_pc, void** handler_pc);
+  //QuickFrameIterator FindExceptionHandler(void* throw_pc, void** handler_pc);
 
   void* FindExceptionHandlerInMethod(const Method* method,
                                      void* throw_pc,
@@ -284,30 +292,6 @@
     return jni_env_;
   }
 
-  // Number of references in SIRTs on this thread
-  size_t NumSirtReferences();
-
-  // Number of references allocated in ShadowFrames on this thread
-  size_t NumShadowFrameReferences();
-
-  // Number of references allocated in SIRTs & shadow frames on this thread
-  size_t NumStackReferences() {
-    return NumSirtReferences() + NumShadowFrameReferences();
-  };
-
-  // Is the given obj in this thread's stack indirect reference table?
-  bool SirtContains(jobject obj);
-
-  // Is the given obj in this thread's ShadowFrame?
-  bool ShadowFrameContains(jobject obj);
-
-  // Is the given obj in this thread's Sirts & ShadowFrames?
-  bool StackReferencesContain(jobject obj);
-
-  void SirtVisitRoots(Heap::RootVisitor* visitor, void* arg);
-
-  void ShadowFrameVisitRoots(Heap::RootVisitor* visitor, void* arg);
-
   // Convert a jobject into a Object*
   Object* DecodeJObject(jobject obj);
 
@@ -339,10 +323,6 @@
     NotifyLocked();
   }
 
-  // Linked list recording transitions from native to managed code
-  void PushNativeToManagedRecord(NativeToManagedRecord* record);
-  void PopNativeToManagedRecord(const NativeToManagedRecord& record);
-
   const ClassLoader* GetClassLoaderOverride() {
     // TODO: need to place the class_loader_override_ in a handle
     // DCHECK(CanAccessDirectReferences());
@@ -433,32 +413,58 @@
   }
 
   static ThreadOffset TopOfManagedStackOffset() {
-    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_of_managed_stack_) +
-        OFFSETOF_MEMBER(Frame, sp_));
+    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
+                        ManagedStack::TopQuickFrameOffset());
   }
 
   static ThreadOffset TopOfManagedStackPcOffset() {
-    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_of_managed_stack_pc_));
+    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
+                        ManagedStack::TopQuickFramePcOffset());
   }
 
-  ShadowFrame* PushShadowFrame(ShadowFrame* frame) {
-    ShadowFrame* old_frame = top_shadow_frame_;
-    top_shadow_frame_ = frame;
-    frame->SetLink(old_frame);
-    return old_frame;
+  const ManagedStack* GetManagedStack() const {
+    return &managed_stack_;
+  }
+
+  // Linked list recording fragments of the managed stack.
+  void PushManagedStackFragment(ManagedStack* fragment) {
+    managed_stack_.PushManagedStackFragment(fragment);
+  }
+  void PopManagedStackFragment(const ManagedStack& fragment) {
+    managed_stack_.PopManagedStackFragment(fragment);
+  }
+
+  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
+    return managed_stack_.PushShadowFrame(new_top_frame);
   }
 
   ShadowFrame* PopShadowFrame() {
-    CHECK(top_shadow_frame_ != NULL);
-    ShadowFrame* frame = top_shadow_frame_;
-    top_shadow_frame_ = frame->GetLink();
-    return frame;
+    return managed_stack_.PopShadowFrame();
   }
 
   static ThreadOffset TopShadowFrameOffset() {
-    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_shadow_frame_));
+    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
+                        ManagedStack::TopShadowFrameOffset());
   }
 
+  // Number of references allocated in ShadowFrames on this thread
+  size_t NumShadowFrameReferences() const {
+    return managed_stack_.NumShadowFrameReferences();
+  }
+
+  // Number of references in SIRTs on this thread
+  size_t NumSirtReferences();
+
+  // Number of references allocated in SIRTs & shadow frames on this thread
+  size_t NumStackReferences() {
+    return NumSirtReferences() + NumShadowFrameReferences();
+  }
+
+  // Is the given obj in this thread's stack indirect reference table?
+  bool SirtContains(jobject obj);
+
+  void SirtVisitRoots(Heap::RootVisitor* visitor, void* arg);
+
   void PushSirt(StackIndirectReferenceTable* sirt);
   StackIndirectReferenceTable* PopSirt();
 
@@ -466,20 +472,18 @@
     return ThreadOffset(OFFSETOF_MEMBER(Thread, top_sirt_));
   }
 
-  void WalkStack(StackVisitor* visitor, bool include_upcalls = false) const;
-
   DebugInvokeReq* GetInvokeReq() {
     return debug_invoke_req_;
   }
 
   void SetDebuggerUpdatesEnabled(bool enabled);
 
-  bool IsTraceStackEmpty() const {
-    return trace_stack_->empty();
+  const std::vector<TraceStackFrame>* GetTraceStack() const {
+    return trace_stack_;
   }
 
-  TraceStackFrame GetTraceStackFrame(uint32_t depth) const {
-    return trace_stack_->at(trace_stack_->size() - depth - 1);
+  bool IsTraceStackEmpty() const {
+    return trace_stack_->empty();
   }
 
   void PushTraceStackFrame(const TraceStackFrame& frame) {
@@ -532,6 +536,47 @@
 
   static void ThreadExitCallback(void* arg);
 
+  // TLS key used to retrieve the Thread*.
+  static pthread_key_t pthread_key_self_;
+
+  // --- Frequently accessed fields first for short offsets ---
+
+  // A non-zero value is used to tell the current thread to enter a safe point
+  // at the next poll.
+  int suspend_count_;
+
+  // The biased card table, see CardTable for details
+  byte* card_table_;
+
+  // The pending exception or NULL.
+  Throwable* exception_;
+
+  // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
+  // We leave extra space so there's room for the code that throws StackOverflowError.
+  byte* stack_end_;
+
+  // The top of the managed stack, often manipulated directly by compiler-generated code.
+  ManagedStack managed_stack_;
+
+  // Every thread may have an associated JNI environment
+  JNIEnvExt* jni_env_;
+
+  // Initialized to "this". On certain architectures (such as x86) reading
+  // off of Thread::Current is easy but getting the address of Thread::Current
+  // is hard. This field can be read off of Thread::Current to give the address.
+  Thread* self_;
+
+  volatile ThreadState state_;
+
+  // Our managed peer (an instance of java.lang.Thread).
+  Object* peer_;
+
+  // The "lowest addressable byte" of the stack
+  byte* stack_begin_;
+
+  // Size of the stack
+  size_t stack_size_;
+
   // Thin lock thread id. This is a small integer used by the thin lock implementation.
   // This is not to be confused with the native thread's tid, nor is it the value returned
   // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
@@ -542,20 +587,6 @@
   // System thread id.
   pid_t tid_;
 
-  // Our managed peer (an instance of java.lang.Thread).
-  Object* peer_;
-
-  // The top_of_managed_stack_ and top_of_managed_stack_pc_ fields are accessed from
-  // compiled code, so we keep them early in the structure to (a) avoid having to keep
-  // fixing the assembler offsets and (b) improve the chances that these will still be aligned.
-
-  // Top of the managed stack, written out prior to the state transition from
-  // kRunnable to kNative. Uses include giving the starting point for scanning
-  // a managed stack when a thread is in native code.
-  Frame top_of_managed_stack_;
-  // PC corresponding to the call out of the top_of_managed_stack_ frame
-  uintptr_t top_of_managed_stack_pc_;
-
   // Guards the 'interrupted_' and 'wait_monitor_' members.
   mutable Mutex* wait_mutex_;
   ConditionVariable* wait_cond_;
@@ -570,53 +601,12 @@
 
   friend class Monitor;
 
-  RuntimeStats stats_;
-
-  // The biased card table, see CardTable for details
-  byte* card_table_;
-
-  // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
-  // We leave extra space so there's room for the code that throws StackOverflowError.
-  byte* stack_end_;
-
-  // Size of the stack
-  size_t stack_size_;
-
-  // The "lowest addressable byte" of the stack
-  byte* stack_begin_;
-
-  // A linked list (of stack allocated records) recording transitions from
-  // native to managed code.
-  NativeToManagedRecord* native_to_managed_record_;
-
   // Top of linked list of stack indirect reference tables or NULL for none
   StackIndirectReferenceTable* top_sirt_;
 
-  // Top of linked list of shadow stack or NULL for none
-  // Some backend may require shadow frame to ease the GC work.
-  ShadowFrame* top_shadow_frame_;
-
-  // Every thread may have an associated JNI environment
-  JNIEnvExt* jni_env_;
-
-  volatile ThreadState state_;
-
-  // Initialized to "this". On certain architectures (such as x86) reading
-  // off of Thread::Current is easy but getting the address of Thread::Current
-  // is hard. This field can be read off of Thread::Current to give the address.
-  Thread* self_;
-
   Runtime* runtime_;
 
-  // The pending exception or NULL.
-  Throwable* exception_;
-
-  // A non-zero value is used to tell the current thread to enter a safe point
-  // at the next poll.
-  int suspend_count_;
-  // How much of 'suspend_count_' is by request of the debugger, used to set things right
-  // when the debugger detaches. Must be <= suspend_count_.
-  int debug_suspend_count_;
+  RuntimeStats stats_;
 
   // Needed to get the right ClassLoader in JNI_OnLoad, but also
   // useful for testing.
@@ -628,12 +618,13 @@
   // A boolean telling us whether we're recursively throwing OOME.
   uint32_t throwing_OutOfMemoryError_;
 
+  // How much of 'suspend_count_' is by request of the debugger, used to set things right
+  // when the debugger detaches. Must be <= suspend_count_.
+  int debug_suspend_count_;
+
   // JDWP invoke-during-breakpoint support.
   DebugInvokeReq* debug_invoke_req_;
 
-  // TLS key used to retrieve the Thread*.
-  static pthread_key_t pthread_key_self_;
-
   // Additional stack used by method tracer to store method and return pc values.
   // Stored as a pointer since std::vector is not PACKED.
   std::vector<TraceStackFrame>* trace_stack_;
@@ -644,8 +635,11 @@
   // A cached pthread_t for the pthread underlying this Thread*.
   pthread_t pthread_self_;
 
+  // Mutexes held by this thread, see CheckSafeToLockOrUnlock.
   uint32_t held_mutexes_[kMaxMutexRank + 1];
 
+  // A positive value implies we're in a region where thread suspension isn't expected.
+  uint32_t no_thread_suspension_;
  public:
   // Runtime support function pointers
   EntryPoints entrypoints_;
diff --git a/src/thread_x86.cc b/src/thread_x86.cc
index acc38f4..6a72286 100644
--- a/src/thread_x86.cc
+++ b/src/thread_x86.cc
@@ -130,6 +130,9 @@
       : "r"(THREAD_SELF_OFFSET)  // input
       :);  // clobber
   CHECK_EQ(self_check, this);
+
+  // Sanity check other offsets.
+  CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_));
 }
 
 }  // namespace art
diff --git a/src/trace.cc b/src/trace.cc
index a453e37..7b7c3eb 100644
--- a/src/trace.cc
+++ b/src/trace.cc
@@ -194,26 +194,28 @@
   return true;
 }
 
-static void TraceRestoreStack(Thread* t, void*) {
-  Frame frame = t->GetTopOfStack();
-  if (frame.GetSP() != 0) {
-    for ( ; frame.GetMethod() != 0; frame.Next()) {
-      if (t->IsTraceStackEmpty()) {
-        break;
+static void TraceRestoreStack(Thread* self, void*) {
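+  // Walk the frames and swap any trace-exit return PC back to the original return PC saved on
+  // the thread's trace stack.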
+  struct RestoreStackVisitor : public StackVisitor {
+    RestoreStackVisitor(Thread* self) : StackVisitor(self->GetManagedStack(),
+                                                     self->GetTraceStack()), self_(self) {}
+
+    virtual bool VisitFrame() {
+      if (self_->IsTraceStackEmpty()) {
+        return false;  // Stop.
       }
-#if defined(ART_USE_LLVM_COMPILER)
-      UNIMPLEMENTED(FATAL);
-#else
-      uintptr_t pc = frame.GetReturnPC();
-      Method* method = frame.GetMethod();
+      uintptr_t pc = GetReturnPc();
       if (IsTraceExitPc(pc)) {
-        TraceStackFrame trace_frame = t->PopTraceStackFrame();
-        frame.SetReturnPC(trace_frame.return_pc_);
-        CHECK(method == trace_frame.method_);
+        TraceStackFrame trace_frame = self_->PopTraceStackFrame();
+        SetReturnPc(trace_frame.return_pc_);
+        CHECK(GetMethod() == trace_frame.method_);
       }
-#endif
+      return true;  // Continue.
     }
-  }
+
+    Thread* self_;
+  };
+  RestoreStackVisitor visitor(self);
+  visitor.WalkStack();
 }
 
 void Trace::AddSavedCodeToMap(const Method* method, const void* code) {
@@ -470,10 +472,9 @@
   for (It it = visited_methods_.begin(); it != visited_methods_.end(); ++it) {
     const Method* method = *it;
     MethodHelper mh(method);
-    os << StringPrintf("%p\t%s\t%s\t%s\t%s\t%d\n", method,
+    os << StringPrintf("%p\t%s\t%s\t%s\t%s\n", method,
         PrettyDescriptor(mh.GetDeclaringClassDescriptor()).c_str(), mh.GetName(),
-        mh.GetSignature().c_str(), mh.GetDeclaringClassSourceFile(),
-        mh.GetLineNumFromNativePC(0));
+        mh.GetSignature().c_str(), mh.GetDeclaringClassSourceFile());
   }
 }
 
diff --git a/test/ReferenceMap/stack_walk_refmap_jni.cc b/test/ReferenceMap/stack_walk_refmap_jni.cc
index 20be7a1..d7910af 100644
--- a/test/ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/ReferenceMap/stack_walk_refmap_jni.cc
@@ -40,23 +40,21 @@
           << "Error: Reg @ " << i << "-th argument is not in GC map"; \
   } while (false)
 
-struct ReferenceMap2Visitor : public Thread::StackVisitor {
-  ReferenceMap2Visitor() {
+struct ReferenceMap2Visitor : public StackVisitor {
+  explicit ReferenceMap2Visitor(const ManagedStack* stack,
+                                const std::vector<TraceStackFrame>* trace_stack) :
+    StackVisitor(stack, trace_stack) {
   }
 
-  bool VisitFrame(const Frame& frame, uintptr_t pc) {
-    Method* m = frame.GetMethod();
-    if (!m || m->IsNative()) {
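+    // Only quick frames for compiled managed code carry a GC map to check.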
+  bool VisitFrame() {
+    Method* m = GetMethod();
+    if (!m || m->IsNative() || m->IsRuntimeMethod() || IsShadowFrame()) {
       return true;
     }
     LOG(INFO) << "At " << PrettyMethod(m, false);
 
     verifier::PcToReferenceMap map(m->GetGcMap(), m->GetGcMapLength());
 
-    if (!pc) {
-      // pc == NULL: m is either a native method or a phony method
-      return true;
-    }
     if (m->IsCalleeSaveMethod()) {
       LOG(WARNING) << "no PC for " << PrettyMethod(m);
       return true;
@@ -283,8 +281,9 @@
 
 extern "C" JNIEXPORT jint JNICALL Java_ReferenceMap_refmap(JNIEnv*, jobject, jint count) {
   // Visitor
-  ReferenceMap2Visitor mapper;
-  Thread::Current()->WalkStack(&mapper);
+  ReferenceMap2Visitor mapper(Thread::Current()->GetManagedStack(),
+                              Thread::Current()->GetTraceStack());
+  mapper.WalkStack();
 
   return count + 1;
 }
diff --git a/test/StackWalk/stack_walk_jni.cc b/test/StackWalk/stack_walk_jni.cc
index 15d866f..3773198 100644
--- a/test/StackWalk/stack_walk_jni.cc
+++ b/test/StackWalk/stack_walk_jni.cc
@@ -29,31 +29,36 @@
   ( ((reg) < mh.GetCodeItem()->registers_size_) &&                       \
     (( *((reg_bitmap) + (reg)/8) >> ((reg) % 8) ) & 0x01) )
 
-#define CHECK_REGS(...) do {          \
-    int t[] = {__VA_ARGS__};             \
-    int t_size = sizeof(t) / sizeof(*t);      \
-    for (int i = 0; i < t_size; ++i)          \
-      CHECK(REG(mh, reg_bitmap, t[i])) << "Error: Reg " << i << " is not in RegisterMap";  \
-  } while (false)
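+// Register checks only apply to quick frames; shadow frames have no reg_bitmap to test against.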
+#define CHECK_REGS(...) if (!IsShadowFrame()) { \
+    int t[] = {__VA_ARGS__}; \
+    int t_size = sizeof(t) / sizeof(*t); \
+    for (int i = 0; i < t_size; ++i) \
+      CHECK(REG(mh, reg_bitmap, t[i])) << "Error: Reg " << i << " is not in RegisterMap"; \
+  }
 
 static int gJava_StackWalk_refmap_calls = 0;
 
-struct TestReferenceMapVisitor : public Thread::StackVisitor {
-  TestReferenceMapVisitor() {
+struct TestReferenceMapVisitor : public StackVisitor {
+  explicit TestReferenceMapVisitor(const ManagedStack* stack,
+                                   const std::vector<TraceStackFrame>* trace_stack) :
+    StackVisitor(stack, trace_stack) {
   }
 
-  bool VisitFrame(const Frame& frame, uintptr_t pc) {
-    Method* m = frame.GetMethod();
+  bool VisitFrame() {
+    Method* m = GetMethod();
     CHECK(m != NULL);
     LOG(INFO) << "At " << PrettyMethod(m, false);
 
     if (m->IsCalleeSaveMethod() || m->IsNative()) {
       LOG(WARNING) << "no PC for " << PrettyMethod(m);
-      CHECK_EQ(pc, 0u);
+      CHECK_EQ(GetDexPc(), DexFile::kDexNoIndex);
       return true;
     }
-    verifier::PcToReferenceMap map(m->GetGcMap(), m->GetGcMapLength());
-    const uint8_t* reg_bitmap = map.FindBitMap(m->ToDexPC(pc));
+    const uint8_t* reg_bitmap = NULL;
+    if (!IsShadowFrame()) {
+      verifier::PcToReferenceMap map(m->GetGcMap(), m->GetGcMapLength());
+      reg_bitmap = map.FindBitMap(GetDexPc());
+    }
     MethodHelper mh(m);
     StringPiece m_name(mh.GetName());
 
@@ -62,29 +67,29 @@
     // find is what is expected.
     if (m_name == "f") {
       if (gJava_StackWalk_refmap_calls == 1) {
-        CHECK_EQ(1U, m->ToDexPC(pc));
+        CHECK_EQ(1U, GetDexPc());
         CHECK_REGS(1);
       } else {
         CHECK_EQ(gJava_StackWalk_refmap_calls, 2);
-        CHECK_EQ(5U, m->ToDexPC(pc));
+        CHECK_EQ(5U, GetDexPc());
         CHECK_REGS(1);
       }
     } else if (m_name == "g") {
       if (gJava_StackWalk_refmap_calls == 1) {
-        CHECK_EQ(0xcU, m->ToDexPC(pc));
+        CHECK_EQ(0xcU, GetDexPc());
         CHECK_REGS(0, 2);  // Note that v1 is not in the minimal root set
       } else {
         CHECK_EQ(gJava_StackWalk_refmap_calls, 2);
-        CHECK_EQ(0xcU, m->ToDexPC(pc));
+        CHECK_EQ(0xcU, GetDexPc());
         CHECK_REGS(0, 2);
       }
     } else if (m_name == "shlemiel") {
       if (gJava_StackWalk_refmap_calls == 1) {
-        CHECK_EQ(0x380U, m->ToDexPC(pc));
+        CHECK_EQ(0x380U, GetDexPc());
         CHECK_REGS(2, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 21, 25);
       } else {
         CHECK_EQ(gJava_StackWalk_refmap_calls, 2);
-        CHECK_EQ(0x380U, m->ToDexPC(pc));
+        CHECK_EQ(0x380U, GetDexPc());
         CHECK_REGS(2, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 21, 25);
       }
     }
@@ -99,8 +104,9 @@
   gJava_StackWalk_refmap_calls++;
 
   // Visitor
-  TestReferenceMapVisitor mapper;
-  Thread::Current()->WalkStack(&mapper);
+  TestReferenceMapVisitor mapper(Thread::Current()->GetManagedStack(),
+                                 Thread::Current()->GetTraceStack());
+  mapper.WalkStack();
 
   return count + 1;
 }
@@ -109,8 +115,9 @@
   gJava_StackWalk_refmap_calls++;
 
   // Visitor
-  TestReferenceMapVisitor mapper;
-  Thread::Current()->WalkStack(&mapper);
+  TestReferenceMapVisitor mapper(Thread::Current()->GetManagedStack(),
+                                 Thread::Current()->GetTraceStack());
+  mapper.WalkStack();
 
   return count + 1;
 }