ART: Fix unused variables and functions

Clean up several unused-variable and unused-function findings:

- graph_visualizer.h: remove the unused kGVNPassName constant, and drop
  the now-unneeded graph_visualizer.h include from optimization.h.
- assembler_arm.cc: the imm5 local was assigned but never read (the
  shifter-operand encoding always uses offset_ directly). The ARM
  instruction set encodes RRX as ROR with a zero shift immediate, so
  the dead store is replaced by a CHECK that offset_ is zero.
- entrypoint_utils-inl.h / entrypoint_utils.h: declare the shared
  helpers inline instead of static inline. With internal linkage, every
  translation unit that includes the header owns a private copy of each
  helper, and copies a TU never calls can be flagged as unused
  functions; with external linkage, the duplicate inline definitions
  are merged at link time and no such warning is emitted.
- java_lang_Thread.cc: delete the unused Thread* local in
  Thread_nativeSetName.

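A minimal sketch of the linkage difference (hypothetical file and
function names, not part of this change):

  // linkage_demo.h - illustration only.
  #ifndef ART_LINKAGE_DEMO_H_
  #define ART_LINKAGE_DEMO_H_

  // Internal linkage: each translation unit including this header gets
  // its own private definition, and a TU that never calls it can be
  // flagged by -Wunused-function.
  static inline int Twice(int x) { return 2 * x; }

  // External linkage: every inclusion names one logical function; the
  // duplicate inline definitions are merged at link time, so an unused
  // inclusion draws no warning.
  inline int Thrice(int x) { return 3 * x; }

  #endif  // ART_LINKAGE_DEMO_H_
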
Change-Id: Icbab884d2dfd71656347368b424cb35cbf524051
diff --git a/compiler/optimizing/graph_visualizer.h b/compiler/optimizing/graph_visualizer.h
index 4d8bec2..60d996b 100644
--- a/compiler/optimizing/graph_visualizer.h
+++ b/compiler/optimizing/graph_visualizer.h
@@ -30,7 +30,6 @@
 // TODO: Create an analysis/optimization abstraction.
 static const char* kLivenessPassName = "liveness";
 static const char* kRegisterAllocatorPassName = "register";
-static const char* kGVNPassName = "gvn";
 
 /**
  * If enabled, emits compilation information suitable for the c1visualizer tool
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index d281248..e36ef19 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -17,7 +17,6 @@
 #ifndef ART_COMPILER_OPTIMIZING_OPTIMIZATION_H_
 #define ART_COMPILER_OPTIMIZING_OPTIMIZATION_H_
 
-#include "graph_visualizer.h"
 #include "nodes.h"
 
 namespace art {
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 9c84bc1..0f28591 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -205,10 +205,9 @@
       encoding =  am_ | offset_;
     }
   } else {
-    uint32_t imm5 = offset_;
     uint32_t shift = shift_;
     if (shift == RRX) {
-      imm5 = 0;
+      CHECK_EQ(offset_, 0);
       shift = ROR;
     }
     encoding = am_ | static_cast<uint32_t>(rm_) | shift << 5 | offset_ << 7 | B25;
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 1a8ca02..67265a2 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -37,9 +37,9 @@
 
 template <const bool kAccessCheck>
 ALWAYS_INLINE
-static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
-                                              mirror::ArtMethod* method,
-                                              Thread* self, bool* slow_path) {
+inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
+                                       mirror::ArtMethod* method,
+                                       Thread* self, bool* slow_path) {
   mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx);
   if (UNLIKELY(klass == NULL)) {
     klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method);
@@ -90,9 +90,9 @@
 }
 
 ALWAYS_INLINE
-static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
-                                                                 Thread* self,
-                                                                 bool* slow_path) {
+inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
+                                                          Thread* self,
+                                                          bool* slow_path) {
   if (UNLIKELY(!klass->IsInitialized())) {
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> h_class(hs.NewHandle(klass));
@@ -120,10 +120,10 @@
 // check.
 template <bool kAccessCheck, bool kInstrumented>
 ALWAYS_INLINE
-static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
-                                                  mirror::ArtMethod* method,
-                                                  Thread* self,
-                                                  gc::AllocatorType allocator_type) {
+inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
+                                           mirror::ArtMethod* method,
+                                           Thread* self,
+                                           gc::AllocatorType allocator_type) {
   bool slow_path = false;
   mirror::Class* klass = CheckObjectAlloc<kAccessCheck>(type_idx, method, self, &slow_path);
   if (UNLIKELY(slow_path)) {
@@ -139,9 +139,9 @@
 // Given the context of a calling Method and a resolved class, create an instance.
 template <bool kInstrumented>
 ALWAYS_INLINE
-static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
-                                                          Thread* self,
-                                                          gc::AllocatorType allocator_type) {
+inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
+                                                   Thread* self,
+                                                   gc::AllocatorType allocator_type) {
   DCHECK(klass != nullptr);
   bool slow_path = false;
   klass = CheckClassInitializedForObjectAlloc(klass, self, &slow_path);
@@ -160,9 +160,9 @@
 // Given the context of a calling Method and an initialized class, create an instance.
 template <bool kInstrumented>
 ALWAYS_INLINE
-static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
-                                                             Thread* self,
-                                                             gc::AllocatorType allocator_type) {
+inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
+                                                      Thread* self,
+                                                      gc::AllocatorType allocator_type) {
   DCHECK(klass != nullptr);
   // Pass in false since the object can not be finalizable.
   return klass->Alloc<kInstrumented, false>(self, allocator_type);
@@ -171,10 +171,10 @@
 
 template <bool kAccessCheck>
 ALWAYS_INLINE
-static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
-                                             mirror::ArtMethod* method,
-                                             int32_t component_count,
-                                             bool* slow_path) {
+inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
+                                      mirror::ArtMethod* method,
+                                      int32_t component_count,
+                                      bool* slow_path) {
   if (UNLIKELY(component_count < 0)) {
     ThrowNegativeArraySizeException(component_count);
     *slow_path = true;
@@ -207,11 +207,11 @@
 // check.
 template <bool kAccessCheck, bool kInstrumented>
 ALWAYS_INLINE
-static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
-                                                mirror::ArtMethod* method,
-                                                int32_t component_count,
-                                                Thread* self,
-                                                gc::AllocatorType allocator_type) {
+inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
+                                         mirror::ArtMethod* method,
+                                         int32_t component_count,
+                                         Thread* self,
+                                         gc::AllocatorType allocator_type) {
   bool slow_path = false;
   mirror::Class* klass = CheckArrayAlloc<kAccessCheck>(type_idx, method, component_count,
                                                        &slow_path);
@@ -230,11 +230,11 @@
 
 template <bool kAccessCheck, bool kInstrumented>
 ALWAYS_INLINE
-static inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
-                                                        mirror::ArtMethod* method,
-                                                        int32_t component_count,
-                                                        Thread* self,
-                                                        gc::AllocatorType allocator_type) {
+inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
+                                                 mirror::ArtMethod* method,
+                                                 int32_t component_count,
+                                                 Thread* self,
+                                                 gc::AllocatorType allocator_type) {
   DCHECK(klass != nullptr);
   if (UNLIKELY(component_count < 0)) {
     ThrowNegativeArraySizeException(component_count);
@@ -254,8 +254,8 @@
 }
 
 template<FindFieldType type, bool access_check>
-static inline mirror::ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
-                                                  Thread* self, size_t expected_size) {
+inline mirror::ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
+                                           Thread* self, size_t expected_size) {
   bool is_primitive;
   bool is_set;
   bool is_static;
@@ -349,9 +349,9 @@
 #undef EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL
 
 template<InvokeType type, bool access_check>
-static inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
-                                                    mirror::Object** this_object,
-                                                    mirror::ArtMethod** referrer, Thread* self) {
+inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
+                                             mirror::Object** this_object,
+                                             mirror::ArtMethod** referrer, Thread* self) {
   ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
   mirror::ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, *referrer);
   if (resolved_method == nullptr) {
@@ -475,9 +475,9 @@
 #undef EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL
 
 // Fast path field resolution that can't initialize classes or throw exceptions.
-static inline mirror::ArtField* FindFieldFast(uint32_t field_idx,
-                                              mirror::ArtMethod* referrer,
-                                              FindFieldType type, size_t expected_size) {
+inline mirror::ArtField* FindFieldFast(uint32_t field_idx,
+                                       mirror::ArtMethod* referrer,
+                                       FindFieldType type, size_t expected_size) {
   mirror::ArtField* resolved_field =
       referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx);
   if (UNLIKELY(resolved_field == nullptr)) {
@@ -528,10 +528,10 @@
 }
 
 // Fast path method resolution that can't throw exceptions.
-static inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
-                                                mirror::Object* this_object,
-                                                mirror::ArtMethod* referrer,
-                                                bool access_check, InvokeType type) {
+inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
+                                         mirror::Object* this_object,
+                                         mirror::ArtMethod* referrer,
+                                         bool access_check, InvokeType type) {
   if (UNLIKELY(this_object == NULL && type != kStatic)) {
     return NULL;
   }
@@ -568,7 +568,7 @@
   }
 }
 
-static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
+inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
                                                     mirror::ArtMethod* referrer,
                                                     Thread* self, bool can_run_clinit,
                                                     bool verify_access) {
@@ -604,13 +604,13 @@
   return h_class.Get();
 }
 
-static inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer,
-                                                    uint32_t string_idx) {
+inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer,
+                                             uint32_t string_idx) {
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   return class_linker->ResolveString(string_idx, referrer);
 }
 
-static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) {
+inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) {
   // Save any pending exception over monitor exit call.
   mirror::Throwable* saved_exception = NULL;
   ThrowLocation saved_throw_location;
@@ -635,7 +635,7 @@
 }
 
 template <typename INT_TYPE, typename FLOAT_TYPE>
-static inline INT_TYPE art_float_to_integral(FLOAT_TYPE f) {
+inline INT_TYPE art_float_to_integral(FLOAT_TYPE f) {
   const INT_TYPE kMaxInt = static_cast<INT_TYPE>(std::numeric_limits<INT_TYPE>::max());
   const INT_TYPE kMinInt = static_cast<INT_TYPE>(std::numeric_limits<INT_TYPE>::min());
   const FLOAT_TYPE kMaxIntAsFloat = static_cast<FLOAT_TYPE>(kMaxInt);
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 311cafa..0531122 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -42,13 +42,14 @@
 class Thread;
 
 template <const bool kAccessCheck>
-ALWAYS_INLINE static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
-                                                            mirror::ArtMethod* method,
-                                                            Thread* self, bool* slow_path)
+ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
+                                                     mirror::ArtMethod* method,
+                                                     Thread* self, bool* slow_path)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-ALWAYS_INLINE static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
-                                                                               Thread* self, bool* slow_path)
+ALWAYS_INLINE inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
+                                                                        Thread* self,
+                                                                        bool* slow_path)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 // Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
@@ -56,32 +57,32 @@
 // When verification/compiler hasn't been able to verify access, optionally perform an access
 // check.
 template <bool kAccessCheck, bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
-                                                                mirror::ArtMethod* method,
-                                                                Thread* self,
-                                                                gc::AllocatorType allocator_type)
+ALWAYS_INLINE inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
+                                                         mirror::ArtMethod* method,
+                                                         Thread* self,
+                                                         gc::AllocatorType allocator_type)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 // Given the context of a calling Method and a resolved class, create an instance.
 template <bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
-                                                                        Thread* self,
-                                                                        gc::AllocatorType allocator_type)
+ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
+                                                                 Thread* self,
+                                                                 gc::AllocatorType allocator_type)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 // Given the context of a calling Method and an initialized class, create an instance.
 template <bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
-                                                                           Thread* self,
-                                                                           gc::AllocatorType allocator_type)
+ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
+                                                                    Thread* self,
+                                                                    gc::AllocatorType allocator_type)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 
 template <bool kAccessCheck>
-ALWAYS_INLINE static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
-                                                           mirror::ArtMethod* method,
-                                                           int32_t component_count,
-                                                           bool* slow_path)
+ALWAYS_INLINE inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
+                                                    mirror::ArtMethod* method,
+                                                    int32_t component_count,
+                                                    bool* slow_path)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 // Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If
@@ -89,19 +90,19 @@
 // When verification/compiler hasn't been able to verify access, optionally perform an access
 // check.
 template <bool kAccessCheck, bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
-                                                              mirror::ArtMethod* method,
-                                                              int32_t component_count,
-                                                              Thread* self,
-                                                              gc::AllocatorType allocator_type)
+ALWAYS_INLINE inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
+                                                       mirror::ArtMethod* method,
+                                                       int32_t component_count,
+                                                       Thread* self,
+                                                       gc::AllocatorType allocator_type)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 template <bool kAccessCheck, bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
-                                                                      mirror::ArtMethod* method,
-                                                                      int32_t component_count,
-                                                                      Thread* self,
-                                                                      gc::AllocatorType allocator_type)
+ALWAYS_INLINE inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
+                                                               mirror::ArtMethod* method,
+                                                               int32_t component_count,
+                                                               Thread* self,
+                                                               gc::AllocatorType allocator_type)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod* method,
@@ -130,43 +131,42 @@
 };
 
 template<FindFieldType type, bool access_check>
-static inline mirror::ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
-                                                  Thread* self, size_t expected_size)
+inline mirror::ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
+                                           Thread* self, size_t expected_size)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 template<InvokeType type, bool access_check>
-static inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
-                                                    mirror::Object** this_object,
-                                                    mirror::ArtMethod** referrer, Thread* self)
+inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
+                                             mirror::Object** this_object,
+                                             mirror::ArtMethod** referrer, Thread* self)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 // Fast path field resolution that can't initialize classes or throw exceptions.
-static inline mirror::ArtField* FindFieldFast(uint32_t field_idx,
-                                              mirror::ArtMethod* referrer,
-                                              FindFieldType type, size_t expected_size)
+inline mirror::ArtField* FindFieldFast(uint32_t field_idx,
+                                       mirror::ArtMethod* referrer,
+                                       FindFieldType type, size_t expected_size)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 // Fast path method resolution that can't throw exceptions.
-static inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
-                                                mirror::Object* this_object,
-                                                mirror::ArtMethod* referrer,
-                                                bool access_check, InvokeType type)
+inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
+                                         mirror::Object* this_object,
+                                         mirror::ArtMethod* referrer,
+                                         bool access_check, InvokeType type)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
-                                                    mirror::ArtMethod* referrer,
-                                                    Thread* self, bool can_run_clinit,
-                                                    bool verify_access)
+inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
+                                             mirror::ArtMethod* referrer,
+                                             Thread* self, bool can_run_clinit,
+                                             bool verify_access)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-static inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer,
-                                                    uint32_t string_idx)
+inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer, uint32_t string_idx)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 // TODO: annotalysis disabled as monitor semantics are maintained in Java code.
-static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self)
+inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self)
     NO_THREAD_SAFETY_ANALYSIS;
 
 void CheckReferenceResult(mirror::Object* o, Thread* self)
@@ -181,7 +181,7 @@
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 template <typename INT_TYPE, typename FLOAT_TYPE>
-static inline INT_TYPE art_float_to_integral(FLOAT_TYPE f);
+inline INT_TYPE art_float_to_integral(FLOAT_TYPE f);
 
 }  // namespace art
 
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index 420e9df..760eb9b 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -118,14 +118,12 @@
 
 static void Thread_nativeSetName(JNIEnv* env, jobject peer, jstring java_name) {
   ScopedUtfChars name(env, java_name);
-  Thread* self;
   {
     ScopedObjectAccess soa(env);
     if (soa.Decode<mirror::Object*>(peer) == soa.Self()->GetPeer()) {
       soa.Self()->SetThreadName(name.c_str());
       return;
     }
-    self = soa.Self();
   }
   // Suspend thread to avoid it from killing itself while we set its name. We don't just hold the
   // thread list lock to avoid this, as setting the thread name causes mutator to lock/unlock