Consistently use __asm__ rather than asm.

For greppability.

Change-Id: Iae46e1684695eb42cfc1267b744d0442acbbfe25
diff --git a/src/compiler_llvm/runtime_support_llvm.cc b/src/compiler_llvm/runtime_support_llvm.cc
index d0f6e8c..b0d59ea 100644
--- a/src/compiler_llvm/runtime_support_llvm.cc
+++ b/src/compiler_llvm/runtime_support_llvm.cc
@@ -47,7 +47,7 @@
 Thread* art_get_current_thread_from_code() {
 #if defined(__i386__)
   Thread* ptr;
-  asm volatile("movl %%fs:(%1), %0"
+  __asm__ __volatile__("movl %%fs:(%1), %0"
       : "=r"(ptr)  // output
       : "r"(THREAD_SELF_OFFSET)  // input
       :);  // clobber
diff --git a/src/oat/runtime/x86/context_x86.cc b/src/oat/runtime/x86/context_x86.cc
index d6f45da..412b655 100644
--- a/src/oat/runtime/x86/context_x86.cc
+++ b/src/oat/runtime/x86/context_x86.cc
@@ -63,7 +63,7 @@
   // correct delivery instruction.
   gprs_[ESP] -= 4;
   *(reinterpret_cast<uintptr_t*>(gprs_[ESP])) = eip_;
-  asm volatile(
+  __asm__ __volatile__(
       "pushl %4\n\t"
       "pushl %0\n\t"
       "pushl %1\n\t"
diff --git a/src/thread_x86.cc b/src/thread_x86.cc
index 30d19d4..acc38f4 100644
--- a/src/thread_x86.cc
+++ b/src/thread_x86.cc
@@ -113,7 +113,7 @@
   uint16_t rpl = 3;  // Requested privilege level
   uint16_t selector = (entry_number << 3) | table_indicator | rpl;
   // TODO: use our assembler to generate code
-  asm volatile("movw %w0, %%fs"
+  __asm__ __volatile__("movw %w0, %%fs"
       :    // output
       : "q"(selector)  // input
       :);  // clobber
@@ -125,7 +125,7 @@
   Thread* self_check;
   // TODO: use our assembler to generate code
   CHECK_EQ(THREAD_SELF_OFFSET, OFFSETOF_MEMBER(Thread, self_));
-  asm volatile("movl %%fs:(%1), %0"
+  __asm__ __volatile__("movl %%fs:(%1), %0"
       : "=r"(self_check)  // output
       : "r"(THREAD_SELF_OFFSET)  // input
       :);  // clobber