Merge "Fix file open and write binary" into emu-master-dev
diff --git a/android-qemu2-glue/main.cpp b/android-qemu2-glue/main.cpp
index 0e9c3c6..56ef3f6 100755
--- a/android-qemu2-glue/main.cpp
+++ b/android-qemu2-glue/main.cpp
@@ -1053,8 +1053,12 @@
             rendererConfig.selectedRenderer == SELECTED_RENDERER_ANGLE9;
     // Features to disable or enable depending on rendering backend
     // and gpu make/model/version
+#if defined(__APPLE__) && defined(__aarch64__)
+    shouldDisableAsyncSwap = false;
+#else
     shouldDisableAsyncSwap |= !strncmp("arm", kTarget.androidArch, 3) ||
                               System::get()->getProgramBitness() == 32;
+#endif
     shouldDisableAsyncSwap = shouldDisableAsyncSwap ||
                              async_query_host_gpu_SyncBlacklisted();
 
@@ -1393,6 +1397,7 @@
         // Situations where not to use mmap() for RAM
         // 1. Using HDD on Linux or macOS; no file mapping or we will have a bad time.
         // 2. macOS when having a machine with < 8 logical cores
+        // 3. Apple Silicon (file-backed RAM snapshots currently fail there, likely due to the 16 KB host page size or a synchronization bug)
         if (avd){
             auto contentPath = avdInfo_getContentPath(avd);
             auto diskKind = System::get()->pathDiskKind(contentPath);
@@ -1409,6 +1414,10 @@
             if (numCores < 8) {
                 feature_set_if_not_overridden(kFeature_QuickbootFileBacked, false /* enable */);
             }
+#ifdef __aarch64__
+            // TODO: Fix file-backed RAM snapshot support.
+            feature_set_if_not_overridden(kFeature_QuickbootFileBacked, false /* enable */);
+#endif
 #endif
 
             if (opts->crostini) {
@@ -1621,6 +1630,11 @@
     path_mkdir_if_needed(pstorePath.c_str(), 0777);
     android_chmod(pstorePath.c_str(), 0777);
 
+    // TODO(jansene): pstore conflicts with memory maps on Apple Silicon
+#if defined(__APPLE__) && defined(__aarch64__)
+    mem_map pstore = {.start = 0,
+                      .size = 0 };
+#else
     mem_map pstore = {.start = GOLDFISH_PSTORE_MEM_BASE,
                       .size = GOLDFISH_PSTORE_MEM_SIZE};
 
@@ -1629,6 +1643,7 @@
                    ",file=%s",
                    pstore.start, pstore.size, pstoreFile.c_str());
 
+#endif
     bool firstTimeSetup =
             (android_op_wipe_data || !path_exists(hw->disk_dataPartition_path));
 
@@ -2208,8 +2223,12 @@
                 rendererConfig.selectedRenderer == SELECTED_RENDERER_ANGLE9;
         // Features to disable or enable depending on rendering backend
         // and gpu make/model/version
+#if defined(__APPLE__) && defined(__aarch64__)
+        shouldDisableAsyncSwap = false;
+#else
         shouldDisableAsyncSwap |= !strncmp("arm", kTarget.androidArch, 3) ||
                                   System::get()->getProgramBitness() == 32;
+#endif
         shouldDisableAsyncSwap = shouldDisableAsyncSwap ||
                                  async_query_host_gpu_SyncBlacklisted();
 
diff --git a/android/android-emu-base/android/base/system/System.cpp b/android/android-emu-base/android/base/system/System.cpp
index 215042c..509d5e3 100755
--- a/android/android-emu-base/android/base/system/System.cpp
+++ b/android/android-emu-base/android/base/system/System.cpp
@@ -437,7 +437,7 @@
 
 
     int getHostBitness() const override {
-#ifdef __x86_64__
+#if defined(__x86_64__) || defined(__aarch64__)
         return 64;
 #elif defined(_WIN32)
         // Retrieves the path of the WOW64 system directory, which doesn't
@@ -1828,7 +1828,7 @@
     return result;
 }
 
-#ifdef __x86_64__
+#if defined(__x86_64__) || defined(__aarch64__)
 // static
 const char* System::kLibSubDir = "lib64";
 // static
diff --git a/android/android-emu-base/android/base/system/System.h b/android/android-emu-base/android/base/system/System.h
index c688aec..dc14aa8 100644
--- a/android/android-emu-base/android/base/system/System.h
+++ b/android/android-emu-base/android/base/system/System.h
@@ -185,7 +185,7 @@
     static System::FileSize getAlignedFileSize(System::FileSize align, System::FileSize size);
 
     // Return the program bitness as an integer, either 32 or 64.
-#ifdef __x86_64__
+#if defined(__x86_64__) || defined(__aarch64__)
     static const int kProgramBitness = 64;
 #else
     static const int kProgramBitness = 32;
diff --git a/android/android-emu/android/avd/info.c b/android/android-emu/android/avd/info.c
index d048b9f..cd5703c 100644
--- a/android/android-emu/android/avd/info.c
+++ b/android/android-emu/android/avd/info.c
@@ -1269,8 +1269,18 @@
     return _getFullFilePath(i->contentPath, imageName);
 }
 
+static bool is_armish(const AvdInfo* i);
+
 char*  avdInfo_getSdCardPath( const AvdInfo* i )
 {
+    if (i->apiLevel >=30 && is_armish(i)) {
+        // BUG: 174481551
+        // ignore sdcard for arm when api is >=30, as
+        // it makes setting up the metadata disk id tricky
+        // TODO: figure out better approach
+        dprint("INFO: ignore sdcard for arm at api level >= 30");
+        return NULL;
+    }
     const char* imageName = _imageFileNames[ AVD_IMAGE_SDCARD ];
     char*       path;
 
@@ -1726,6 +1736,32 @@
     *pSkinName = NULL;
     *pSkinDir  = NULL;
 
+    // TODO: Apple Silicon Qt support is spotty; we can't currently use device skins
+    // with our build, so hardcode the skin name to the magical "<width>x<height>" form.
+#if defined(__APPLE__) && defined(__aarch64__)
+    if (i->configIni != NULL ) {
+        /* We need to create a name.
+         * Make a "magical" name using the screen size from config.ini
+         * (parse_skin_files() in main-common-ui.c parses this name
+         *  to determine the screen size.)
+         */
+        int width = iniFile_getInteger(i->configIni, "hw.lcd.width", 0);
+        int height = iniFile_getInteger(i->configIni, "hw.lcd.height", 0);
+        if (width > 0 && height > 0) {
+            char skinNameBuf[64];
+            snprintf(skinNameBuf, sizeof skinNameBuf, "%dx%d", width, height);
+            skinName = ASTRDUP(skinNameBuf);
+        } else {
+            skinName = ASTRDUP(SKIN_DEFAULT);
+        }
+    } else {
+        skinName = ASTRDUP(SKIN_DEFAULT);
+    }
+
+    *pSkinName = skinName;
+    return;
+#endif
+
     if (!i->contentPath) {
         *pSkinName = ASTRDUP(SKIN_DEFAULT);
         return;
diff --git a/android/android-emu/android/main-kernel-parameters.cpp b/android/android-emu/android/main-kernel-parameters.cpp
index 60de6b3..a589f93 100644
--- a/android/android-emu/android/main-kernel-parameters.cpp
+++ b/android/android-emu/android/main-kernel-parameters.cpp
@@ -289,6 +289,13 @@
         params.addFormat("qemu.camera_protocol_ver=1");
     }
 
+    if (opts->shell || opts->shell_serial || opts->show_kernel) {
+        // The default value for printk.devkmsg is "ratelimit",
+        // causing only a few logs from the android init
+        // executable to be printed.
+        params.addFormat("printk.devkmsg=on");
+    }
+
     // User entered parameters are space separated. Passing false here to prevent
     // parameters from being surrounded by quotes.
     return params.toCStringCopy(false);
diff --git a/android/android-emu/android/opengl/NativeGpuInfo_darwin.cpp b/android/android-emu/android/opengl/NativeGpuInfo_darwin.cpp
index 4b53852..f98890c 100644
--- a/android/android-emu/android/opengl/NativeGpuInfo_darwin.cpp
+++ b/android/android-emu/android/opengl/NativeGpuInfo_darwin.cpp
@@ -110,7 +110,11 @@
     }
 }
 
-// macOS: blacklist
+// macOS: Disable Vulkan for now unless on M1.
 bool isVulkanSafeToUseNative() {
+#ifdef __aarch64__
+    return true;
+#else
     return false;
+#endif
 }
diff --git a/android/android-emu/android/skin/qt/emulator-qt-window.cpp b/android/android-emu/android/skin/qt/emulator-qt-window.cpp
index 3a54fdb..31dc808 100644
--- a/android/android-emu/android/skin/qt/emulator-qt-window.cpp
+++ b/android/android-emu/android/skin/qt/emulator-qt-window.cpp
@@ -773,9 +773,20 @@
 
 void EmulatorQtWindow::showAvdArchWarning() {
     ScopedCPtr<char> arch(avdInfo_getTargetCpuArch(android_avdInfo));
+
+    // On Apple hosts we could also be running under Virtualization.framework,
+    // which should also support fast x86 VMs on arm64, so skip the warning there too.
+#if defined(__APPLE__) || defined (__x86_64__)
     if (!strcmp(arch.get(), "x86") || !strcmp(arch.get(), "x86_64")) {
         return;
     }
+#endif
+
+#ifdef __aarch64__
+    if (!strcmp(arch.get(), "arm64")) {
+        return;
+    }
+#endif
 
     // The following statuses indicate that the machine hardware does not
     // support hardware acceleration. These machines should never show a
@@ -2227,6 +2238,12 @@
     simulateSetScale(std::max(.2, std::min(widthScale, heightScale)));
 
     maskWindowFrame();
+#ifdef __APPLE__
+    // Workaround: when linked against the macOS 11 SDK, the window can render stale content after a resize; queue a screen-changed event to force a refresh.
+    SkinEvent* changeEvent = new SkinEvent();
+    changeEvent->type = kEventScreenChanged;
+    queueSkinEvent(changeEvent);
+#endif
 }
 
 void EmulatorQtWindow::resizeAndChangeAspectRatio(bool isFolded) {
@@ -2970,6 +2987,13 @@
     if (android_foldable_is_folded()) {
         resizeAndChangeAspectRatio(true);
     }
+
+#ifdef __APPLE__
+    // Workaround: when linked against the macOS 11 SDK, the window can render stale content after a resize; queue a screen-changed event to force a refresh.
+    SkinEvent* changeEvent = new SkinEvent();
+    changeEvent->type = kEventScreenChanged;
+    queueSkinEvent(changeEvent);
+#endif
 }
 
 void EmulatorQtWindow::setVisibleExtent(QBitmap bitMap) {
diff --git a/android/android-emu/android/skin/qt/extended-pages/bug-report-page.cpp b/android/android-emu/android/skin/qt/extended-pages/bug-report-page.cpp
index 86faeb7..8101c95 100644
--- a/android/android-emu/android/skin/qt/extended-pages/bug-report-page.cpp
+++ b/android/android-emu/android/skin/qt/extended-pages/bug-report-page.cpp
@@ -202,7 +202,7 @@
     mBugTracker->increment("ON_SAVE");
     QString dirName = QString::fromStdString(mSavingStates.saveLocation);
     dirName = QFileDialog::getExistingDirectory(
-            Q_NULLPTR, tr("Report Saving Location"), dirName);
+            this, tr("Report Saving Location"), dirName);
     if (dirName.isNull())
         return;
     auto savingPath = PathUtils::join(dirName.toStdString(),
diff --git a/android/android-emu/android/snapshot/MemoryWatch_darwin.cpp b/android/android-emu/android/snapshot/MemoryWatch_darwin.cpp
index df4a989..c1e4475 100644
--- a/android/android-emu/android/snapshot/MemoryWatch_darwin.cpp
+++ b/android/android-emu/android/snapshot/MemoryWatch_darwin.cpp
@@ -117,11 +117,7 @@
             uint64_t gpa, size;
             int count = hva2gpa_call(start, 1, 1, &gpa, &size);
             if (count) {
-#ifdef __x86_64__
                 guest_mem_protect_call(gpa, length, HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
-#else
-                guest_mem_protect_call(gpa, length, 0);
-#endif
             }
         }
         return true;
diff --git a/android/android-emu/android/snapshot/Snapshot.cpp b/android/android-emu/android/snapshot/Snapshot.cpp
index ff6e9ca..c8aa27a 100644
--- a/android/android-emu/android/snapshot/Snapshot.cpp
+++ b/android/android-emu/android/snapshot/Snapshot.cpp
@@ -383,7 +383,7 @@
 };
 
 // Calculate snapshot version based on a base version plus featurecontrol-derived integer.
-static constexpr int kVersionBase = 63;
+static constexpr int kVersionBase = 64;
 static_assert(kVersionBase < (1 << 20), "Base version number is too high.");
 
 #define FEATURE_CONTROL_ITEM(item) + 1
diff --git a/android/android-emu/android/snapshot/common.h b/android/android-emu/android/snapshot/common.h
index 95dcf20..d0b8840 100644
--- a/android/android-emu/android/snapshot/common.h
+++ b/android/android-emu/android/snapshot/common.h
@@ -109,7 +109,11 @@
 
 bool isBufferZeroed(const void* ptr, int32_t size);
 
+#if defined(__APPLE__) && defined(__aarch64__)
+constexpr int32_t kDefaultPageSize = 16384;
+#else
 constexpr int32_t kDefaultPageSize = 4096;
+#endif
 
 constexpr int32_t kCancelTimeoutMs = 15000;
 
diff --git a/android/build/cmake/android.cmake b/android/build/cmake/android.cmake
index 6ad574b..07dbb0e 100644
--- a/android/build/cmake/android.cmake
+++ b/android/build/cmake/android.cmake
@@ -22,6 +22,12 @@
 # We want to make sure all the cross targets end up in a unique location
 set(ANDROID_CROSS_BUILD_DIRECTORY ${CMAKE_BINARY_DIR}/build/${ANDROID_HOST_TAG})
 
+set(ANDROID_XCODE_SIGN_ADHOC FALSE)
+
+if (APPLE AND BUILDING_FOR_AARCH64)
+    set(ANDROID_XCODE_SIGN_ADHOC TRUE)
+endif()
+
 # Checks to make sure the TAG is valid.
 function(_check_target_tag TAG)
   set(VALID_TARGETS
@@ -277,6 +283,13 @@
   endif()
 endfunction()
 
+function(android_sign path)
+    if (ANDROID_XCODE_SIGN_ADHOC)
+        install(
+            CODE "message(\"android_sign ${path}\")\nexecute_process(COMMAND codesign -s - --entitlements ${ANDROID_QEMU2_TOP_DIR}/entitlements.plist ${path})")
+    endif()
+endfunction()
+
 # ~~~
 # Registers the given library, by calculating the source set and setting licensens.
 #
@@ -1231,6 +1244,7 @@
   android_extract_symbols(${TGT})
   android_upload_symbols(${TGT})
   android_install_license(${TGT} ${DST}/${TGT}${CMAKE_EXECUTABLE_SUFFIX})
+  android_sign(${CMAKE_INSTALL_PREFIX}/${DST}/${TGT}${CMAKE_EXECUTABLE_SUFFIX})
 endfunction()
 
 # Installs the given shared library. The shared library will end up in ../lib64
@@ -1243,6 +1257,8 @@
   android_extract_symbols(${TGT})
   android_upload_symbols(${TGT})
   android_install_license(${TGT} ${TGT}${CMAKE_SHARED_LIBRARY_SUFFIX})
+  # Account for lib prefix when signing
+  android_sign(${CMAKE_INSTALL_PREFIX}/lib64/lib${TGT}${CMAKE_SHARED_LIBRARY_SUFFIX})
 endfunction()
 
 # Strips the given prebuilt executable during install..
diff --git a/android/build/cmake/config/emu-vulkan-config.cmake b/android/build/cmake/config/emu-vulkan-config.cmake
index 3d43c70..0f41774 100644
--- a/android/build/cmake/config/emu-vulkan-config.cmake
+++ b/android/build/cmake/config/emu-vulkan-config.cmake
@@ -94,6 +94,11 @@
       "${PREBUILT_ROOT}/layers/VkLayer_unique_objects.json>testlib64/layers/VkLayer_unique_objects.json"
       # shaders
       ${VULKAN_COMMON_DEPENDENCIES})
+elseif(DARWIN_AARCH64)
+  set(VULKAN_DEPENDENCIES
+      # MoltenVK
+      "${PREBUILT_ROOT}/icds/libMoltenVK.dylib>lib64/vulkan/libMoltenVK.dylib"
+      "${PREBUILT_ROOT}/icds/MoltenVK_icd.json>lib64/vulkan/MoltenVK_icd.json")
 elseif(WINDOWS)
   get_filename_component(
     PREBUILT_ROOT
diff --git a/cpus.c b/cpus.c
index d773e97..99411b9 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1803,7 +1803,12 @@
     }
 #endif /* CONFIG_HAX */
 #ifdef CONFIG_HVF
-    if (hvf_enabled()) { cpu_exit(cpu); }
+    if (hvf_enabled()) {
+        cpu_exit(cpu);
+#if defined(__aarch64__) && defined(CONFIG_HVF)
+        hvf_exit_vcpu(cpu);
+#endif
+    }
 #endif /* CONFIG_HVf */
 #else /* _WIN32 */
 #ifdef CONFIG_GVM
diff --git a/hvf-stub.c b/hvf-stub.c
index 59d9526..5e1dded 100644
--- a/hvf-stub.c
+++ b/hvf-stub.c
@@ -50,3 +50,5 @@
 }
 
 void hvf_cpu_synchronize_state(CPUState *cpu) {}
+
+void hvf_exit_vcpu(CPUState* cpu) { (void)cpu; }
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 6e5a1ef..c9e2cbb 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -94,6 +94,12 @@
 
 #define PLATFORM_BUS_NUM_IRQS 64
 
+#if defined(__APPLE__) && defined(__aarch64__)
+#define APPLE_SILICON 1
+#else
+#define APPLE_SILICON 0
+#endif
+
 static QemuDeviceTreeSetupFunc virt_device_tree_setup_func;
 void qemu_device_tree_setup_callback2(QemuDeviceTreeSetupFunc setup_func)
 {
@@ -116,7 +122,7 @@
  * of a terabyte of RAM will be doing it on a host with more than a
  * terabyte of physical address space.)
  */
-#if defined(__APPLE__) && defined(__arm64__)
+#if APPLE_SILICON
 #define RAMLIMIT_GB 31
 #else
 #define RAMLIMIT_GB 255
@@ -159,13 +165,17 @@
     [RANCHU_GOLDFISH_BATTERY] = { 0x0a040000, 0x00001000 },
     [RANCHU_GOLDFISH_EVDEV] =   { 0x0a050000, 0x00001000 },
     [RANCHU_GOLDFISH_PIPE] =    { 0x0a060000, 0x00002000 },
+// TODO(bohu): check on rockpi to see if the sync device works there too
+#if APPLE_SILICON
+    [RANCHU_GOLDFISH_SYNC] =    { 0x0a070000, 0x00002000 },
+#endif
     [VIRT_PLATFORM_BUS] =       { 0x0c000000, 0x02000000 },
     [VIRT_SECURE_MEM] =         { 0x0e000000, 0x01000000 },
     [VIRT_PCIE_MMIO] =          { 0x10000000, 0x2eff0000 },
     [VIRT_PCIE_PIO] =           { 0x3eff0000, 0x00010000 },
     [VIRT_PCIE_ECAM] =          { 0x3f000000, 0x01000000 },
     [VIRT_MEM] =                { 0x40000000, RAMLIMIT_BYTES },
-#if defined(__APPLE__) && defined(__arm64__)
+#if APPLE_SILICON
     /* Second PCIe window, 32GB wide at the 32GB boundary */
     /* This used to be 512GB, but doesn't work on Apple Silicon
      * as Apple Silicon only has 36 bits of address space. */
@@ -189,6 +199,9 @@
     [RANCHU_GOLDFISH_AUDIO] = 18,
     [RANCHU_GOLDFISH_EVDEV] = 19,
     [RANCHU_GOLDFISH_PIPE] = 20,
+#if APPLE_SILICON
+    [RANCHU_GOLDFISH_SYNC] = 21,
+#endif
     [VIRT_MMIO] = 32, /* ...to 16 + NUM_VIRTIO_TRANSPORTS - 1 */
     [VIRT_GIC_V2M] = 64, /* ...to 48 + NUM_GICV2M_SPIS - 1 */
     [VIRT_PLATFORM_BUS] = 128, /* ...to 112 + PLATFORM_BUS_NUM_IRQS -1 */
@@ -1512,9 +1525,11 @@
     create_simple_device(vms, pic, RANCHU_GOLDFISH_PIPE, "goldfish_pipe",
                          "google,android-pipe\0"
                          "generic,android-pipe", 2, 0, 0);
-    //create_simple_device(vms, pic, RANCHU_GOLDFISH_SYNC, "goldfish_sync",
-     //                    "google,goldfish-sync\0"
-      //                   "generic,goldfish-sync", 2, 0, 0);
+#if APPLE_SILICON
+    create_simple_device(vms, pic, RANCHU_GOLDFISH_SYNC, "goldfish_sync",
+                         "google,goldfish-sync\0"
+                         "generic,goldfish-sync", 2, 0, 0);
+#endif
     /* Create mmio transports, so the user can create virtio backends
      * (which will be automatically plugged in to the transports). If
      * no backend is created the transport will just sit harmlessly idle.
diff --git a/hw/misc/goldfish_sync.c b/hw/misc/goldfish_sync.c
index 80fc248..e44adb3 100644
--- a/hw/misc/goldfish_sync.c
+++ b/hw/misc/goldfish_sync.c
@@ -243,6 +243,8 @@
     // host (the linked list) are processed. At that point, IRQ
     // is lowered.
     case SYNC_REG_BATCH_COMMAND:
+        if (!s->batch_cmd_addr) return 0;
+
         s->current = goldfish_sync_pop_first_cmd(s);
         if (!s->current) {
             DPRINT("Out of pending commands. Lower IRQ.");
@@ -318,6 +320,8 @@
     // |goldfish_sync_timeline_inc| commands because that would
     // decrease performance.
     case SYNC_REG_BATCH_COMMAND:
+        if (!s->batch_cmd_addr) break;
+
         DPRINT("write SYNC_REG_BATCH_COMMAND. obtaining batch cmd vals.");
         struct goldfish_sync_batch_cmd incoming = {
             .cmd = 0,
@@ -344,6 +348,8 @@
     // that matters is SYNC_GUEST_CMD_TRIGGER_HOST_WAIT, which is used
     // to cause a OpenGL client wait on the host GPU/CPU.
     case SYNC_REG_BATCH_GUESTCOMMAND:
+        if (!s->batch_guestcmd_addr) break;
+
         DPRINT("write SYNC_REG_BATCH_GUESTCOMMAND. obtaining batch cmd vals.");
         struct goldfish_sync_batch_guestcmd guest_incoming = {
             .host_command = 0,
@@ -508,6 +514,8 @@
 
     s_goldfish_sync_dev = s;
 
+    s->batch_cmd_addr = 0;
+    s->batch_guestcmd_addr = 0;
     s->pending = NULL;
     s->current = NULL;
 
diff --git a/include/sysemu/hvf.h b/include/sysemu/hvf.h
index 3e5d584..573c05f 100644
--- a/include/sysemu/hvf.h
+++ b/include/sysemu/hvf.h
@@ -59,4 +59,6 @@
 
 void hvf_irq_deactivated(int cpu, int irq);
 
+void hvf_exit_vcpu(CPUState *cpu);
+
 #endif /* _HVF_H */
diff --git a/mac.source.properties b/mac.source.properties
index 5445d4a..e9d800e 100644
--- a/mac.source.properties
+++ b/mac.source.properties
@@ -1,4 +1,4 @@
 Pkg.UserSrc=false
-Pkg.Revision=30.3.2
+Pkg.Revision=30.3.3
 Pkg.Path=emulator
 Pkg.Desc=Android Emulator
diff --git a/source.properties b/source.properties
index 5445d4a..e9d800e 100644
--- a/source.properties
+++ b/source.properties
@@ -1,4 +1,4 @@
 Pkg.UserSrc=false
-Pkg.Revision=30.3.2
+Pkg.Revision=30.3.3
 Pkg.Path=emulator
 Pkg.Desc=Android Emulator
diff --git a/target/arm/arm-powerctl.c b/target/arm/arm-powerctl.c
index 3856853..16b8e76 100644
--- a/target/arm/arm-powerctl.c
+++ b/target/arm/arm-powerctl.c
@@ -260,9 +260,14 @@
         return QEMU_ARM_POWERCTL_IS_OFF;
     }
 
-    /* Queue work to run under the target vCPUs context */
-    async_run_on_cpu(target_cpu_state, arm_set_cpu_off_async_work,
-                     RUN_ON_CPU_NULL);
+    if (hvf_enabled()) {
+        target_cpu_state->halted = 1;
+        hvf_exit_vcpu(target_cpu_state);
+    } else {
+        /* Queue work to run under the target vCPUs context */
+        async_run_on_cpu(target_cpu_state, arm_set_cpu_off_async_work,
+                         RUN_ON_CPU_NULL);
+    }
 
     return QEMU_ARM_POWERCTL_RET_SUCCESS;
 }
diff --git a/target/arm/hvf.c b/target/arm/hvf.c
index 7fcbd5f..2e3cda7 100644
--- a/target/arm/hvf.c
+++ b/target/arm/hvf.c
@@ -94,6 +94,36 @@
     int num_slots;
 };
 
+struct hvf_migration_state {
+    uint64_t ticks;
+};
+
+struct hvf_migration_state mig_state;
+
+static int hvf_mig_state_pre_save(void* opaque) {
+    struct hvf_migration_state* m = opaque;
+    m->ticks -= mach_absolute_time();
+    return 0;
+}
+
+static int hvf_mig_state_post_load(void* opaque) {
+    struct hvf_migration_state* m = opaque;
+    m->ticks += mach_absolute_time();
+    return 0;
+}
+
+const VMStateDescription vmstate_hvf_migration = {
+    .name = "hvf-migration",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .pre_save = hvf_mig_state_pre_save,
+    .post_load = hvf_mig_state_post_load,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(ticks, struct hvf_migration_state),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
 pthread_rwlock_t mem_lock = PTHREAD_RWLOCK_INITIALIZER;
 struct hvf_accel_state* hvf_state;
 
@@ -485,102 +515,17 @@
 // VCPU init////////////////////////////////////////////////////////////////////
 
 int hvf_enabled() { return !hvf_disabled; }
+
 void hvf_disable(int shouldDisable) {
     hvf_disabled = shouldDisable;
 }
 
-void vmx_reset_vcpu(CPUState *cpu) {
-
-    // TODO-convert-to-arm64
-    // wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, 0);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, 0);
-    // macvm_set_cr0(cpu->hvf_fd, 0x60000010);
-
-    // wvmcs(cpu->hvf_fd, VMCS_CR4_MASK, CR4_VMXE_MASK);
-    // wvmcs(cpu->hvf_fd, VMCS_CR4_SHADOW, 0x0);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_CR4, CR4_VMXE_MASK);
-
-    // // set VMCS guest state fields
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_SELECTOR, 0xf000);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_LIMIT, 0xffff);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_ACCESS_RIGHTS, 0x9b);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_BASE, 0xffff0000);
-
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_SELECTOR, 0);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_LIMIT, 0xffff);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_ACCESS_RIGHTS, 0x93);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_BASE, 0);
-
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_SELECTOR, 0);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_LIMIT, 0xffff);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_ACCESS_RIGHTS, 0x93);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_BASE, 0);
-
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_SELECTOR, 0);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_LIMIT, 0xffff);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_ACCESS_RIGHTS, 0x93);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, 0);
-
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_SELECTOR, 0);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_LIMIT, 0xffff);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_ACCESS_RIGHTS, 0x93);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, 0);
-
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_SELECTOR, 0);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_LIMIT, 0xffff);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_ACCESS_RIGHTS, 0x93);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_BASE, 0);
-
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_SELECTOR, 0);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT, 0);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_ACCESS_RIGHTS, 0x10000);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE, 0);
-
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_SELECTOR, 0);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_LIMIT, 0);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_ACCESS_RIGHTS, 0x83);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_BASE, 0);
-
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT, 0);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE, 0);
-
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT, 0);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE, 0);
-
-    // //wvmcs(cpu->hvf_fd, VMCS_GUEST_CR2, 0x0);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, 0x0);
-    // wvmcs(cpu->hvf_fd, VMCS_GUEST_DR7, 0x0);
-
-    // wreg(cpu->hvf_fd, HV_X86_RIP, 0xfff0);
-    // wreg(cpu->hvf_fd, HV_X86_RDX, 0x623);
-    // wreg(cpu->hvf_fd, HV_X86_RFLAGS, 0x2);
-    // wreg(cpu->hvf_fd, HV_X86_RSP, 0x0);
-    // wreg(cpu->hvf_fd, HV_X86_RAX, 0x0);
-    // wreg(cpu->hvf_fd, HV_X86_RBX, 0x0);
-    // wreg(cpu->hvf_fd, HV_X86_RCX, 0x0);
-    // wreg(cpu->hvf_fd, HV_X86_RSI, 0x0);
-    // wreg(cpu->hvf_fd, HV_X86_RDI, 0x0);
-    // wreg(cpu->hvf_fd, HV_X86_RBP, 0x0);
-
-    // for (int i = 0; i < 8; i++)
-    //      wreg(cpu->hvf_fd, HV_X86_R8+i, 0x0);
-
-    // hv_vm_sync_tsc(0);
-    // cpu->halted = 0;
-    // hv_vcpu_invalidate_tlb(cpu->hvf_fd);
-    // hv_vcpu_flush(cpu->hvf_fd);
-}
-
 int hvf_init_vcpu(CPUState * cpu) {
     DPRINTF("%s: entry. cpu: %p\n", __func__, cpu);
 
     ARMCPU *armcpu;
 
     int r;
-    // TODO-convert-to-arm64
-    // init_emu(cpu);
-    // init_decoder(cpu);
-    // init_cpuid(cpu);
 
     cpu->hvf_caps = (struct hvf_vcpu_caps*)g_malloc0(sizeof(struct hvf_vcpu_caps));
     DPRINTF("%s: create a vcpu config and query its values\n", __func__);
@@ -608,59 +553,11 @@
     DPRINTF("%s: Setting debug register accesses to not exit the guest...\n", __func__);
     HVF_CHECKED_CALL(hv_vcpu_set_trap_debug_reg_accesses(cpu->hvf_fd, false));
 
-    // DPRINTF("%s: Setting pc to 0x8a0\n", __func__);
-    // HVF_CHECKED_CALL(hv_vcpu_set_reg(cpu->hvf_fd, HV_REG_PC, 0x40000000ULL));
-
     cpu->hvf_vcpu_dirty = 1;
     assert_hvf_ok(r);
 
     cpu->hvf_irq_pending = false;
     cpu->hvf_fiq_pending = false;
-
-    // TODO-convert-to-arm64
-	// if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED, &cpu->hvf_caps->vmx_cap_pinbased))
-	// 	qemu_abort("%s: error getting vmx capability HV_VMX_CAP_PINBASED\n", __func__);
-	// if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED, &cpu->hvf_caps->vmx_cap_procbased))
-	// 	qemu_abort("%s: error getting vmx capability HV_VMX_CAP_PROCBASED\n", __func__);
-	// if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &cpu->hvf_caps->vmx_cap_procbased2))
-	// 	qemu_abort("%s: error getting vmx capability HV_VMX_CAP_PROCBASED2\n", __func__);
-	// if (hv_vmx_read_capability(HV_VMX_CAP_ENTRY, &cpu->hvf_caps->vmx_cap_entry))
-	// 	qemu_abort("%s: error getting vmx capability HV_VMX_CAP_ENTRY\n", __func__);
-
-	// /* set VMCS control fields */
-    // wvmcs(cpu->hvf_fd, VMCS_PIN_BASED_CTLS, cap2ctrl(cpu->hvf_caps->vmx_cap_pinbased, 0));
-    // wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, cap2ctrl(cpu->hvf_caps->vmx_cap_procbased,
-    //                                                VMCS_PRI_PROC_BASED_CTLS_HLT |
-    //                                                VMCS_PRI_PROC_BASED_CTLS_MWAIT |
-    //                                                VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET |
-    //                                                VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) |
-    //                                                VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL);
-	// wvmcs(cpu->hvf_fd, VMCS_SEC_PROC_BASED_CTLS,
-    //       cap2ctrl(cpu->hvf_caps->vmx_cap_procbased2,VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES));
-
-	// wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, cap2ctrl(cpu->hvf_caps->vmx_cap_entry, 0));
-	// wvmcs(cpu->hvf_fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */
-
-    // wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);
-
-    // vmx_reset_vcpu(cpu);
-
-    armcpu = ARM_CPU(cpu);
-    // x86cpu->env.kvm_xsave_buf = qemu_memalign(16384, sizeof(struct hvf_xsave_buf));
-
-    // hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_STAR, 1);
-    // hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_LSTAR, 1);
-    // hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_CSTAR, 1);
-    // hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FMASK, 1);
-    // hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FSBASE, 1);
-    // hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_GSBASE, 1);
-    // hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_KERNELGSBASE, 1);
-    // hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_TSC_AUX, 1);
-    // //hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_TSC, 1);
-    // hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_CS, 1);
-    // hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_EIP, 1);
-    // hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_ESP, 1);
-
     return 0;
 }
 
@@ -668,9 +565,6 @@
 
 int hvf_vcpu_emulation_mode(CPUState* cpu) {
     DPRINTF("%s: call\n", __func__);
-    CPUArchState *env = (CPUArchState *) (cpu->env_ptr);
-    // TODO-convert-to-arm64
-    // return !(env->cr[0] & CR0_PG_MASK);
     return 0;
 }
 
@@ -694,6 +588,7 @@
     DPRINTF("%s: call\n", __func__);
     return 0;
 }
+
 static hv_reg_t regno_to_hv_xreg(int i) {
     switch (i) {
         case 0: return HV_REG_X0;
@@ -810,6 +705,10 @@
     ARMCPU *armcpu = ARM_CPU(cpu);
     CPUARMState *env = &armcpu->env;
 
+    // Apply the migration-adjusted virtual timer offset (CNTVOFF_EL2); mig_state.ticks presumably accumulates host time elapsed across save/restore — verify against vmstate_hvf_migration.
+    env->cp15.cntvoff_el2 = mig_state.ticks;
+    HVF_CHECKED_CALL(hv_vcpu_set_vtimer_offset(cpu->hvf_fd, env->cp15.cntvoff_el2));
+
     // Set HVF general registers
     {
         // HV_REG_LR = HV_REG_X30,
@@ -954,7 +853,8 @@
         // HVF_CHECKED_CALL(hv_vcpu_set_reg(cpu->hvf_fd, HV_SYS_REG_AMAIR_EL1, ???));
 
         HVF_CHECKED_CALL(hv_vcpu_set_sys_reg(cpu->hvf_fd, HV_SYS_REG_CNTKCTL_EL1, env->cp15.c14_cntkctl));
-        // HVF_CHECKED_CALL(hv_vcpu_set_reg(cpu->hvf_fd, HV_SYS_REG_CNTV_CVAL_EL0, ???));
+        HVF_CHECKED_CALL(hv_vcpu_set_sys_reg(cpu->hvf_fd, HV_SYS_REG_CNTV_CTL_EL0, env->cp15.c14_timer[GTIMER_VIRT].ctl));
+        HVF_CHECKED_CALL(hv_vcpu_set_sys_reg(cpu->hvf_fd, HV_SYS_REG_CNTV_CVAL_EL0, env->cp15.c14_timer[GTIMER_VIRT].cval));
         HVF_CHECKED_CALL(hv_vcpu_set_sys_reg(cpu->hvf_fd, HV_SYS_REG_CONTEXTIDR_EL1, env->cp15.contextidr_el[1]));
         HVF_CHECKED_CALL(hv_vcpu_set_sys_reg(cpu->hvf_fd, HV_SYS_REG_CPACR_EL1, env->cp15.cpacr_el1));
         HVF_CHECKED_CALL(hv_vcpu_set_sys_reg(cpu->hvf_fd, HV_SYS_REG_CSSELR_EL1, env->cp15.csselr_el[1]));
@@ -1154,7 +1054,8 @@
         // HVF_CHECKED_CALL(hv_vcpu_set_reg(cpu->hvf_fd, HV_SYS_REG_AMAIR_EL1, ???));
 
         HVF_CHECKED_CALL(hv_vcpu_get_sys_reg(cpu->hvf_fd, HV_SYS_REG_CNTKCTL_EL1, &env->cp15.c14_cntkctl));
-        // HVF_CHECKED_CALL(hv_vcpu_set_reg(cpu->hvf_fd, HV_SYS_REG_CNTV_CVAL_EL0, ???));
+        HVF_CHECKED_CALL(hv_vcpu_get_sys_reg(cpu->hvf_fd, HV_SYS_REG_CNTV_CTL_EL0, &(env->cp15.c14_timer[GTIMER_VIRT].ctl)));
+        HVF_CHECKED_CALL(hv_vcpu_get_sys_reg(cpu->hvf_fd, HV_SYS_REG_CNTV_CVAL_EL0, &(env->cp15.c14_timer[GTIMER_VIRT].cval)));
         HVF_CHECKED_CALL(hv_vcpu_get_sys_reg(cpu->hvf_fd, HV_SYS_REG_CONTEXTIDR_EL1, &env->cp15.contextidr_el[1]));
         HVF_CHECKED_CALL(hv_vcpu_get_sys_reg(cpu->hvf_fd, HV_SYS_REG_CPACR_EL1, &env->cp15.cpacr_el1));
         HVF_CHECKED_CALL(hv_vcpu_get_sys_reg(cpu->hvf_fd, HV_SYS_REG_CSSELR_EL1, &env->cp15.csselr_el[1]));
@@ -1225,241 +1126,29 @@
     cpu_state->hvf_vcpu_dirty = false;
 }
 
-void hvf_cpu_synchronize_post_reset(CPUState *cpu_state)
-{
+void hvf_cpu_synchronize_post_reset(CPUState *cpu_state) {
     run_on_cpu(cpu_state, __hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
 }
 
-void _hvf_cpu_synchronize_post_init(CPUState* cpu_state, run_on_cpu_data data)
-{
+void _hvf_cpu_synchronize_post_init(CPUState* cpu_state, run_on_cpu_data data) {
     DPRINTF("%s: call\n", __func__);
     (void)data;
     hvf_put_registers(cpu_state);
     cpu_state->hvf_vcpu_dirty = false;
 }
 
-void hvf_cpu_synchronize_post_init(CPUState *cpu_state)
-{
+void hvf_cpu_synchronize_post_init(CPUState *cpu_state) {
     run_on_cpu(cpu_state, _hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
 }
 
-void hvf_cpu_clean_state(CPUState *cpu_state)
-{
+void hvf_cpu_clean_state(CPUState *cpu_state) {
     cpu_state->hvf_vcpu_dirty = 0;
 }
 
-void vmx_clear_int_window_exiting(CPUState *cpu);
-
-// TODO-convert-to-arm64
-static bool ept_emulation_fault(uint64_t ept_qual)
-{
-    return false;
-
-	// int read, write;
-
-	// /* EPT fault on an instruction fetch doesn't make sense here */
-	// if (ept_qual & EPT_VIOLATION_INST_FETCH)
-	// 	return false;
-
-	// /* EPT fault must be a read fault or a write fault */
-	// read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
-	// write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
-	// if ((read | write) == 0)
-	// 	return false;
-
-	// /*
-	//  * The EPT violation must have been caused by accessing a
-	//  * guest-physical address that is a translation of a guest-linear
-	//  * address.
-	//  */
-	// if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
-	//     (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
-	// 	return false;
-	// }
-
-	// return true;
-}
-
-// TODO: taskswitch handling
-static void save_state_to_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
-{
-    /* CR3 and ldt selector are not saved intentionally */
-    // TODO-convert-to-arm64
-    // tss->eip = EIP(cpu);
-    // tss->eflags = EFLAGS(cpu);
-    // tss->eax = EAX(cpu);
-    // tss->ecx = ECX(cpu);
-    // tss->edx = EDX(cpu);
-    // tss->ebx = EBX(cpu);
-    // tss->esp = ESP(cpu);
-    // tss->ebp = EBP(cpu);
-    // tss->esi = ESI(cpu);
-    // tss->edi = EDI(cpu);
-
-    // tss->es = vmx_read_segment_selector(cpu, REG_SEG_ES).sel;
-    // tss->cs = vmx_read_segment_selector(cpu, REG_SEG_CS).sel;
-    // tss->ss = vmx_read_segment_selector(cpu, REG_SEG_SS).sel;
-    // tss->ds = vmx_read_segment_selector(cpu, REG_SEG_DS).sel;
-    // tss->fs = vmx_read_segment_selector(cpu, REG_SEG_FS).sel;
-    // tss->gs = vmx_read_segment_selector(cpu, REG_SEG_GS).sel;
-}
-
-static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
-{
-//     wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, tss->cr3);
-// 
-//     RIP(cpu) = tss->eip;
-//     EFLAGS(cpu) = tss->eflags | 2;
-// 
-//     /* General purpose registers */
-//     RAX(cpu) = tss->eax;
-//     RCX(cpu) = tss->ecx;
-//     RDX(cpu) = tss->edx;
-//     RBX(cpu) = tss->ebx;
-//     RSP(cpu) = tss->esp;
-//     RBP(cpu) = tss->ebp;
-//     RSI(cpu) = tss->esi;
-//     RDI(cpu) = tss->edi;
-// 
-//     vmx_write_segment_selector(cpu, (x68_segment_selector){tss->ldt}, REG_SEG_LDTR);
-//     vmx_write_segment_selector(cpu, (x68_segment_selector){tss->es}, REG_SEG_ES);
-//     vmx_write_segment_selector(cpu, (x68_segment_selector){tss->cs}, REG_SEG_CS);
-//     vmx_write_segment_selector(cpu, (x68_segment_selector){tss->ss}, REG_SEG_SS);
-//     vmx_write_segment_selector(cpu, (x68_segment_selector){tss->ds}, REG_SEG_DS);
-//     vmx_write_segment_selector(cpu, (x68_segment_selector){tss->fs}, REG_SEG_FS);
-//     vmx_write_segment_selector(cpu, (x68_segment_selector){tss->gs}, REG_SEG_GS);
-// 
-// #if 0
-//     load_segment(cpu, REG_SEG_LDTR, tss->ldt);
-//     load_segment(cpu, REG_SEG_ES, tss->es);
-//     load_segment(cpu, REG_SEG_CS, tss->cs);
-//     load_segment(cpu, REG_SEG_SS, tss->ss);
-//     load_segment(cpu, REG_SEG_DS, tss->ds);
-//     load_segment(cpu, REG_SEG_FS, tss->fs);
-//     load_segment(cpu, REG_SEG_GS, tss->gs);
-// #endif
-}
-
-// static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segment_selector old_tss_sel,
-//                           uint64_t old_tss_base, struct x86_segment_descriptor *new_desc)
-// {
-//     struct x86_tss_segment32 tss_seg;
-//     uint32_t new_tss_base = x86_segment_base(new_desc);
-//     uint32_t eip_offset = offsetof(struct x86_tss_segment32, eip);
-//     uint32_t ldt_sel_offset = offsetof(struct x86_tss_segment32, ldt);
-// 
-//     vmx_read_mem(cpu, &tss_seg, old_tss_base, sizeof(tss_seg));
-//     save_state_to_tss32(cpu, &tss_seg);
-// 
-//     vmx_write_mem(cpu, old_tss_base + eip_offset, &tss_seg.eip, ldt_sel_offset - eip_offset);
-//     vmx_read_mem(cpu, &tss_seg, new_tss_base, sizeof(tss_seg));
-// 
-//     if (old_tss_sel.sel != 0xffff) {
-//         tss_seg.prev_tss = old_tss_sel.sel;
-// 
-//         vmx_write_mem(cpu, new_tss_base, &tss_seg.prev_tss, sizeof(tss_seg.prev_tss));
-//     }
-//     load_state_from_tss32(cpu, &tss_seg);
-//     return 0;
-// }
-
-// static void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)
-// {
-//     uint64_t rip = rreg(cpu->hvf_fd, HV_X86_RIP);
-//     if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&
-//                         gate_type != VMCS_INTR_T_HWINTR &&
-//                         gate_type != VMCS_INTR_T_NMI)) {
-//         int ins_len = rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);
-//         macvm_set_rip(cpu, rip + ins_len);
-//         return;
-//     }
-// 
-//     load_regs(cpu);
-// 
-//     struct x86_segment_descriptor curr_tss_desc, next_tss_desc;
-//     int ret;
-//     x68_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, REG_SEG_TR);
-//     uint64_t old_tss_base = vmx_read_segment_base(cpu, REG_SEG_TR);
-//     uint32_t desc_limit;
-//     struct x86_call_gate task_gate_desc;
-//     struct vmx_segment vmx_seg;
-// 
-//     x86_read_segment_descriptor(cpu, &next_tss_desc, tss_sel);
-//     x86_read_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);
-// 
-//     if (reason == TSR_IDT_GATE && gate_valid) {
-//         int dpl;
-// 
-//         ret = x86_read_call_gate(cpu, &task_gate_desc, gate);
-// 
-//         dpl = task_gate_desc.dpl;
-//         x68_segment_selector cs = vmx_read_segment_selector(cpu, REG_SEG_CS);
-//         if (tss_sel.rpl > dpl || cs.rpl > dpl)
-//             DPRINTF("emulate_gp");
-//     }
-// 
-//     desc_limit = x86_segment_limit(&next_tss_desc);
-//     if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || desc_limit < 0x2b)) {
-//         VM_PANIC("emulate_ts");
-//     }
-// 
-//     if (reason == TSR_IRET || reason == TSR_JMP) {
-//         curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
-//         x86_write_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);
-//     }
-// 
-//     if (reason == TSR_IRET)
-//         EFLAGS(cpu) &= ~RFLAGS_NT;
-// 
-//     if (reason != TSR_CALL && reason != TSR_IDT_GATE)
-//         old_tss_sel.sel = 0xffff;
-// 
-//     if (reason != TSR_IRET) {
-//         next_tss_desc.type |= (1 << 1); /* set busy flag */
-//         x86_write_segment_descriptor(cpu, &next_tss_desc, tss_sel);
-//     }
-// 
-//     if (next_tss_desc.type & 8)
-//         ret = task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
-//     else
-//         //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
-//         VM_PANIC("task_switch_16");
-// 
-//     macvm_set_cr0(cpu->hvf_fd, rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0) | CR0_TS);
-//     x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);
-//     vmx_write_segment_descriptor(cpu, &vmx_seg, REG_SEG_TR);
-// 
-//     store_regs(cpu);
-// 
-//     hv_vcpu_invalidate_tlb(cpu->hvf_fd);
-//     hv_vcpu_flush(cpu->hvf_fd);
-// }
-
-/* Find first bit starting from msb */
-static int apic_fls_bit(uint32_t value)
-{
-    return 31 - clz32(value);
-}
-
-/* Find first bit starting from lsb */
-static int apic_ffs_bit(uint32_t value)
-{
-    return ctz32(value);
-}
-
-static inline void apic_reset_bit(uint32_t *tab, int index)
-{
-    int i, mask;
-    i = index >> 5;
-    mask = 1 << (index & 0x1f);
-    tab[i] &= ~mask;
-}
-
-#define VECTORING_INFO_VECTOR_MASK     0xff
-
 static void hvf_handle_interrupt(CPUState * cpu, int mask) {
     cpu->interrupt_request |= mask;
     if (!qemu_cpu_is_self(cpu)) {
+        hv_vcpus_exit(&cpu->hvf_fd, 1);
         qemu_cpu_kick(cpu);
     }
 }
@@ -1467,14 +1156,9 @@
 static inline void hvf_skip_instr(CPUState* cpu) {
     ARMCPU *armcpu = ARM_CPU(cpu);
     CPUARMState *env = &armcpu->env;
-
     env->pc += 4;
 }
 
-static void hvf_read_mem(struct CPUState* cpu, void *data, uint64_t gpa, int bytes) {
-    address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED, data, bytes, 0);
-}
-
 static uint64_t hvf_read_rt(CPUState* cpu, unsigned long rt) {
     return rt == 31 ? 0 : ARM_CPU(cpu)->env.xregs[rt];
 }
@@ -1486,10 +1170,16 @@
 }
 
 static void hvf_handle_wfx(CPUState* cpu) {
+    ARMCPU *armcpu = ARM_CPU(cpu);
+    CPUARMState *env = &armcpu->env;
+
     uint64_t cval;
     HVF_CHECKED_CALL(hv_vcpu_get_sys_reg(cpu->hvf_fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval));
 
-    int64_t ticks_to_sleep = cval - mach_absolute_time();
+    // mach_absolute_time() is an absolute host tick number. We
+    // have set up the guest to use the host tick number offset
+    // by env->cp15.cntvoff_el2.
+    int64_t ticks_to_sleep = cval - (mach_absolute_time() - env->cp15.cntvoff_el2);
     if (ticks_to_sleep < 0) {
         return;
     }
@@ -1843,18 +1533,16 @@
     hv_vcpu_set_vtimer_mask(cpu->hvf_fd, false);
 }
 
+void hvf_exit_vcpu(CPUState *cpu) {
+    hv_vcpus_exit(&cpu->hvf_fd, 1);
+    qemu_cpu_kick(cpu);
+}
+
 int hvf_vcpu_exec(CPUState* cpu) {
     ARMCPU* armcpu = ARM_CPU(cpu);
     CPUARMState* env = &armcpu->env;
-    int ret = 0;
-    uint64_t pc;
-    uint64_t val;
-    int i;
 
-    // TODO-convert-to-arm64
-    // uint64_t rip = 0;
-
-    // armcpu->halted = 0;
+    cpu->halted = 0;
 
     if (hvf_process_events(armcpu)) {
         qemu_mutex_unlock_iothread();
@@ -1862,7 +1550,6 @@
         qemu_mutex_lock_iothread();
         return EXCP_HLT;
     }
-
 again:
 
 
@@ -1873,26 +1560,13 @@
             cpu->hvf_vcpu_dirty = false;
         }
 
-        // TODO-convert-to-arm64
-        // cpu->hvf_x86->interruptable =
-        //     !(rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &
-        //     (VMCS_INTERRUPTIBILITY_STI_BLOCKING | VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));
-
         hvf_inject_interrupts(cpu);
-        // TODO-convert-to-arm64
-        // vmx_update_tpr(cpu);
+
+        if (cpu->halted) {
+            return EXCP_HLT;
+        }
 
         qemu_mutex_unlock_iothread();
-        // TODO-convert-to-arm64
-        // while (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
-        //     qemu_mutex_lock_iothread();
-        //     return EXCP_HLT;
-        // }
-
-
-        HVF_CHECKED_CALL(hv_vcpu_get_reg(cpu->hvf_fd, HV_REG_PC, &pc));
-        hvf_read_mem(cpu, &val, pc, 8);
-        DPRINTF("%s: run vcpu. pc: 0x%llx 8 bytes at pc: 0x%llx\n", __func__, (unsigned long long)pc, (unsigned long long)val);
 
         int r  = hv_vcpu_run(cpu->hvf_fd);
 
@@ -1900,56 +1574,19 @@
             qemu_abort("%s: run failed with 0x%x\n", __func__, r);
         }
 
-//  * @typedef    hv_vcpu_exit_t
-//  * @abstract   Contains information about an exit from the vcpu to the host.
-//
-//  * @typedef    hv_vcpu_exit_exception_t
-//  * @abstract   Contains details of a vcpu exception.
-//  */
-// typedef struct {
-//     hv_exception_syndrome_t syndrome;
-//     hv_exception_address_t virtual_address;
-//     hv_ipa_t physical_address;
-// } hv_vcpu_exit_exception_t;
-//  
-//  */
-// typedef struct {
-//     hv_exit_reason_t reason;
-//     hv_vcpu_exit_exception_t exception;
-// } hv_vcpu_exit_t;
-
-
         DPRINTF("%s: Exit info: reason: %#x exception: syndrome %#x va pa %#llx %#llx\n", __func__,
                 cpu->hvf_vcpu_exit_info->reason,
                 cpu->hvf_vcpu_exit_info->exception.syndrome,
                 (unsigned long long)cpu->hvf_vcpu_exit_info->exception.virtual_address,
                 (unsigned long long)cpu->hvf_vcpu_exit_info->exception.physical_address);
-        /* handle VMEXIT */
-        // TODO-convert-to-arm64
-        // uint64_t exit_reason = rvmcs(cpu->hvf_fd, VMCS_EXIT_REASON);
-        // uint64_t exit_qual = rvmcs(cpu->hvf_fd, VMCS_EXIT_QUALIFICATION);
-        // uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);
-        // uint64_t idtvec_info = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);
-        // rip = rreg(cpu->hvf_fd, HV_X86_RIP);
-        // RFLAGS(cpu) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
-        // env->eflags = RFLAGS(cpu);
 
         qemu_mutex_lock_iothread();
 
-        // TODO-convert-to-arm64
-        // update_apic_tpr(cpu);
         current_cpu = cpu;
 
-        ret = 0;
-
-        // TODO-convert-to-arm64
-        uint8_t ec = 0x3f & ((cpu->hvf_vcpu_exit_info->exception.syndrome) >> 26);
-        uint64_t val;
-        HVF_CHECKED_CALL(hv_vcpu_get_reg(cpu->hvf_fd, HV_REG_PC, &val));
-        DPRINTF("%s: Exit at PC 0x%llx\n", __func__, (unsigned long long)val);
         switch (cpu->hvf_vcpu_exit_info->reason) {
             case HV_EXIT_REASON_CANCELED:
-                break;
+                return EXCP_INTERRUPT;
             case HV_EXIT_REASON_EXCEPTION:
                 DPRINTF("%s: handle exception\n", __func__);
                 hvf_handle_exception(cpu);
@@ -1963,9 +1600,9 @@
                 abort();
                 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
         }
-    } while (ret == 0);
+    } while (true);
 
-    return ret;
+    return 0;
 }
 
 int hvf_smp_cpu_exec(CPUState * cpu)
@@ -1992,6 +1629,8 @@
     DPRINTF("%s: call. hv vm create?\n", __func__);
     int r = hv_vm_create(0);
 
+    mig_state.ticks = 0;
+
     if (!check_hvf_ok(r)) {
         hv_vm_destroy();
         return -EINVAL;
@@ -2013,6 +1652,9 @@
     qemu_set_user_backed_mapping_funcs(
         hvf_user_backed_ram_map,
         hvf_user_backed_ram_unmap);
+
+    vmstate_register(NULL, 0, &vmstate_hvf_migration, &mig_state);
+
     return 0;
 }
 
diff --git a/win.source.properties b/win.source.properties
index 5445d4a..e9d800e 100644
--- a/win.source.properties
+++ b/win.source.properties
@@ -1,4 +1,4 @@
 Pkg.UserSrc=false
-Pkg.Revision=30.3.2
+Pkg.Revision=30.3.3
 Pkg.Path=emulator
 Pkg.Desc=Android Emulator