darwin-aarch64: hvf: Add exit handlers skeleton
Bug: 173766929
Based on KVM, this adds code paths for handling all ESR_EL2 exit classes.
They all abort() since they are not yet implemented.
This also ports part of KVM's MMIO handling path; it does not actually
emulate any MMIO instructions yet.
Change-Id: Ieecdf20a93788236f4d3fc09db8c78f107569b53
diff --git a/target/arm/esr.h b/target/arm/esr.h
new file mode 100644
index 0000000..2010257
--- /dev/null
+++ b/target/arm/esr.h
@@ -0,0 +1,345 @@
+/*
+ * Copyright (C) 2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+// Taken from
+// arch/arm64/include/asm/esr.h
+// Linux 5.10-rc5
+
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ */
+
+#ifndef __ASM_ESR_H
+#define __ASM_ESR_H
+
+// #include <asm/memory.h>
+// #include <asm/sysreg.h>
+
+#define ESR_ELx_EC_UNKNOWN (0x00)
+#define ESR_ELx_EC_WFx (0x01)
+/* Unallocated EC: 0x02 */
+#define ESR_ELx_EC_CP15_32 (0x03)
+#define ESR_ELx_EC_CP15_64 (0x04)
+#define ESR_ELx_EC_CP14_MR (0x05)
+#define ESR_ELx_EC_CP14_LS (0x06)
+#define ESR_ELx_EC_FP_ASIMD (0x07)
+#define ESR_ELx_EC_CP10_ID (0x08) /* EL2 only */
+#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */
+/* Unallocated EC: 0x0A - 0x0B */
+#define ESR_ELx_EC_CP14_64 (0x0C)
+#define ESR_ELx_EC_BTI (0x0D)
+#define ESR_ELx_EC_ILL (0x0E)
+/* Unallocated EC: 0x0F - 0x10 */
+#define ESR_ELx_EC_SVC32 (0x11)
+#define ESR_ELx_EC_HVC32 (0x12) /* EL2 only */
+#define ESR_ELx_EC_SMC32 (0x13) /* EL2 and above */
+/* Unallocated EC: 0x14 */
+#define ESR_ELx_EC_SVC64 (0x15)
+#define ESR_ELx_EC_HVC64 (0x16) /* EL2 and above */
+#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */
+#define ESR_ELx_EC_SYS64 (0x18)
+#define ESR_ELx_EC_SVE (0x19)
+#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */
+/* Unallocated EC: 0x1B */
+#define ESR_ELx_EC_FPAC (0x1C) /* EL1 and above */
+/* Unallocated EC: 0x1D - 0x1E */
+#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
+#define ESR_ELx_EC_IABT_LOW (0x20)
+#define ESR_ELx_EC_IABT_CUR (0x21)
+#define ESR_ELx_EC_PC_ALIGN (0x22)
+/* Unallocated EC: 0x23 */
+#define ESR_ELx_EC_DABT_LOW (0x24)
+#define ESR_ELx_EC_DABT_CUR (0x25)
+#define ESR_ELx_EC_SP_ALIGN (0x26)
+/* Unallocated EC: 0x27 */
+#define ESR_ELx_EC_FP_EXC32 (0x28)
+/* Unallocated EC: 0x29 - 0x2B */
+#define ESR_ELx_EC_FP_EXC64 (0x2C)
+/* Unallocated EC: 0x2D - 0x2E */
+#define ESR_ELx_EC_SERROR (0x2F)
+#define ESR_ELx_EC_BREAKPT_LOW (0x30)
+#define ESR_ELx_EC_BREAKPT_CUR (0x31)
+#define ESR_ELx_EC_SOFTSTP_LOW (0x32)
+#define ESR_ELx_EC_SOFTSTP_CUR (0x33)
+#define ESR_ELx_EC_WATCHPT_LOW (0x34)
+#define ESR_ELx_EC_WATCHPT_CUR (0x35)
+/* Unallocated EC: 0x36 - 0x37 */
+#define ESR_ELx_EC_BKPT32 (0x38)
+/* Unallocated EC: 0x39 */
+#define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */
+/* Unallocated EC: 0x3B */
+#define ESR_ELx_EC_BRK64 (0x3C)
+/* Unallocated EC: 0x3D - 0x3F */
+#define ESR_ELx_EC_MAX (0x3F)
+
+#define ESR_ELx_EC_SHIFT (26)
+#define ESR_ELx_EC_MASK ((uint64_t)(0x3F) << ESR_ELx_EC_SHIFT)
+#define ESR_ELx_EC(esr) (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
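+// For example, a hypothetical data abort from a lower EL with
+// esr = 0x93810046 gives ESR_ELx_EC(esr) == 0x24 == ESR_ELx_EC_DABT_LOW.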
+
+// Taken from include/uapi/linux/const.h
+#define UL(x) (x##UL)
+
+#define ESR_ELx_IL_SHIFT (25)
+#define ESR_ELx_IL (UL(1) << ESR_ELx_IL_SHIFT)
+#define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1)
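+// An ESR_ELx value thus splits as EC[31:26] | IL[25] | ISS[24:0].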
+
+/* ISS field definitions shared by different classes */
+#define ESR_ELx_WNR_SHIFT (6)
+#define ESR_ELx_WNR (UL(1) << ESR_ELx_WNR_SHIFT)
+
+/* Asynchronous Error Type */
+#define ESR_ELx_IDS_SHIFT (24)
+#define ESR_ELx_IDS (UL(1) << ESR_ELx_IDS_SHIFT)
+#define ESR_ELx_AET_SHIFT (10)
+#define ESR_ELx_AET (UL(0x7) << ESR_ELx_AET_SHIFT)
+
+#define ESR_ELx_AET_UC (UL(0) << ESR_ELx_AET_SHIFT)
+#define ESR_ELx_AET_UEU (UL(1) << ESR_ELx_AET_SHIFT)
+#define ESR_ELx_AET_UEO (UL(2) << ESR_ELx_AET_SHIFT)
+#define ESR_ELx_AET_UER (UL(3) << ESR_ELx_AET_SHIFT)
+#define ESR_ELx_AET_CE (UL(6) << ESR_ELx_AET_SHIFT)
+
+/* Shared ISS field definitions for Data/Instruction aborts */
+#define ESR_ELx_SET_SHIFT (11)
+#define ESR_ELx_SET_MASK (UL(3) << ESR_ELx_SET_SHIFT)
+#define ESR_ELx_FnV_SHIFT (10)
+#define ESR_ELx_FnV (UL(1) << ESR_ELx_FnV_SHIFT)
+#define ESR_ELx_EA_SHIFT (9)
+#define ESR_ELx_EA (UL(1) << ESR_ELx_EA_SHIFT)
+#define ESR_ELx_S1PTW_SHIFT (7)
+#define ESR_ELx_S1PTW (UL(1) << ESR_ELx_S1PTW_SHIFT)
+
+/* Shared ISS fault status code (IFSC/DFSC) for Data/Instruction aborts */
+#define ESR_ELx_FSC (0x3F)
+#define ESR_ELx_FSC_TYPE (0x3C)
+#define ESR_ELx_FSC_EXTABT (0x10)
+#define ESR_ELx_FSC_SERROR (0x11)
+#define ESR_ELx_FSC_ACCESS (0x08)
+#define ESR_ELx_FSC_FAULT (0x04)
+#define ESR_ELx_FSC_PERM (0x0C)
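+// ESR_ELx_FSC_TYPE masks off the fault level: e.g. a level-3 translation
+// fault (FSC 0x07) & ESR_ELx_FSC_TYPE == 0x04 == ESR_ELx_FSC_FAULT.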
+
+/* ISS field definitions for Data Aborts */
+#define ESR_ELx_ISV_SHIFT (24)
+#define ESR_ELx_ISV (UL(1) << ESR_ELx_ISV_SHIFT)
+#define ESR_ELx_SAS_SHIFT (22)
+#define ESR_ELx_SAS (UL(3) << ESR_ELx_SAS_SHIFT)
+#define ESR_ELx_SSE_SHIFT (21)
+#define ESR_ELx_SSE (UL(1) << ESR_ELx_SSE_SHIFT)
+#define ESR_ELx_SRT_SHIFT (16)
+#define ESR_ELx_SRT_MASK (UL(0x1F) << ESR_ELx_SRT_SHIFT)
+#define ESR_ELx_SF_SHIFT (15)
+#define ESR_ELx_SF (UL(1) << ESR_ELx_SF_SHIFT)
+#define ESR_ELx_AR_SHIFT (14)
+#define ESR_ELx_AR (UL(1) << ESR_ELx_AR_SHIFT)
+#define ESR_ELx_CM_SHIFT (8)
+#define ESR_ELx_CM (UL(1) << ESR_ELx_CM_SHIFT)
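+// Continuing the hypothetical esr = 0x93810046 from above: ISV=1, SAS=2
+// (32-bit access), SRT=1, WnR=1 -- a word-sized MMIO write from W1.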
+
+/* ISS field definitions for exceptions taken in to Hyp */
+#define ESR_ELx_CV (UL(1) << 24)
+#define ESR_ELx_COND_SHIFT (20)
+#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
+#define ESR_ELx_WFx_ISS_TI (UL(1) << 0)
+#define ESR_ELx_WFx_ISS_WFI (UL(0) << 0)
+#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
+#define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1)
+
+#define DISR_EL1_IDS (UL(1) << 24)
+/*
+ * DISR_EL1 and ESR_ELx share the bottom 13 bits, but the RES0 bits may mean
+ * different things in the future...
+ */
+#define DISR_EL1_ESR_MASK (ESR_ELx_AET | ESR_ELx_EA | ESR_ELx_FSC)
+
+/* ESR value templates for specific events */
+#define ESR_ELx_WFx_MASK (ESR_ELx_EC_MASK | ESR_ELx_WFx_ISS_TI)
+#define ESR_ELx_WFx_WFI_VAL ((ESR_ELx_EC_WFx << ESR_ELx_EC_SHIFT) | \
+ ESR_ELx_WFx_ISS_WFI)
+
+/* BRK instruction trap from AArch64 state */
+#define ESR_ELx_BRK64_ISS_COMMENT_MASK 0xffff
+
+/* ISS field definitions for System instruction traps */
+#define ESR_ELx_SYS64_ISS_RES0_SHIFT 22
+#define ESR_ELx_SYS64_ISS_RES0_MASK (UL(0x7) << ESR_ELx_SYS64_ISS_RES0_SHIFT)
+#define ESR_ELx_SYS64_ISS_DIR_MASK 0x1
+#define ESR_ELx_SYS64_ISS_DIR_READ 0x1
+#define ESR_ELx_SYS64_ISS_DIR_WRITE 0x0
+
+#define ESR_ELx_SYS64_ISS_RT_SHIFT 5
+#define ESR_ELx_SYS64_ISS_RT_MASK (UL(0x1f) << ESR_ELx_SYS64_ISS_RT_SHIFT)
+#define ESR_ELx_SYS64_ISS_CRM_SHIFT 1
+#define ESR_ELx_SYS64_ISS_CRM_MASK (UL(0xf) << ESR_ELx_SYS64_ISS_CRM_SHIFT)
+#define ESR_ELx_SYS64_ISS_CRN_SHIFT 10
+#define ESR_ELx_SYS64_ISS_CRN_MASK (UL(0xf) << ESR_ELx_SYS64_ISS_CRN_SHIFT)
+#define ESR_ELx_SYS64_ISS_OP1_SHIFT 14
+#define ESR_ELx_SYS64_ISS_OP1_MASK (UL(0x7) << ESR_ELx_SYS64_ISS_OP1_SHIFT)
+#define ESR_ELx_SYS64_ISS_OP2_SHIFT 17
+#define ESR_ELx_SYS64_ISS_OP2_MASK (UL(0x7) << ESR_ELx_SYS64_ISS_OP2_SHIFT)
+#define ESR_ELx_SYS64_ISS_OP0_SHIFT 20
+#define ESR_ELx_SYS64_ISS_OP0_MASK (UL(0x3) << ESR_ELx_SYS64_ISS_OP0_SHIFT)
+#define ESR_ELx_SYS64_ISS_SYS_MASK (ESR_ELx_SYS64_ISS_OP0_MASK | \
+ ESR_ELx_SYS64_ISS_OP1_MASK | \
+ ESR_ELx_SYS64_ISS_OP2_MASK | \
+ ESR_ELx_SYS64_ISS_CRN_MASK | \
+ ESR_ELx_SYS64_ISS_CRM_MASK)
+#define ESR_ELx_SYS64_ISS_SYS_VAL(op0, op1, op2, crn, crm) \
+ (((op0) << ESR_ELx_SYS64_ISS_OP0_SHIFT) | \
+ ((op1) << ESR_ELx_SYS64_ISS_OP1_SHIFT) | \
+ ((op2) << ESR_ELx_SYS64_ISS_OP2_SHIFT) | \
+ ((crn) << ESR_ELx_SYS64_ISS_CRN_SHIFT) | \
+ ((crm) << ESR_ELx_SYS64_ISS_CRM_SHIFT))
+
+#define ESR_ELx_SYS64_ISS_SYS_OP_MASK (ESR_ELx_SYS64_ISS_SYS_MASK | \
+ ESR_ELx_SYS64_ISS_DIR_MASK)
+#define ESR_ELx_SYS64_ISS_RT(esr) \
+ (((esr) & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT)
+/*
+ * User space cache operations have the following sysreg encoding
+ * in System instructions.
+ * op0=1, op1=3, op2=1, crn=7, crm={ 5, 10, 11, 12, 13, 14 }, WRITE (L=0)
+ */
+#define ESR_ELx_SYS64_ISS_CRM_DC_CIVAC 14
+#define ESR_ELx_SYS64_ISS_CRM_DC_CVADP 13
+#define ESR_ELx_SYS64_ISS_CRM_DC_CVAP 12
+#define ESR_ELx_SYS64_ISS_CRM_DC_CVAU 11
+#define ESR_ELx_SYS64_ISS_CRM_DC_CVAC 10
+#define ESR_ELx_SYS64_ISS_CRM_IC_IVAU 5
+
+#define ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK (ESR_ELx_SYS64_ISS_OP0_MASK | \
+ ESR_ELx_SYS64_ISS_OP1_MASK | \
+ ESR_ELx_SYS64_ISS_OP2_MASK | \
+ ESR_ELx_SYS64_ISS_CRN_MASK | \
+ ESR_ELx_SYS64_ISS_DIR_MASK)
+#define ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL \
+ (ESR_ELx_SYS64_ISS_SYS_VAL(1, 3, 1, 7, 0) | \
+ ESR_ELx_SYS64_ISS_DIR_WRITE)
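+// For illustration: a trapped EL0 "dc civac, x0" reports op0=1, op1=3,
+// op2=1, crn=7, crm=14, direction write, so it matches
+// ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL under the mask above, with crm
+// selecting the individual operation (here ESR_ELx_SYS64_ISS_CRM_DC_CIVAC).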
+/*
+ * User space MRS operations which are supported for emulation
+ * have the following sysreg encoding in System instructions.
+ * op0 = 3, op1= 0, crn = 0, {crm = 0, 4-7}, READ (L = 1)
+ */
+#define ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK (ESR_ELx_SYS64_ISS_OP0_MASK | \
+ ESR_ELx_SYS64_ISS_OP1_MASK | \
+ ESR_ELx_SYS64_ISS_CRN_MASK | \
+ ESR_ELx_SYS64_ISS_DIR_MASK)
+#define ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL \
+ (ESR_ELx_SYS64_ISS_SYS_VAL(3, 0, 0, 0, 0) | \
+ ESR_ELx_SYS64_ISS_DIR_READ)
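+// For illustration: an emulated EL0 "mrs x2, midr_el1" traps with op0=3,
+// op1=0, crn=0, crm=0, direction read, matching
+// ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL under the mask above; Rt is 2.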
+
+#define ESR_ELx_SYS64_ISS_SYS_CTR ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 1, 0, 0)
+#define ESR_ELx_SYS64_ISS_SYS_CTR_READ (ESR_ELx_SYS64_ISS_SYS_CTR | \
+ ESR_ELx_SYS64_ISS_DIR_READ)
+
+#define ESR_ELx_SYS64_ISS_SYS_CNTVCT (ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 2, 14, 0) | \
+ ESR_ELx_SYS64_ISS_DIR_READ)
+
+#define ESR_ELx_SYS64_ISS_SYS_CNTFRQ (ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 0, 14, 0) | \
+ ESR_ELx_SYS64_ISS_DIR_READ)
+
+#define esr_sys64_to_sysreg(e) \
+ sys_reg((((e) & ESR_ELx_SYS64_ISS_OP0_MASK) >> \
+ ESR_ELx_SYS64_ISS_OP0_SHIFT), \
+ (((e) & ESR_ELx_SYS64_ISS_OP1_MASK) >> \
+ ESR_ELx_SYS64_ISS_OP1_SHIFT), \
+ (((e) & ESR_ELx_SYS64_ISS_CRN_MASK) >> \
+ ESR_ELx_SYS64_ISS_CRN_SHIFT), \
+ (((e) & ESR_ELx_SYS64_ISS_CRM_MASK) >> \
+ ESR_ELx_SYS64_ISS_CRM_SHIFT), \
+ (((e) & ESR_ELx_SYS64_ISS_OP2_MASK) >> \
+ ESR_ELx_SYS64_ISS_OP2_SHIFT))
+
+#define esr_cp15_to_sysreg(e) \
+ sys_reg(3, \
+ (((e) & ESR_ELx_SYS64_ISS_OP1_MASK) >> \
+ ESR_ELx_SYS64_ISS_OP1_SHIFT), \
+ (((e) & ESR_ELx_SYS64_ISS_CRN_MASK) >> \
+ ESR_ELx_SYS64_ISS_CRN_SHIFT), \
+ (((e) & ESR_ELx_SYS64_ISS_CRM_MASK) >> \
+ ESR_ELx_SYS64_ISS_CRM_SHIFT), \
+ (((e) & ESR_ELx_SYS64_ISS_OP2_MASK) >> \
+ ESR_ELx_SYS64_ISS_OP2_SHIFT))
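+// Note: sys_reg() comes from the sysreg.h counterpart (include commented out
+// above); these two helpers stay unused until that header is ported.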
+
+/*
+ * ISS field definitions for floating-point exception traps
+ * (FP_EXC_32/FP_EXC_64).
+ *
+ * (The FPEXC_* constants are used instead for common bits.)
+ */
+
+#define ESR_ELx_FP_EXC_TFV (UL(1) << 23)
+
+/*
+ * ISS field definitions for CP15 accesses
+ */
+#define ESR_ELx_CP15_32_ISS_DIR_MASK 0x1
+#define ESR_ELx_CP15_32_ISS_DIR_READ 0x1
+#define ESR_ELx_CP15_32_ISS_DIR_WRITE 0x0
+
+#define ESR_ELx_CP15_32_ISS_RT_SHIFT 5
+#define ESR_ELx_CP15_32_ISS_RT_MASK (UL(0x1f) << ESR_ELx_CP15_32_ISS_RT_SHIFT)
+#define ESR_ELx_CP15_32_ISS_CRM_SHIFT 1
+#define ESR_ELx_CP15_32_ISS_CRM_MASK (UL(0xf) << ESR_ELx_CP15_32_ISS_CRM_SHIFT)
+#define ESR_ELx_CP15_32_ISS_CRN_SHIFT 10
+#define ESR_ELx_CP15_32_ISS_CRN_MASK (UL(0xf) << ESR_ELx_CP15_32_ISS_CRN_SHIFT)
+#define ESR_ELx_CP15_32_ISS_OP1_SHIFT 14
+#define ESR_ELx_CP15_32_ISS_OP1_MASK (UL(0x7) << ESR_ELx_CP15_32_ISS_OP1_SHIFT)
+#define ESR_ELx_CP15_32_ISS_OP2_SHIFT 17
+#define ESR_ELx_CP15_32_ISS_OP2_MASK (UL(0x7) << ESR_ELx_CP15_32_ISS_OP2_SHIFT)
+
+#define ESR_ELx_CP15_32_ISS_SYS_MASK (ESR_ELx_CP15_32_ISS_OP1_MASK | \
+ ESR_ELx_CP15_32_ISS_OP2_MASK | \
+ ESR_ELx_CP15_32_ISS_CRN_MASK | \
+ ESR_ELx_CP15_32_ISS_CRM_MASK | \
+ ESR_ELx_CP15_32_ISS_DIR_MASK)
+#define ESR_ELx_CP15_32_ISS_SYS_VAL(op1, op2, crn, crm) \
+ (((op1) << ESR_ELx_CP15_32_ISS_OP1_SHIFT) | \
+ ((op2) << ESR_ELx_CP15_32_ISS_OP2_SHIFT) | \
+ ((crn) << ESR_ELx_CP15_32_ISS_CRN_SHIFT) | \
+ ((crm) << ESR_ELx_CP15_32_ISS_CRM_SHIFT))
+
+#define ESR_ELx_CP15_64_ISS_DIR_MASK 0x1
+#define ESR_ELx_CP15_64_ISS_DIR_READ 0x1
+#define ESR_ELx_CP15_64_ISS_DIR_WRITE 0x0
+
+#define ESR_ELx_CP15_64_ISS_RT_SHIFT 5
+#define ESR_ELx_CP15_64_ISS_RT_MASK (UL(0x1f) << ESR_ELx_CP15_64_ISS_RT_SHIFT)
+
+#define ESR_ELx_CP15_64_ISS_RT2_SHIFT 10
+#define ESR_ELx_CP15_64_ISS_RT2_MASK (UL(0x1f) << ESR_ELx_CP15_64_ISS_RT2_SHIFT)
+
+#define ESR_ELx_CP15_64_ISS_OP1_SHIFT 16
+#define ESR_ELx_CP15_64_ISS_OP1_MASK (UL(0xf) << ESR_ELx_CP15_64_ISS_OP1_SHIFT)
+#define ESR_ELx_CP15_64_ISS_CRM_SHIFT 1
+#define ESR_ELx_CP15_64_ISS_CRM_MASK (UL(0xf) << ESR_ELx_CP15_64_ISS_CRM_SHIFT)
+
+#define ESR_ELx_CP15_64_ISS_SYS_VAL(op1, crm) \
+ (((op1) << ESR_ELx_CP15_64_ISS_OP1_SHIFT) | \
+ ((crm) << ESR_ELx_CP15_64_ISS_CRM_SHIFT))
+
+#define ESR_ELx_CP15_64_ISS_SYS_MASK (ESR_ELx_CP15_64_ISS_OP1_MASK | \
+ ESR_ELx_CP15_64_ISS_CRM_MASK | \
+ ESR_ELx_CP15_64_ISS_DIR_MASK)
+
+#define ESR_ELx_CP15_64_ISS_SYS_CNTVCT (ESR_ELx_CP15_64_ISS_SYS_VAL(1, 14) | \
+ ESR_ELx_CP15_64_ISS_DIR_READ)
+
+#define ESR_ELx_CP15_32_ISS_SYS_CNTFRQ (ESR_ELx_CP15_32_ISS_SYS_VAL(0, 0, 14, 0) |\
+ ESR_ELx_CP15_32_ISS_DIR_READ)
+
+#endif /* __ASM_ESR_H */
diff --git a/target/arm/hvf.c b/target/arm/hvf.c
index 77af248..fa12a27 100644
--- a/target/arm/hvf.c
+++ b/target/arm/hvf.c
@@ -15,8 +15,9 @@
#include <Hypervisor/Hypervisor.h>
-#include "hvf-arm64.h"
+#include "esr.h"
+#include "hvf-arm64.h"
#include "internals.h"
#include "exec/address-spaces.h"
@@ -662,7 +663,7 @@
// VCPU run/////////////////////////////////////////////////////////////////////
int hvf_vcpu_emulation_mode(CPUState* cpu) {
- fprintf(stderr, "%s: call\n", __func__);
+ DPRINTF("%s: call\n", __func__);
CPUArchState *env = (CPUArchState *) (cpu->env_ptr);
// TODO-convert-to-arm64
// return !(env->cr[0] & CR0_PG_MASK);
@@ -670,23 +671,23 @@
}
int hvf_vcpu_destroy(CPUState* cpu) {
- fprintf(stderr, "%s: call\n", __func__);
+ DPRINTF("%s: call\n", __func__);
return 0;
}
void hvf_raise_event(CPUState* cpu) {
- fprintf(stderr, "%s: call\n", __func__);
+ DPRINTF("%s: call\n", __func__);
// TODO
}
// TODO-convert-to-arm64
void hvf_inject_interrupts(CPUState *cpu) {
- fprintf(stderr, "%s: call\n", __func__);
+ DPRINTF("%s: call\n", __func__);
}
// TODO-convert-to-arm64
int hvf_process_events(CPUState *cpu) {
- fprintf(stderr, "%s: call\n", __func__);
+ DPRINTF("%s: call\n", __func__);
return 0;
}
static hv_reg_t regno_to_hv_xreg(int i) {
@@ -801,7 +802,7 @@
int ret;
unsigned int el;
- fprintf(stderr, "%s: call\n", __func__);
+ DPRINTF("%s: call\n", __func__);
ARMCPU *armcpu = ARM_CPU(cpu);
CPUARMState *env = &armcpu->env;
@@ -809,7 +810,7 @@
* AArch64 registers before pushing them out to 64-bit HVF.
*/
if (!is_a64(env)) {
- fprintf(stderr, "%s: syncing 32 to 64!\n", __func__);
+ DPRINTF("%s: syncing 32 to 64!\n", __func__);
aarch64_sync_32_to_64(env);
}
@@ -1015,7 +1016,7 @@
}
int hvf_get_registers(CPUState *cpu) {
- fprintf(stderr, "%s: call\n", __func__);
+ DPRINTF("%s: call\n", __func__);
uint64_t val;
unsigned int el;
int i;
@@ -1242,7 +1243,7 @@
// TODO: synchronize vcpu state
void __hvf_cpu_synchronize_state(CPUState* cpu_state, run_on_cpu_data data)
{
- fprintf(stderr, "%s: call\n", __func__);
+ DPRINTF("%s: call\n", __func__);
(void)data;
if (cpu_state->hvf_vcpu_dirty == 0)
hvf_get_registers(cpu_state);
@@ -1258,7 +1259,7 @@
void __hvf_cpu_synchronize_post_reset(CPUState* cpu_state, run_on_cpu_data data)
{
- fprintf(stderr, "%s: call\n", __func__);
+ DPRINTF("%s: call\n", __func__);
(void)data;
hvf_put_registers(cpu_state);
@@ -1275,7 +1276,7 @@
void _hvf_cpu_synchronize_post_init(CPUState* cpu_state, run_on_cpu_data data)
{
- fprintf(stderr, "%s: call\n", __func__);
+ DPRINTF("%s: call\n", __func__);
(void)data;
hvf_put_registers(cpu_state);
cpu_state->hvf_vcpu_dirty = false;
@@ -1507,14 +1508,251 @@
}
}
-#define ESR_ELx_EC_SHIFT (26ULL)
-#define ESR_ELx_EC_MASK ((0x3FULL) << ESR_ELx_EC_SHIFT)
-#define ESR_ELx_EC(esr) (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
-
static void hvf_read_mem(struct CPUState* cpu, void *data, uint64_t gpa, int bytes) {
address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED, data, bytes, 0);
}
+static void hvf_handle_wfx(CPUState* cpu) {
+ DPRINTF("%s: call (not implemented)\n", __func__);
+ abort();
+}
+
+static void hvf_handle_cp(CPUState* cpu, uint32_t ec) {
+ DPRINTF("%s: call (not implemented)\n", __func__);
+ abort();
+}
+
+static void hvf_handle_hvc(CPUState* cpu, uint32_t ec) {
+ DPRINTF("%s: call (not implemented)\n", __func__);
+ abort();
+}
+
+static void hvf_handle_smc(CPUState* cpu, uint32_t ec) {
+ DPRINTF("%s: call (not implemented)\n", __func__);
+ abort();
+}
+
+static void hvf_handle_sys_reg(CPUState* cpu) {
+ DPRINTF("%s: call (not implemented)\n", __func__);
+ abort();
+}
+
+static inline uint32_t hvf_vcpu_get_hsr(CPUState* cpu) {
+ return cpu->hvf_vcpu_exit_info->exception.syndrome;
+}
+
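+// ESR_ELx.SAS encodes the access size as log2(bytes): 0 = byte, 1 = halfword,
+// 2 = word, 3 = doubleword -- hence the 1 << below.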
+static inline int hvf_vcpu_dabt_get_as(CPUState* cpu) {
+ return 1 << ((hvf_vcpu_get_hsr(cpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
+}
+
+static inline int hvf_vcpu_dabt_get_rd(CPUState* cpu) {
+    // SRT is the register number itself, not a log2 size; no 1 << here.
+    return (hvf_vcpu_get_hsr(cpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+}
+
+static inline bool hvf_vcpu_trap_il_is32bit(CPUState* cpu) {
+ return !!(hvf_vcpu_get_hsr(cpu) & ESR_ELx_IL);
+}
+
+/**
+ * hvf_adjust_itstate - adjust ITSTATE when emulating instructions in an IT block
+ * @cpu: the CPU state pointer
+ *
+ * When exceptions occur while instructions are executed in Thumb IF-THEN
+ * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
+ * to do this little bit of work manually. The fields map like this:
+ *
+ * IT[7:0] -> CPSR[26:25],CPSR[15:10]
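+ *
+ * For example, IT[4:0] = 0b10100 advances to 0b01000; once IT[2:0] == 0
+ * the block is finished and the whole field is cleared.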
+ */
+static void hvf_adjust_itstate(CPUState* cpu)
+{
+ ARMCPU *armcpu = ARM_CPU(cpu);
+ CPUARMState *env = &armcpu->env;
+ static const uint32_t k_compat_psr_t_bit = 0x00000020;
+ static const uint32_t k_compat_psr_it_mask = 0x0600fc00;
+
+ unsigned long itbits, cond;
+ uint32_t cpsr = cpsr_read(env);
+ bool is_arm = !(cpsr & k_compat_psr_t_bit);
+
+    // BUG_ON(is_arm && (cpsr & k_compat_psr_it_mask));
+    (void)is_arm; // only consumed by the (commented-out) BUG_ON above
+
+ if (!(cpsr & k_compat_psr_it_mask))
+ return;
+
+ cond = (cpsr & 0xe000) >> 13;
+ itbits = (cpsr & 0x1c00) >> (10 - 2);
+ itbits |= (cpsr & (0x3 << 25)) >> 25;
+
+ /* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
+ if ((itbits & 0x7) == 0)
+ itbits = cond = 0;
+ else
+ itbits = (itbits << 1) & 0x1f;
+
+ cpsr &= ~k_compat_psr_it_mask;
+ cpsr |= cond << 13;
+ cpsr |= (itbits & 0x1c) << (10 - 2);
+ cpsr |= (itbits & 0x3) << 25;
+ cpsr_write(env, cpsr, 0xffffffff, CPSRWriteRaw);
+}
+
+static inline void hvf_skip_instr32(CPUState* cpu, bool is_wide_instr) {
+ ARMCPU *armcpu = ARM_CPU(cpu);
+ CPUARMState *env = &armcpu->env;
+ bool is_thumb;
+ static const uint32_t k_compat_psr_t_bit = 0x00000020;
+
+ is_thumb = !!(cpsr_read(env) & k_compat_psr_t_bit);
+ if (is_thumb && !is_wide_instr) {
+ env->pc += 2;
+ } else {
+ env->pc += 4;
+ }
+ hvf_adjust_itstate(cpu);
+}
+
+static inline void hvf_skip_instr(CPUState* cpu, bool is_wide_instr) {
+ ARMCPU *armcpu = ARM_CPU(cpu);
+ CPUARMState *env = &armcpu->env;
+
+ if (is_a64(env)) {
+ env->pc += 4;
+ } else {
+ hvf_skip_instr32(cpu, is_wide_instr);
+ }
+}
+
+static void hvf_decode_hsr(CPUState* cpu, bool* is_write, int* len) {
+ uint32_t esr = hvf_vcpu_get_hsr(cpu);
+ unsigned long rt;
+ int access_size;
+ bool sign_extend;
+ bool is_extabt = ESR_ELx_EA & esr;
+ bool is_ss1tw = ESR_ELx_S1PTW & esr;
+
+ if (is_extabt) {
+ DPRINTF("%s: cache operation on I/O addr. not implemented\n", __func__);
+ abort();
+ }
+
+ if (is_ss1tw) {
+ DPRINTF("%s: page table access to I/O mem. tell guest to fix its TTBR\n");
+ abort();
+ }
+
+ access_size = hvf_vcpu_dabt_get_as(cpu);
+
+ DPRINTF("%s: access size: %d\n", __func__, access_size);
+
+ if (access_size < 0) {
+ abort();
+ }
+
+    *is_write = esr & ESR_ELx_WNR;
+    sign_extend = esr & ESR_ELx_SSE;
+    rt = hvf_vcpu_dabt_get_rd(cpu);
+    (void)sign_extend; // consumed once loads are actually emulated
+    (void)rt;
+
+ *len = access_size;
+
+    // MMIO is emulated and should not be re-executed.
+ hvf_skip_instr(cpu, hvf_vcpu_trap_il_is32bit(cpu));
+
+ abort();
+}
+
+static void hvf_handle_mmio(CPUState* cpu) {
+ uint64_t gpa = cpu->hvf_vcpu_exit_info->exception.physical_address;
+ uint32_t esr = cpu->hvf_vcpu_exit_info->exception.syndrome;
+    unsigned long data;   // value being transferred
+    unsigned long rt;     // target register index
+    int ret;
+    bool is_write;
+    int len;
+    uint8_t data_buf[8];  // bounce buffer for address_space_rw()
+    (void)data; (void)rt; (void)ret; (void)data_buf;
+
+ bool dabt_valid = esr & ESR_ELx_ISV;
+
+ DPRINTF("%s: dabt valid? %d\n", __func__, dabt_valid);
+
+ hvf_decode_hsr(cpu, &is_write, &len);
+
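+    // Sketch of the eventual flow (not implemented yet): for a write, read
+    // the guest's Rt into data_buf and forward it with address_space_rw();
+    // for a read, fill data_buf from the bus and write it back to Rt, then
+    // let the vcpu resume past the (already skipped) instruction.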
+ abort();
+}
+
+static void hvf_handle_guest_abort(CPUState* cpu, uint32_t ec) {
+ DPRINTF("%s: call (not implemented)\n", __func__);
+ // TODO: 4K page guest on a 16K page host
+ static const uint32_t k_page_shift = 12;
+
+ uint64_t gpa = cpu->hvf_vcpu_exit_info->exception.physical_address;
+ hvf_slot* slot = hvf_find_overlap_slot(gpa, gpa + 1);
+ uint32_t esr = cpu->hvf_vcpu_exit_info->exception.syndrome;
+ uint32_t fault_status = esr & ESR_ELx_FSC_TYPE;
+ bool is_iabt = ESR_ELx_EC_IABT_LOW == ec;
+ bool is_write = (!is_iabt) && (esr & ESR_ELx_WNR);
+ bool is_cm = esr & ESR_ELx_CM;
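+    // Mirrors KVM's kvm_handle_guest_abort(): a stage-2 fault on a gpa with
+    // no backing memory slot is treated as an MMIO access.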
+
+ DPRINTF("Fault gpa: 0x%llx\n", (unsigned long long)gpa);
+
+ switch (fault_status) {
+ case ESR_ELx_FSC_FAULT:
+ DPRINTF("%s: is ESR_ELx_FSC_FAULT\n", __func__);
+ break;
+ case ESR_ELx_FSC_ACCESS:
+ DPRINTF("%s: is ESR_ELx_FSC_ACCESS\n", __func__);
+ break;
+ case ESR_ELx_FSC_PERM:
+ DPRINTF("%s: is ESR_ELx_FSC_PERM\n", __func__);
+ break;
+ default:
+ DPRINTF("%s: Unknown fault status: 0x%x\n", __func__, fault_status);
+ break;
+ }
+
+ DPRINTF("%s: is write? %d\n", __func__, is_write);
+
+    // (FSC_ACCESS faults are handled further down, once the memory slot is
+    // known, matching the order in KVM's kvm_handle_guest_abort().)
+
+ if (slot) {
+ DPRINTF("Overlap slot found for this fault\n");
+ }
+
+ if (!slot) {
+ DPRINTF("No overlap slot found for this fault, is MMIO\n");
+ if (is_iabt) {
+ DPRINTF("Prefetch abort on i/o address (not implemented)\n");
+ abort();
+ }
+
+ // Check for cache maint operation
+ if (is_cm) {
+ DPRINTF("Cache maintenance operation (not implemented)\n");
+ abort();
+ }
+
+ DPRINTF("Actual MMIO operation\n");
+ hvf_handle_mmio(cpu);
+ return;
+ }
+
+ if (ESR_ELx_FSC_ACCESS == fault_status) {
+ DPRINTF("Handle FSC_ACCESS fault (not implemented)\n");
+ abort();
+ }
+
+ DPRINTF("user_mem_abort\n");
+ abort();
+}
+
+static void hvf_handle_guest_debug(CPUState* cpu, uint32_t ec) {
+ DPRINTF("%s: call (not implemented)\n", __func__);
+ abort();
+}
+
static void hvf_handle_exception(CPUState* cpu) {
// We have an exception in EL2.
uint32_t syndrome = cpu->hvf_vcpu_exit_info->exception.syndrome;
@@ -1532,13 +1770,44 @@
uint8_t scratch[1024];
switch (ec) {
- case 0x20:
- hvf_read_mem(cpu, scratch, pa, 4);
- DPRINTF("Guest abort, aborting. pa %#llx mem: 0x%x\n", (unsigned long long)pa, *(uint32_t*)scratch);
- abort();
+ case ESR_ELx_EC_WFx:
+ hvf_handle_wfx(cpu);
+ break;
+ case ESR_ELx_EC_CP15_32:
+ case ESR_ELx_EC_CP15_64:
+ case ESR_ELx_EC_CP14_MR:
+ case ESR_ELx_EC_CP14_LS:
+ case ESR_ELx_EC_CP14_64:
+ hvf_handle_cp(cpu, ec);
+ break;
+ case ESR_ELx_EC_HVC32:
+ case ESR_ELx_EC_HVC64:
+ hvf_handle_hvc(cpu, ec);
+ break;
+ case ESR_ELx_EC_SMC32:
+ case ESR_ELx_EC_SMC64:
+ hvf_handle_smc(cpu, ec);
+ break;
+ case ESR_ELx_EC_SYS64:
+ hvf_handle_sys_reg(cpu);
+ break;
+ case ESR_ELx_EC_IABT_LOW:
+ case ESR_ELx_EC_DABT_LOW:
+ DPRINTF("%s: guest abort!\n", __func__);
+ hvf_handle_guest_abort(cpu, ec);
+ break;
+ case ESR_ELx_EC_SOFTSTP_LOW:
+ case ESR_ELx_EC_WATCHPT_LOW:
+ case ESR_ELx_EC_BREAKPT_LOW:
+ case ESR_ELx_EC_BKPT32:
+ case ESR_ELx_EC_BRK64:
+ hvf_handle_guest_debug(cpu, ec);
break;
default:
- DPRINTF("Some other exception class: 0x%x\n", __func__, ec);
+ DPRINTF("%s: Some other exception class: 0x%x\n", __func__, ec);
+        // Round-trip the register state so it is visible when debugging
+        // the abort below.
+        hvf_get_registers(cpu);
+        hvf_put_registers(cpu);
+ abort();
};
}
@@ -1546,26 +1815,28 @@
ARMCPU* armcpu = ARM_CPU(cpu);
CPUARMState* env = &armcpu->env;
int ret = 0;
+ uint64_t pc;
uint64_t val;
+ int i;
// TODO-convert-to-arm64
// uint64_t rip = 0;
// armcpu->halted = 0;
- // if (hvf_process_events(armcpu)) {
- // qemu_mutex_unlock_iothread();
- // pthread_yield_np();
- // qemu_mutex_lock_iothread();
- // return EXCP_HLT;
- // }
+ if (hvf_process_events(armcpu)) {
+ qemu_mutex_unlock_iothread();
+ pthread_yield_np();
+ qemu_mutex_lock_iothread();
+ return EXCP_HLT;
+ }
again:
do {
if (cpu->hvf_vcpu_dirty) {
- fprintf(stderr, "%s: should put registers\n", __func__);
+ DPRINTF("%s: should put registers\n", __func__);
hvf_put_registers(cpu);
cpu->hvf_vcpu_dirty = false;
}
@@ -1588,8 +1859,10 @@
// }
- HVF_CHECKED_CALL(hv_vcpu_get_reg(cpu->hvf_fd, HV_REG_PC, &val));
- DPRINTF("%s: run vcpu. pc: 0x%llx\n", __func__, (unsigned long long)val);
+ HVF_CHECKED_CALL(hv_vcpu_get_reg(cpu->hvf_fd, HV_REG_PC, &pc));
+ hvf_read_mem(cpu, &val, pc, 8);
+ DPRINTF("%s: run vcpu. pc: 0x%llx 8 bytes at pc: 0x%llx\n", __func__, (unsigned long long)pc, (unsigned long long)val);
+
int r = hv_vcpu_run(cpu->hvf_fd);
if (r) {