Move more stuff from cpu-defs.h to qom/cpu.h
This time, move jmp_env, opaque, mem_io_pc, mem_io_vaddr,
exception_index, tcg_exit_req. Note that the last one is not
used yet.
Change-Id: I3b14fd278feae0855bdd4a38eefd8170f404b82a
diff --git a/cpu-exec.c b/cpu-exec.c
index 7984748..9cc8741 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -52,7 +52,7 @@
{
CPUState *cpu = ENV_GET_CPU(env);
cpu->current_tb = NULL;
- longjmp(env->jmp_env, 1);
+ siglongjmp(cpu->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
@@ -61,8 +61,11 @@
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
- env->exception_index = -1;
- longjmp(env->jmp_env, 1);
+ CPUState *cpu = ENV_GET_CPU(env);
+
+ /* XXX: restore cpu registers saved in host registers */
+ cpu->exception_index = -1;
+ siglongjmp(cpu->jmp_env, 1);
}
#endif
@@ -267,16 +270,16 @@
#else
#error unsupported target CPU
#endif
- env->exception_index = -1;
+ cpu->exception_index = -1;
/* prepare setjmp context for exception handling */
for(;;) {
- if (setjmp(env->jmp_env) == 0) {
+ if (sigsetjmp(cpu->jmp_env, 0) == 0) {
/* if an exception is pending, we execute it here */
- if (env->exception_index >= 0) {
- if (env->exception_index >= EXCP_INTERRUPT) {
+ if (cpu->exception_index >= 0) {
+ if (cpu->exception_index >= EXCP_INTERRUPT) {
/* exit request from the cpu execution loop */
- ret = env->exception_index;
+ ret = cpu->exception_index;
if (ret == EXCP_DEBUG) {
cpu_handle_debug_exception(env);
}
@@ -289,23 +292,23 @@
#if defined(TARGET_I386)
do_interrupt(env);
#endif
- ret = env->exception_index;
+ ret = cpu->exception_index;
break;
#else
do_interrupt(env);
- env->exception_index = -1;
+ cpu->exception_index = -1;
#endif
}
}
#ifdef CONFIG_HAX
if (hax_enabled() && !hax_vcpu_exec(cpu))
- longjmp(env->jmp_env, 1);
+ siglongjmp(cpu->jmp_env, 1);
#endif
if (kvm_enabled()) {
kvm_cpu_exec(cpu);
- longjmp(env->jmp_env, 1);
+ siglongjmp(cpu->jmp_env, 1);
}
next_tb = 0; /* force lookup of first TB */
@@ -318,7 +321,7 @@
}
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
- env->exception_index = EXCP_DEBUG;
+ cpu->exception_index = EXCP_DEBUG;
cpu_loop_exit(env);
}
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
@@ -327,7 +330,7 @@
if (interrupt_request & CPU_INTERRUPT_HALT) {
cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
cpu->halted = 1;
- env->exception_index = EXCP_HLT;
+ cpu->exception_index = EXCP_HLT;
cpu_loop_exit(env);
}
#endif
@@ -335,7 +338,7 @@
if (interrupt_request & CPU_INTERRUPT_INIT) {
svm_check_intercept(env, SVM_EXIT_INIT);
do_cpu_init(env);
- env->exception_index = EXCP_HALTED;
+ cpu->exception_index = EXCP_HALTED;
cpu_loop_exit(env);
} else if (interrupt_request & CPU_INTERRUPT_SIPI) {
do_cpu_sipi(env);
@@ -364,7 +367,8 @@
!(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
int intno;
svm_check_intercept(env, SVM_EXIT_INTR);
- cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
+ cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
+ CPU_INTERRUPT_VIRQ);
intno = cpu_get_pic_interrupt(env);
qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
do_interrupt_x86_hardirq(env, intno, 1);
@@ -417,7 +421,7 @@
if ((interrupt_request & CPU_INTERRUPT_HARD) &&
cpu_mips_hw_interrupts_pending(env)) {
/* Raise it */
- env->exception_index = EXCP_EXT_INTERRUPT;
+ cpu->exception_index = EXCP_EXT_INTERRUPT;
env->error_code = 0;
do_interrupt(env);
next_tb = 0;
@@ -458,7 +462,7 @@
#elif defined(TARGET_ARM)
if (interrupt_request & CPU_INTERRUPT_FIQ
&& !(env->uncached_cpsr & CPSR_F)) {
- env->exception_index = EXCP_FIQ;
+ cpu->exception_index = EXCP_FIQ;
do_interrupt(env);
next_tb = 0;
}
@@ -474,7 +478,7 @@
if (interrupt_request & CPU_INTERRUPT_HARD
&& ((IS_M(env) && env->regs[15] < 0xfffffff0)
|| !(env->uncached_cpsr & CPSR_I))) {
- env->exception_index = EXCP_IRQ;
+ cpu->exception_index = EXCP_IRQ;
do_interrupt(env);
next_tb = 0;
}
@@ -580,7 +584,7 @@
}
if (unlikely(cpu->exit_request)) {
cpu->exit_request = 0;
- env->exception_index = EXCP_INTERRUPT;
+ cpu->exception_index = EXCP_INTERRUPT;
cpu_loop_exit(env);
}
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
@@ -659,7 +663,7 @@
/* Execute remaining instructions. */
cpu_exec_nocache(env, insns_left, tb);
}
- env->exception_index = EXCP_INTERRUPT;
+ cpu->exception_index = EXCP_INTERRUPT;
next_tb = 0;
cpu_loop_exit(env);
}
diff --git a/exec.c b/exec.c
index 6bd2d3c..277f004 100644
--- a/exec.c
+++ b/exec.c
@@ -1470,7 +1470,7 @@
flushed */
if (!cpu_physical_memory_is_clean(ram_addr)) {
CPUArchState *env = current_cpu->env_ptr;
- tlb_set_dirty(env, env->mem_io_vaddr);
+ tlb_set_dirty(env, current_cpu->mem_io_vaddr);
}
}
@@ -1487,7 +1487,7 @@
flushed */
if (!cpu_physical_memory_is_clean(ram_addr)) {
CPUArchState *env = current_cpu->env_ptr;
- tlb_set_dirty(env, env->mem_io_vaddr);
+ tlb_set_dirty(env, current_cpu->mem_io_vaddr);
}
}
@@ -1504,7 +1504,7 @@
flushed */
if (!cpu_physical_memory_is_clean(ram_addr)) {
CPUArchState *env = current_cpu->env_ptr;
- tlb_set_dirty(env, env->mem_io_vaddr);
+ tlb_set_dirty(env, current_cpu->mem_io_vaddr);
}
}
@@ -1538,7 +1538,7 @@
cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
return;
}
- vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
+ vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
if ((vaddr == (wp->vaddr & len_mask) ||
(vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
@@ -1547,7 +1547,7 @@
env->watchpoint_hit = wp;
tb_check_watchpoint(env);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
- env->exception_index = EXCP_DEBUG;
+ cpu->exception_index = EXCP_DEBUG;
cpu_loop_exit(env);
} else {
cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
diff --git a/hw/android/goldfish/trace.c b/hw/android/goldfish/trace.c
index e4bd9fa..4140186 100644
--- a/hw/android/goldfish/trace.c
+++ b/hw/android/goldfish/trace.c
@@ -198,7 +198,7 @@
break;
case TRACE_DEV_REG_STOP_EMU: // stop the VM execution
- cpu_single_env->exception_index = EXCP_HLT;
+ current_cpu->exception_index = EXCP_HLT;
current_cpu->halted = 1;
qemu_system_shutdown_request();
cpu_loop_exit(cpu_single_env);
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 63492fa..473b19a 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -444,7 +444,7 @@
/* Return the physical page corresponding to a virtual one. Use it
only for debugging because no protection checks are done. Return -1
if no page found. */
-hwaddr cpu_get_phys_page_debug(CPUArchState *env, target_ulong addr);
+hwaddr cpu_get_phys_page_debug(CPUArchState *env, vaddr addr);
/* memory API */
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 3273695..eb172b9 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -61,9 +61,7 @@
#define EXCP_HLT 0x10001 /* hlt instruction reached */
#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
-
-#define TB_JMP_CACHE_BITS 12
-#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
+#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
addresses on the same page. The top bits are the same. This allows
@@ -151,13 +149,6 @@
#define CPU_TEMP_BUF_NLONGS 128
#define CPU_COMMON \
/* soft mmu support */ \
- /* in order to avoid passing too many arguments to the MMIO \
- helpers, we store some rarely used information in the CPU \
- context) */ \
- uintptr_t mem_io_pc; /* host pc at which the memory was \
- accessed */ \
- target_ulong mem_io_vaddr; /* target virtual addr at which the \
- memory was accessed */ \
CPU_COMMON_TLB \
\
int64_t icount_extra; /* Instructions until next timer event. */ \
@@ -176,12 +167,5 @@
QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints; \
CPUWatchpoint *watchpoint_hit; \
\
- /* Core interrupt code */ \
- jmp_buf jmp_env; \
- int exception_index; \
- \
- /* user data */ \
- void *opaque; \
- \
#endif
diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h
index fb0cb4d..4a9c31b 100644
--- a/include/exec/softmmu_template.h
+++ b/include/exec/softmmu_template.h
@@ -117,16 +117,16 @@
target_ulong addr,
uintptr_t retaddr)
{
+ CPUState *cpu = ENV_GET_CPU(env);
uint64_t val;
int index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
- env->mem_io_pc = retaddr;
- if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
- && !cpu_can_do_io(ENV_GET_CPU(env))) {
+ cpu->mem_io_pc = retaddr;
+ if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT) && !cpu_can_do_io(cpu)) {
cpu_io_recompile(env, retaddr);
}
- env->mem_io_vaddr = addr;
+ cpu->mem_io_vaddr = addr;
#if SHIFT <= 2
val = io_mem_read(index, physaddr, 1 << SHIFT);
#else
@@ -334,15 +334,16 @@
target_ulong addr,
uintptr_t retaddr)
{
+ CPUState *cpu = ENV_GET_CPU(env);
int index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
- && !cpu_can_do_io(ENV_GET_CPU(env))) {
+ && !cpu_can_do_io(cpu)) {
cpu_io_recompile(env, retaddr);
}
- env->mem_io_vaddr = addr;
- env->mem_io_pc = retaddr;
+ cpu->mem_io_vaddr = addr;
+ cpu->mem_io_pc = retaddr;
#if SHIFT <= 2
io_mem_write(index, physaddr, val, 1 << SHIFT);
#else
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 67cf35b..3b4071a 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -61,6 +61,9 @@
struct KVMState;
struct kvm_run;
+#define TB_JMP_CACHE_BITS 12
+#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
+
// TODO(digit): Make this a proper QOM object that inherits from
// DeviceState/DeviceClass.
struct CPUState {
@@ -82,15 +85,24 @@
volatile sig_atomic_t exit_request;
uint32_t interrupt_request;
int singlestep_enabled;
+ sigjmp_buf jmp_env;
void *env_ptr; /* CPUArchState */
struct TranslationBlock *current_tb;
struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
struct GDBRegisterState *gdb_regs;
- QTAILQ_ENTRY(CPUState) node; /* next CPU sharing TB cache */
+ QTAILQ_ENTRY(CPUState) node;
const char *cpu_model_str;
+ void *opaque;
+
+ /* In order to avoid passing too many arguments to the MMIO helpers,
+ * we store some rarely used information in the CPU context.
+ */
+ uintptr_t mem_io_pc;
+ vaddr mem_io_vaddr;
+
int kvm_fd;
bool kvm_vcpu_dirty;
struct KVMState *kvm_state;
@@ -102,6 +114,13 @@
int cpu_index; /* used by alpha TCG */
uint32_t halted; /* used by alpha, cris, ppc TCG */
uint32_t can_do_io;
+ int32_t exception_index; /* used by m68k TCG */
+
+ /* Note that this is accessed at the start of every TB via a negative
+ offset from AREG0. Leave this field at the end so as to make the
+ (absolute value) offset as small as possible. This reduces code
+ size, especially for hosts without large memory offsets. */
+ volatile sig_atomic_t tcg_exit_req;
};
QTAILQ_HEAD(CPUTailQ, CPUState);
diff --git a/kvm-all.c b/kvm-all.c
index cd8ddd3..39463c3 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -595,7 +595,6 @@
int kvm_cpu_exec(CPUState *cpu)
{
- CPUArchState *env = cpu->env_ptr;
struct kvm_run *run = cpu->kvm_run;
int ret;
@@ -666,7 +665,7 @@
if (kvm_arch_debug(&run->debug.arch)) {
gdb_set_stop_cpu(cpu);
vm_stop(EXCP_DEBUG);
- env->exception_index = EXCP_DEBUG;
+ cpu->exception_index = EXCP_DEBUG;
return 0;
}
/* re-enter, this exception was guest-internal */
@@ -682,7 +681,7 @@
if (cpu->exit_request) {
cpu->exit_request = 0;
- env->exception_index = EXCP_INTERRUPT;
+ cpu->exception_index = EXCP_INTERRUPT;
}
return ret;
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 38f8673..2da60ff 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -315,7 +315,7 @@
is returned if the signal was handled by the virtual CPU. */
int cpu_arm_signal_handler(int host_signum, void *pinfo,
void *puc);
-int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
+int cpu_arm_handle_mmu_fault (CPUARMState *env, vaddr address, int rw,
int mmu_idx);
#define cpu_handle_mmu_fault cpu_arm_handle_mmu_fault
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 6cbea7c..d522318 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -820,7 +820,7 @@
handle it. */
/* TODO: Need to escalate if the current priority is higher than the
one we're raising. */
- switch (env->exception_index) {
+ switch (cs->exception_index) {
case EXCP_UDEF:
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
return;
@@ -851,7 +851,7 @@
do_v7m_exception_exit(env);
return;
default:
- cpu_abort(cs, "Unhandled exception 0x%x\n", env->exception_index);
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
return; /* Never happens. Keep compiler happy. */
}
@@ -893,7 +893,7 @@
return;
}
/* TODO: Vectored interrupt controller. */
- switch (env->exception_index) {
+ switch (cs->exception_index) {
case EXCP_UDEF:
new_mode = ARM_CPU_MODE_UND;
addr = 0x04;
@@ -965,7 +965,7 @@
offset = 4;
break;
default:
- cpu_abort(cs, "Unhandled exception 0x%x\n", env->exception_index);
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
return; /* Never happens. Keep compiler happy. */
}
if (arm_feature(env, ARM_FEATURE_TRUSTZONE)) {
@@ -1063,8 +1063,8 @@
}
static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
- int is_user, uint32_t *phys_ptr, int *prot,
- target_ulong *page_size)
+ int is_user, hwaddr *phys_ptr,
+ int *prot, target_ulong *page_size)
{
int code;
uint32_t table;
@@ -1072,7 +1072,7 @@
int type;
int ap;
int domain;
- uint32_t phys_addr;
+ hwaddr phys_addr;
/* Pagetable walk. */
/* Lookup l1 descriptor. */
@@ -1156,8 +1156,8 @@
}
static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
- int is_user, uint32_t *phys_ptr, int *prot,
- target_ulong *page_size)
+ int is_user, hwaddr *phys_ptr,
+ int *prot, target_ulong *page_size)
{
int code;
uint32_t table;
@@ -1166,7 +1166,7 @@
int type;
int ap;
int domain;
- uint32_t phys_addr;
+ hwaddr phys_addr;
/* Pagetable walk. */
/* Lookup l1 descriptor. */
@@ -1258,8 +1258,9 @@
return code | (domain << 4);
}
-static int get_phys_addr_mpu(CPUARMState *env, uint32_t address, int access_type,
- int is_user, uint32_t *phys_ptr, int *prot)
+static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
+ int access_type, int is_user,
+ hwaddr *phys_ptr, int *prot)
{
int n;
uint32_t mask;
@@ -1318,17 +1319,40 @@
return 0;
}
+/* get_phys_addr - get the physical address for this virtual address
+ *
+ * Find the physical address corresponding to the given virtual address,
+ * by doing a translation table walk on MMU based systems or using the
+ * MPU state on MPU based systems.
+ *
+ * Returns 0 if the translation was successful. Otherwise, phys_ptr,
+ * prot and page_size are not filled in, and the return value provides
+ * information on why the translation aborted, in the format of a
+ * DFSR/IFSR fault register, with the following caveats:
+ * * we honour the short vs long DFSR format differences.
+ * * the WnR bit is never set (the caller must do this).
+ * * for MPU based systems we don't bother to return a full FSR format
+ * value.
+ *
+ * @env: CPUARMState
+ * @address: virtual address to get physical address for
+ * @access_type: 0 for read, 1 for write, 2 for execute
+ * @is_user: 0 for privileged access, 1 for user
+ * @phys_ptr: set to the physical address corresponding to the virtual address
+ * @prot: set to the permissions for the page containing phys_ptr
+ * @page_size: set to the size of the page containing phys_ptr
+ */
#ifdef CONFIG_GLES2
int get_phys_addr(CPUARMState *env, uint32_t address,
int access_type, int is_user,
- uint32_t *phys_ptr, int *prot,
+ hwaddr *phys_ptr, int *prot,
target_ulong *page_size);
#else
static
#endif
int get_phys_addr(CPUARMState *env, uint32_t address,
int access_type, int is_user,
- uint32_t *phys_ptr, int *prot,
+ hwaddr *phys_ptr, int *prot,
target_ulong *page_size)
{
/* Fast Context Switch Extension. */
@@ -1354,10 +1378,11 @@
}
}
-int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
+int cpu_arm_handle_mmu_fault (CPUARMState *env, vaddr address,
int access_type, int mmu_idx)
{
- uint32_t phys_addr;
+ CPUState *cs = ENV_GET_CPU(env);
+ hwaddr phys_addr;
target_ulong page_size;
int prot;
int ret, is_user;
@@ -1367,7 +1392,7 @@
&page_size);
if (ret == 0) {
/* Map a single [sub]page. */
- phys_addr &= ~(uint32_t)0x3ff;
+ phys_addr &= ~(hwaddr)0x3ff;
address &= ~(uint32_t)0x3ff;
tlb_set_page (env, address, phys_addr, prot | PAGE_EXEC, mmu_idx,
page_size);
@@ -1377,28 +1402,29 @@
if (access_type == 2) {
env->cp15.c5_insn = ret;
env->cp15.c6_insn = address;
- env->exception_index = EXCP_PREFETCH_ABORT;
+ cs->exception_index = EXCP_PREFETCH_ABORT;
} else {
env->cp15.c5_data = ret;
if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
env->cp15.c5_data |= (1 << 11);
env->cp15.c6_data = address;
- env->exception_index = EXCP_DATA_ABORT;
+ cs->exception_index = EXCP_DATA_ABORT;
}
return 1;
}
-hwaddr cpu_get_phys_page_debug(CPUARMState *env, target_ulong addr)
+hwaddr cpu_get_phys_page_debug(CPUARMState *env, vaddr addr)
{
- uint32_t phys_addr;
+ hwaddr phys_addr;
target_ulong page_size;
int prot;
int ret;
ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot, &page_size);
- if (ret != 0)
+ if (ret != 0) {
return -1;
+ }
return phys_addr;
}
@@ -1619,7 +1645,7 @@
}
break;
case 8: {
- uint32_t phys_addr;
+ hwaddr phys_addr;
target_ulong page_size;
int prot;
int ret, is_user = op2 & 2;
@@ -2360,10 +2386,11 @@
env->v7m.other_sp = val;
break;
case 16: /* PRIMASK */
- if (val & 1)
+ if (val & 1) {
env->uncached_cpsr |= CPSR_I;
- else
+ } else {
env->uncached_cpsr &= ~CPSR_I;
+ }
break;
case 17: /* BASEPRI */
env->v7m.basepri = val & 0xff;
@@ -2374,10 +2401,11 @@
env->v7m.basepri = val;
break;
case 19: /* FAULTMASK */
- if (val & 1)
+ if (val & 1) {
env->uncached_cpsr |= CPSR_F;
- else
+ } else {
env->uncached_cpsr &= ~CPSR_F;
+ }
break;
case 20: /* CONTROL */
env->v7m.control = val & 3;
@@ -2657,8 +2685,8 @@
return (val >> 32) | (val != 0);
}
-/* VFP support. We follow the convention used for VFP instrunctions:
- Single precition routines have a "s" suffix, double precision a
+/* VFP support. We follow the convention used for VFP instructions:
+ Single precision routines have a "s" suffix, double precision a
"d" suffix. */
/* Convert host exception flags to vfp form. */
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index 5ca3805..91338a9 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -26,7 +26,10 @@
#if !defined(CONFIG_USER_ONLY)
static void raise_exception(CPUARMState *env, int tt)
{
- env->exception_index = tt;
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ cs->exception_index = tt;
cpu_loop_exit(env);
}
#endif
@@ -79,6 +82,7 @@
void tlb_fill(CPUARMState *env, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
+ CPUState *cs = ENV_GET_CPU(env);
int ret;
ret = cpu_arm_handle_mmu_fault(env, addr, is_write, mmu_idx);
@@ -87,7 +91,7 @@
/* now we have a real cpu fault */
cpu_restore_state(env, retaddr);
}
- raise_exception(env, env->exception_index);
+ raise_exception(env, cs->exception_index);
}
}
@@ -265,14 +269,18 @@
void HELPER(wfi)(CPUARMState *env)
{
- env->exception_index = EXCP_HLT;
- ENV_GET_CPU(env)->halted = 1;
+ CPUState *cs = CPU(arm_env_get_cpu(env));
+
+ cs->exception_index = EXCP_HLT;
+ cs->halted = 1;
cpu_loop_exit(env);
}
void HELPER(exception)(CPUARMState *env, uint32_t excp)
{
- env->exception_index = excp;
+ CPUState *cs = CPU(arm_env_get_cpu(env));
+
+ cs->exception_index = excp;
cpu_loop_exit(env);
}
diff --git a/target-i386/excp_helper.c b/target-i386/excp_helper.c
index 3cd96a3..0b2fbc9 100644
--- a/target-i386/excp_helper.c
+++ b/target-i386/excp_helper.c
@@ -89,10 +89,11 @@
* env->eip value AFTER the interrupt instruction. It is only relevant if
* is_int is TRUE.
*/
-void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno,
+static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
int is_int, int error_code,
int next_eip_addend)
{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
if (!is_int) {
helper_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
error_code);
@@ -101,7 +102,7 @@
helper_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0);
}
- env->exception_index = intno;
+ cs->exception_index = intno;
env->error_code = error_code;
env->exception_is_int = is_int;
env->exception_next_eip = env->eip + next_eip_addend;
@@ -110,13 +111,18 @@
/* shortcuts to generate exceptions */
+void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
+ int error_code, int next_eip_addend)
+{
+ raise_interrupt2(env, intno, is_int, error_code, next_eip_addend);
+}
void raise_exception_err(CPUX86State *env, int exception_index,
int error_code)
{
- raise_interrupt(env, exception_index, 0, error_code, 0);
+ raise_interrupt2(env, exception_index, 0, error_code, 0);
}
void raise_exception(CPUX86State *env, int exception_index)
{
- raise_interrupt(env, exception_index, 0, 0, 0);
+ raise_interrupt2(env, exception_index, 0, 0, 0);
}
diff --git a/target-i386/hax-all.c b/target-i386/hax-all.c
index 05f01ed..42076ae 100644
--- a/target-i386/hax-all.c
+++ b/target-i386/hax-all.c
@@ -599,7 +599,7 @@
/* hlt instruction with interrupt disabled is shutdown */
env->eflags |= IF_MASK;
cpu->halted = 1;
- env->exception_index = EXCP_HLT;
+ cpu->exception_index = EXCP_HLT;
ret = HAX_EMUL_HLT;
}
break;
@@ -619,7 +619,7 @@
if (cpu->exit_request) {
cpu->exit_request = 0;
- env->exception_index = EXCP_INTERRUPT;
+ cpu->exception_index = EXCP_INTERRUPT;
}
return ret;
}
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 9ce3d80..f5f9e06 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -967,6 +967,7 @@
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
int is_write1, int mmu_idx)
{
+ CPUState *cs = ENV_GET_CPU(env);
uint64_t ptep, pte;
target_ulong pde_addr, pte_addr;
int error_code, is_dirty, prot, page_size, is_write, is_user;
@@ -1002,7 +1003,7 @@
sext = (int64_t)addr >> 47;
if (sext != 0 && sext != -1) {
env->error_code = 0;
- env->exception_index = EXCP0D_GPF;
+ cs->exception_index = EXCP0D_GPF;
return 1;
}
@@ -1250,7 +1251,7 @@
env->cr[2] = addr;
}
env->error_code = error_code;
- env->exception_index = EXCP0E_PAGE;
+ cs->exception_index = EXCP0E_PAGE;
return 1;
}
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 38752a7..3efc138 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -785,7 +785,7 @@
(env->eflags & IF_MASK)) &&
!(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
cpu->halted = 1;
- env->exception_index = EXCP_HLT;
+ cpu->exception_index = EXCP_HLT;
return 0;
}
diff --git a/target-i386/mem_helper.c b/target-i386/mem_helper.c
index b4d3f9a..e14c775 100644
--- a/target-i386/mem_helper.c
+++ b/target-i386/mem_helper.c
@@ -138,11 +138,12 @@
ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
if (ret) {
+ CPUState *cs = ENV_GET_CPU(env);
if (retaddr) {
/* now we have a real cpu fault */
cpu_restore_state(env, retaddr);
}
- raise_exception_err(env, env->exception_index, env->error_code);
+ raise_exception_err(env, cs->exception_index, env->error_code);
}
}
#endif
diff --git a/target-i386/misc_helper.c b/target-i386/misc_helper.c
index f4863dc..6b77034 100644
--- a/target-i386/misc_helper.c
+++ b/target-i386/misc_helper.c
@@ -544,20 +544,25 @@
}
#endif
-static void do_hlt(CPUX86State *env)
+static void do_hlt(X86CPU *cpu)
{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+
env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
- ENV_GET_CPU(env)->halted = 1;
- env->exception_index = EXCP_HLT;
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
cpu_loop_exit(env);
}
void helper_hlt(CPUX86State *env, int next_eip_addend)
{
+ X86CPU *cpu = x86_env_get_cpu(env);
+
helper_svm_check_intercept_param(env, SVM_EXIT_HLT, 0);
env->eip += next_eip_addend;
- do_hlt(env);
+ do_hlt(cpu);
}
void helper_monitor(CPUX86State *env, target_ulong ptr)
@@ -571,24 +576,30 @@
void helper_mwait(CPUX86State *env, int next_eip_addend)
{
+ CPUState *cs;
+ X86CPU *cpu;
+
if ((uint32_t)env->regs[R_ECX] != 0) {
raise_exception(env, EXCP0D_GPF);
}
helper_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0);
env->eip += next_eip_addend;
+ cpu = x86_env_get_cpu(env);
+ cs = CPU(cpu);
/* XXX: not complete but not completely erroneous */
- CPUState *cpu = ENV_GET_CPU(env);
- if (cpu->cpu_index != 0 || QTAILQ_NEXT(cpu, node) != NULL) {
+ if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
/* more than one CPU: do not sleep because another CPU may
wake this one */
} else {
- do_hlt(env);
+ do_hlt(cpu);
}
}
void helper_debug(CPUX86State *env)
{
- env->exception_index = EXCP_DEBUG;
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+
+ cs->exception_index = EXCP_DEBUG;
cpu_loop_exit(env);
}
diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c
index e8a4345..e7f314f 100644
--- a/target-i386/seg_helper.c
+++ b/target-i386/seg_helper.c
@@ -1220,12 +1220,13 @@
void do_interrupt(CPUX86State *env)
{
+ CPUState *cs = ENV_GET_CPU(env);
+
#if defined(CONFIG_USER_ONLY)
/* if user mode only, we simulate a fake exception
which will be handled outside the cpu execution
loop */
- do_interrupt_user(env,
- env->exception_index,
+ do_interrupt_user(env, cs->exception_index,
env->exception_is_int,
env->error_code,
env->exception_next_eip);
@@ -1235,8 +1236,7 @@
/* simulate a real cpu exception. On i386, it can
trigger new exceptions, but we do not handle
double or triple faults yet. */
- do_interrupt_all(env,
- env->exception_index,
+ do_interrupt_all(env, cs->exception_index,
env->exception_is_int,
env->error_code,
env->exception_next_eip, 0);
diff --git a/target-i386/svm_helper.c b/target-i386/svm_helper.c
index 5b2a83b..61981a9 100644
--- a/target-i386/svm_helper.c
+++ b/target-i386/svm_helper.c
@@ -107,6 +107,7 @@
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
target_ulong addr;
uint32_t event_inj;
uint32_t int_ctl;
@@ -229,7 +230,9 @@
env->hflags2 |= HF2_GIF_MASK;
if (int_ctl & V_IRQ_MASK) {
- ENV_GET_CPU(env)->interrupt_request |= CPU_INTERRUPT_VIRQ;
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+
+ cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
}
/* maybe we need to inject an event */
@@ -243,7 +246,7 @@
/* FIXME: need to implement valid_err */
switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
case SVM_EVTINJ_TYPE_INTR:
- env->exception_index = vector;
+ cs->exception_index = vector;
env->error_code = event_inj_err;
env->exception_is_int = 0;
env->exception_next_eip = -1;
@@ -252,7 +255,7 @@
do_interrupt_x86_hardirq(env, vector, 1);
break;
case SVM_EVTINJ_TYPE_NMI:
- env->exception_index = EXCP02_NMI;
+ cs->exception_index = EXCP02_NMI;
env->error_code = event_inj_err;
env->exception_is_int = 0;
env->exception_next_eip = env->eip;
@@ -260,7 +263,7 @@
cpu_loop_exit(env);
break;
case SVM_EVTINJ_TYPE_EXEPT:
- env->exception_index = vector;
+ cs->exception_index = vector;
env->error_code = event_inj_err;
env->exception_is_int = 0;
env->exception_next_eip = -1;
@@ -268,7 +271,7 @@
cpu_loop_exit(env);
break;
case SVM_EVTINJ_TYPE_SOFT:
- env->exception_index = vector;
+ cs->exception_index = vector;
env->error_code = event_inj_err;
env->exception_is_int = 1;
env->exception_next_eip = env->eip;
@@ -276,7 +279,8 @@
cpu_loop_exit(env);
break;
}
- qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
+ env->error_code);
}
}
@@ -487,9 +491,9 @@
}
/* Note: currently only 32 bits of exit_code are used */
-void helper_vmexit(CPUX86State *env,
- uint32_t exit_code, uint64_t exit_info_1)
+void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
uint32_t int_ctl;
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
@@ -618,7 +622,7 @@
#GP fault is delivered inside the host.) */
/* remove any pending exception */
- env->exception_index = -1;
+ cs->exception_index = -1;
env->error_code = 0;
env->old_exception = -1;
diff --git a/target-mips/helper.c b/target-mips/helper.c
index dc353b2..2d70609 100644
--- a/target-mips/helper.c
+++ b/target-mips/helper.c
@@ -209,6 +209,7 @@
static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
int rw, int tlb_error)
{
+ CPUState *cs = CPU(mips_env_get_cpu(env));
int exception = 0, error_code = 0;
switch (tlb_error) {
@@ -254,7 +255,7 @@
((address & 0xC00000000000ULL) >> (55 - env->SEGBITS)) |
((address & ((1ULL << env->SEGBITS) - 1) & 0xFFFFFFFFFFFFE000ULL) >> 9);
#endif
- env->exception_index = exception;
+ cs->exception_index = exception;
env->error_code = error_code;
}
@@ -512,7 +513,7 @@
}
#endif
-hwaddr cpu_get_phys_page_debug(CPUMIPSState *env, target_ulong addr)
+hwaddr cpu_get_phys_page_debug(CPUMIPSState *env, vaddr addr)
{
#if defined(CONFIG_USER_ONLY)
return addr;
@@ -575,25 +576,28 @@
void do_interrupt (CPUMIPSState *env)
{
+ CPUState *cs = ENV_GET_CPU(env);
#if !defined(CONFIG_USER_ONLY)
target_ulong offset;
int cause = -1;
const char *name;
- if (qemu_log_enabled() && env->exception_index != EXCP_EXT_INTERRUPT) {
- if (env->exception_index < 0 || env->exception_index > EXCP_LAST)
+ if (qemu_log_enabled() && cs->exception_index != EXCP_EXT_INTERRUPT) {
+ if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) {
name = "unknown";
- else
- name = excp_names[env->exception_index];
+ } else {
+ name = excp_names[cs->exception_index];
+ }
qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " %s exception\n",
__func__, env->active_tc.PC, env->CP0_EPC, name);
}
- if (env->exception_index == EXCP_EXT_INTERRUPT &&
- (env->hflags & MIPS_HFLAG_DM))
- env->exception_index = EXCP_DINT;
+ if (cs->exception_index == EXCP_EXT_INTERRUPT &&
+ (env->hflags & MIPS_HFLAG_DM)) {
+ cs->exception_index = EXCP_DINT;
+ }
offset = 0x180;
- switch (env->exception_index) {
+ switch (cs->exception_index) {
case EXCP_DSS:
env->CP0_Debug |= 1 << CP0DB_DSS;
/* Debug single step cannot be raised inside a delay slot and
@@ -781,11 +785,11 @@
env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) | (cause << CP0Ca_EC);
break;
default:
- qemu_log("Invalid MIPS exception %d. Exiting\n", env->exception_index);
- printf("Invalid MIPS exception %d. Exiting\n", env->exception_index);
+ qemu_log("Invalid MIPS exception %d. Exiting\n", cs->exception_index);
+ printf("Invalid MIPS exception %d. Exiting\n", cs->exception_index);
exit(1);
}
- if (qemu_log_enabled() && env->exception_index != EXCP_EXT_INTERRUPT) {
+ if (qemu_log_enabled() && cs->exception_index != EXCP_EXT_INTERRUPT) {
qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
" S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
__func__, env->active_tc.PC, env->CP0_EPC, cause,
@@ -793,7 +797,7 @@
env->CP0_DEPC);
}
#endif
- env->exception_index = EXCP_NONE;
+ cs->exception_index = EXCP_NONE;
}
void r4k_invalidate_tlb (CPUMIPSState *env, int idx)
diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
index 8a252f8..7541de1 100644
--- a/target-mips/op_helper.c
+++ b/target-mips/op_helper.c
@@ -85,11 +85,12 @@
void helper_raise_exception_err (CPUMIPSState *env,
uint32_t exception, int error_code)
{
-#if 1
- if (exception < 0x100)
+ CPUState *cs = CPU(mips_env_get_cpu(env));
+
+ if (exception < 0x100) {
qemu_log("%s: %d %d\n", __func__, exception, error_code);
-#endif
- env->exception_index = exception;
+ }
+ cs->exception_index = exception;
env->error_code = error_code;
cpu_loop_exit(env);
}
@@ -1958,6 +1959,7 @@
void tlb_fill (CPUMIPSState* env, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
+ CPUState *cs = ENV_GET_CPU(env);
TranslationBlock *tb;
int ret;
@@ -1972,7 +1974,7 @@
cpu_restore_state(env, retaddr);
}
}
- helper_raise_exception_err(env, env->exception_index, env->error_code);
+ helper_raise_exception_err(env, cs->exception_index, env->error_code);
}
}
diff --git a/target-mips/translate.c b/target-mips/translate.c
index 798517b..1b6cef1 100755
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -8766,7 +8766,7 @@
env->hflags |= MIPS_HFLAG_64;
}
#endif
- env->exception_index = EXCP_NONE;
+ cpu->exception_index = EXCP_NONE;
}
void restore_state_to_opc(CPUMIPSState *env, TranslationBlock *tb, int pc_pos)
diff --git a/translate-all.c b/translate-all.c
index 132479e..b552105 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -1092,9 +1092,9 @@
if (current_tb_not_found) {
current_tb_not_found = 0;
current_tb = NULL;
- if (env->mem_io_pc) {
+ if (cpu->mem_io_pc) {
/* now we have a real cpu fault */
- current_tb = tb_find_pc(env->mem_io_pc);
+ current_tb = tb_find_pc(cpu->mem_io_pc);
}
}
if (current_tb == tb &&
@@ -1106,7 +1106,7 @@
restore the CPU state */
current_tb_modified = 1;
- cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
+ cpu_restore_state_from_tb(current_tb, env, cpu->mem_io_pc);
cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
&current_flags);
}
@@ -1133,7 +1133,7 @@
if (!p->first_tb) {
invalidate_page_bitmap(p);
if (is_cpu_write_access) {
- tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
+ tlb_unprotect_code_phys(env, start, cpu->mem_io_vaddr);
}
}
#endif
@@ -1414,12 +1414,12 @@
CPUState *cpu = ENV_GET_CPU(env);
TranslationBlock *tb;
- tb = tb_find_pc(env->mem_io_pc);
+ tb = tb_find_pc(cpu->mem_io_pc);
if (!tb) {
cpu_abort(cpu, "check_watchpoint: could not find TB for pc=%p",
- (void *)env->mem_io_pc);
+ (void *)cpu->mem_io_pc);
}
- cpu_restore_state_from_tb(tb, env, env->mem_io_pc);
+ cpu_restore_state_from_tb(tb, env, cpu->mem_io_pc);
tb_phys_invalidate(tb, -1);
}