cpu: Move tb_jmp_cache field from CPU_COMMON to CPUState
Clear it explicitly in cpu_common_reset(), since the field no longer sits in the part of CPUArchState that is zeroed on CPU reset.
Signed-off-by: Andreas Färber <afaerber@suse.de>
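
For readers unfamiliar with the jump cache, here is a minimal standalone model of what this patch relocates: an array of TranslationBlock pointers, indexed by a hash of the guest PC, now embedded in the common CPUState instead of the per-target CPUArchState, and wiped on reset. The struct layouts, the vaddr_t typedef and the toy hash below are simplified stand-ins for illustration, not the actual QEMU definitions.

/* Illustrative sketch only -- simplified stand-ins for the QEMU types.
 * TB_JMP_CACHE_BITS matches the value moved into qom/cpu.h; everything
 * else (struct layout, hash function) is reduced for the example. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

typedef uint64_t vaddr_t;                 /* stand-in for target_ulong */

typedef struct TranslationBlock {
    vaddr_t pc;                           /* guest PC this TB starts at */
} TranslationBlock;

typedef struct CPUState {
    /* ... other common fields ... */
    TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; /* now per-CPUState */
} CPUState;

/* Toy hash: the real tb_jmp_cache_hash_func() mixes page and in-page bits. */
static unsigned int tb_jmp_cache_hash(vaddr_t pc)
{
    return pc & (TB_JMP_CACHE_SIZE - 1);
}

/* Fast-path lookup as in tb_find_fast(): a hit only counts if the cached
 * TB actually starts at the requested pc. */
static TranslationBlock *tb_lookup(CPUState *cpu, vaddr_t pc)
{
    TranslationBlock *tb = cpu->tb_jmp_cache[tb_jmp_cache_hash(pc)];
    return (tb && tb->pc == pc) ? tb : NULL;
}

/* Equivalent of the memset this patch adds to cpu_common_reset(). */
static void cpu_reset_jmp_cache(CPUState *cpu)
{
    memset(cpu->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
}

int main(void)
{
    static CPUState cpu;
    static TranslationBlock tb = { .pc = 0x40000000 };

    cpu.tb_jmp_cache[tb_jmp_cache_hash(tb.pc)] = &tb;  /* as in tb_find_slow() */
    printf("hit:         %p\n", (void *)tb_lookup(&cpu, tb.pc));
    cpu_reset_jmp_cache(&cpu);
    printf("after reset: %p\n", (void *)tb_lookup(&cpu, tb.pc));
    return 0;
}

Keeping the cache in CPUState lets common code such as qom/cpu.c and translate-all.c touch it directly, without the CPUArchState detour via cpu->env_ptr that the translate-all.c hunks below remove.
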
diff --git a/cpu-exec.c b/cpu-exec.c
index 9d98f21..dd8da53 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -118,6 +118,7 @@
target_ulong cs_base,
uint64_t flags)
{
+ CPUState *cpu = ENV_GET_CPU(env);
TranslationBlock *tb, **ptb1;
unsigned int h;
tb_page_addr_t phys_pc, phys_page1;
@@ -165,12 +166,13 @@
tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
}
/* we add the TB in the virtual pc hash table */
- env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
+ cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
return tb;
}
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
+ CPUState *cpu = ENV_GET_CPU(env);
TranslationBlock *tb;
target_ulong cs_base, pc;
int flags;
@@ -179,7 +181,7 @@
always be the same before a given translated block
is executed. */
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
+ tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
tb->flags != flags)) {
tb = tb_find_slow(env, pc, cs_base, flags);
diff --git a/cputlb.c b/cputlb.c
index 0fbaa39..0eb1801 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -58,7 +58,7 @@
cpu->current_tb = NULL;
memset(env->tlb_table, -1, sizeof(env->tlb_table));
- memset(env->tb_jmp_cache, 0, sizeof(env->tb_jmp_cache));
+ memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
env->tlb_flush_addr = -1;
env->tlb_flush_mask = 0;
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index d036e8e..4272094 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -61,9 +61,6 @@
#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
-#define TB_JMP_CACHE_BITS 12
-#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
-
/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
addresses on the same page. The top bits are the same. This allows
TLB invalidation to quickly clear a subset of the hash table. */
@@ -135,7 +132,6 @@
#define CPU_COMMON \
/* soft mmu support */ \
CPU_COMMON_TLB \
- struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \
\
/* from this point: preserved by CPU reset */ \
/* ice debug support */ \
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 3156b16..ada8a5a 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -153,6 +153,9 @@
struct KVMState;
struct kvm_run;
+#define TB_JMP_CACHE_BITS 12
+#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
+
/**
* CPUState:
* @cpu_index: CPU index (informative).
@@ -219,6 +222,7 @@
void *env_ptr; /* CPUArchState */
struct TranslationBlock *current_tb;
+ struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
struct GDBRegisterState *gdb_regs;
int gdb_num_regs;
int gdb_num_g_regs;
diff --git a/qom/cpu.c b/qom/cpu.c
index a4c5073..fada2d4 100644
--- a/qom/cpu.c
+++ b/qom/cpu.c
@@ -244,6 +244,7 @@
cpu->icount_extra = 0;
cpu->icount_decr.u32 = 0;
cpu->can_do_io = 0;
+ memset(cpu->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
}
static bool cpu_common_has_work(CPUState *cs)
diff --git a/translate-all.c b/translate-all.c
index 6bb3933..c067011 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -704,9 +704,7 @@
tcg_ctx.tb_ctx.nb_tbs = 0;
CPU_FOREACH(cpu) {
- CPUArchState *env = cpu->env_ptr;
-
- memset(env->tb_jmp_cache, 0, sizeof(env->tb_jmp_cache));
+ memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}
memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
@@ -857,10 +855,8 @@
/* remove the TB from the hash list */
h = tb_jmp_cache_hash_func(tb->pc);
CPU_FOREACH(cpu) {
- CPUArchState *env = cpu->env_ptr;
-
- if (env->tb_jmp_cache[h] == tb) {
- env->tb_jmp_cache[h] = NULL;
+ if (cpu->tb_jmp_cache[h] == tb) {
+ cpu->tb_jmp_cache[h] = NULL;
}
}
@@ -1484,16 +1480,17 @@
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
+ CPUState *cpu = ENV_GET_CPU(env);
unsigned int i;
/* Discard jump cache entries for any tb which might potentially
overlap the flushed page. */
i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
- memset(&env->tb_jmp_cache[i], 0,
+ memset(&cpu->tb_jmp_cache[i], 0,
TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
i = tb_jmp_cache_hash_page(addr);
- memset(&env->tb_jmp_cache[i], 0,
+ memset(&cpu->tb_jmp_cache[i], 0,
TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
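
The last hunk keeps the existing two-block invalidation strategy, merely redirected at cpu->tb_jmp_cache. As a reading aid, here is a standalone sketch of that flush under the assumption of 4 KiB target pages (TARGET_PAGE_BITS == 12); the macro values and the page-hash derivation are reproduced approximately, so the exact bit layout should be checked against cpu-defs.h rather than taken from here.

/* Illustrative sketch of the per-page jump-cache flush performed by
 * tb_flush_jmp_cache(). Assumes 4 KiB target pages; the index-split
 * macros below are re-derived for the example, not copied verbatim. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define TARGET_PAGE_BITS  12                      /* assumed: 4 KiB pages */
#define TARGET_PAGE_SIZE  (1 << TARGET_PAGE_BITS)

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
#define TB_JMP_PAGE_BITS  (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE  (1 << TB_JMP_PAGE_BITS)
#define TB_JMP_PAGE_MASK  (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)

typedef uint64_t vaddr_t;
typedef struct TranslationBlock TranslationBlock;

typedef struct CPUState {
    TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
} CPUState;

/* Index of the first cache slot used by addresses on addr's page: the top
 * bits of the index identify the page and only the bottom TB_JMP_PAGE_BITS
 * vary within a page, so each page owns one contiguous block of
 * TB_JMP_PAGE_SIZE slots. */
static unsigned int tb_jmp_cache_hash_page(vaddr_t addr)
{
    vaddr_t tmp = addr ^ (addr >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

/* Mirror of the flush in the hunk above: clear the block for the page being
 * invalidated and for the preceding page, since a TB starting on the
 * preceding page may spill over into the flushed one. */
static void flush_jmp_cache(CPUState *cpu, vaddr_t addr)
{
    unsigned int i;

    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

int main(void)
{
    static CPUState cpu;

    flush_jmp_cache(&cpu, 0x40002000);
    printf("block base for 0x40002000: %u\n",
           tb_jmp_cache_hash_page(0x40002000));
    return 0;
}
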