mmu-hash*: Remove eaddr field from mmu_ctx_hash{32,64}
The eaddr field of mmu_ctx_hash{32,64} is used only to pass the effective
address from get_segment{32,64}() to find_pte{32,64}(). Pass it as a normal
parameter instead and drop the field.
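In outline, the refactoring has the following shape. This is a simplified,
self-contained sketch, not the QEMU code: the names (lookup_ctx, find_entry,
translate) and the page masking are illustrative stand-ins only.

    #include <stdio.h>

    /* Analogue of mmu_ctx_hash{32,64}: the context only carries results
     * of the lookup; the effective address is no longer stored here. */
    struct lookup_ctx {
        unsigned long raddr;   /* real (translated) address */
        int prot;              /* protection bits */
    };

    /* Analogue of find_pte{32,64}(): the effective address arrives as a
     * plain parameter instead of being read back from the context. */
    static int find_entry(struct lookup_ctx *ctx, unsigned long eaddr)
    {
        ctx->raddr = eaddr & ~0xfffUL;  /* illustrative 4K page masking */
        ctx->prot = 0x7;
        return 0;
    }

    /* Analogue of get_segment{32,64}(): no "ctx->eaddr = eaddr" step,
     * the address is simply forwarded to the callee. */
    static int translate(struct lookup_ctx *ctx, unsigned long eaddr)
    {
        return find_entry(ctx, eaddr);
    }

    int main(void)
    {
        struct lookup_ctx ctx;
        if (translate(&ctx, 0x12345678UL) == 0) {
            printf("raddr=0x%lx prot=%d\n", ctx.raddr, ctx.prot);
        }
        return 0;
    }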
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Alexander Graf <agraf@suse.de>
diff --git a/target-ppc/mmu-hash32.c b/target-ppc/mmu-hash32.c
index ad5ef44..8001563 100644
--- a/target-ppc/mmu-hash32.c
+++ b/target-ppc/mmu-hash32.c
@@ -43,7 +43,6 @@
struct mmu_ctx_hash32 {
hwaddr raddr; /* Real address */
- hwaddr eaddr; /* Effective address */
int prot; /* Protection bits */
hwaddr hash[2]; /* Pagetable hash values */
target_ulong ptem; /* Virtual segment ID | API */
@@ -314,8 +313,8 @@
}
/* PTE table lookup */
-static int find_pte32(CPUPPCState *env, struct mmu_ctx_hash32 *ctx, int h,
- int rwx, int target_page_bits)
+static int find_pte32(CPUPPCState *env, struct mmu_ctx_hash32 *ctx,
+ target_ulong eaddr, int h, int rwx, int target_page_bits)
{
hwaddr pteg_off;
target_ulong pte0, pte1;
@@ -371,7 +370,7 @@
/* We have a TLB that saves 4K pages, so let's
* split a huge page to 4k chunks */
if (target_page_bits != TARGET_PAGE_BITS) {
- ctx->raddr |= (ctx->eaddr & ((1 << target_page_bits) - 1))
+ ctx->raddr |= (eaddr & ((1 << target_page_bits) - 1))
& TARGET_PAGE_MASK;
}
return ret;
@@ -387,7 +386,6 @@
target_ulong sr, pgidx;
pr = msr_pr;
- ctx->eaddr = eaddr;
sr = env->sr[eaddr >> 28];
ctx->key = (((sr & SR32_KP) && (pr != 0)) ||
@@ -426,14 +424,14 @@
env->htab_base, env->htab_mask, vsid, ctx->ptem,
ctx->hash[0]);
/* Primary table lookup */
- ret = find_pte32(env, ctx, 0, rwx, target_page_bits);
+ ret = find_pte32(env, ctx, eaddr, 0, rwx, target_page_bits);
if (ret < 0) {
/* Secondary table lookup */
LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
" vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
" hash=" TARGET_FMT_plx "\n", env->htab_base,
env->htab_mask, vsid, ctx->ptem, ctx->hash[1]);
- ret2 = find_pte32(env, ctx, 1, rwx, target_page_bits);
+ ret2 = find_pte32(env, ctx, eaddr, 1, rwx, target_page_bits);
if (ret2 != -1) {
ret = ret2;
}
diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index f89d005..407c6e6 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -42,7 +42,6 @@
struct mmu_ctx_hash64 {
hwaddr raddr; /* Real address */
- hwaddr eaddr; /* Effective address */
int prot; /* Protection bits */
hwaddr hash[2]; /* Pagetable hash values */
target_ulong ptem; /* Virtual segment ID | API */
@@ -372,8 +371,8 @@
}
/* PTE table lookup */
-static int find_pte64(CPUPPCState *env, struct mmu_ctx_hash64 *ctx, int h,
- int rwx, int target_page_bits)
+static int find_pte64(CPUPPCState *env, struct mmu_ctx_hash64 *ctx,
+ target_ulong eaddr, int h, int rwx, int target_page_bits)
{
hwaddr pteg_off;
target_ulong pte0, pte1;
@@ -429,7 +428,7 @@
/* We have a TLB that saves 4K pages, so let's
* split a huge page to 4k chunks */
if (target_page_bits != TARGET_PAGE_BITS) {
- ctx->raddr |= (ctx->eaddr & ((1 << target_page_bits) - 1))
+ ctx->raddr |= (eaddr & ((1 << target_page_bits) - 1))
& TARGET_PAGE_MASK;
}
return ret;
@@ -444,7 +443,6 @@
int ret, ret2;
pr = msr_pr;
- ctx->eaddr = eaddr;
ppc_slb_t *slb;
target_ulong pageaddr;
int segment_bits;
@@ -500,14 +498,14 @@
env->htab_base, env->htab_mask, vsid, ctx->ptem,
ctx->hash[0]);
/* Primary table lookup */
- ret = find_pte64(env, ctx, 0, rwx, target_page_bits);
+ ret = find_pte64(env, ctx, eaddr, 0, rwx, target_page_bits);
if (ret < 0) {
/* Secondary table lookup */
LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
" vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
" hash=" TARGET_FMT_plx "\n", env->htab_base,
env->htab_mask, vsid, ctx->ptem, ctx->hash[1]);
- ret2 = find_pte64(env, ctx, 1, rwx, target_page_bits);
+ ret2 = find_pte64(env, ctx, eaddr, 1, rwx, target_page_bits);
if (ret2 != -1) {
ret = ret2;
}