| /* |
| * PowerPC emulation for qemu: main translation routines. |
| * |
| * Copyright (c) 2003-2007 Jocelyn Mayer |
| * Copyright (C) 2011 Freescale Semiconductor, Inc. |
| * |
| * This library is free software; you can redistribute it and/or |
| * modify it under the terms of the GNU Lesser General Public |
| * License as published by the Free Software Foundation; either |
| * version 2 of the License, or (at your option) any later version. |
| * |
| * This library is distributed in the hope that it will be useful, |
| * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| * Lesser General Public License for more details. |
| * |
| * You should have received a copy of the GNU Lesser General Public |
| * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
| */ |
| |
| #include "qemu/osdep.h" |
| #include "cpu.h" |
| #include "disas/disas.h" |
| #include "exec/exec-all.h" |
| #include "tcg-op.h" |
| #include "qemu/host-utils.h" |
| #include "exec/cpu_ldst.h" |
| |
| #include "exec/helper-proto.h" |
| #include "exec/helper-gen.h" |
| |
| #include "trace-tcg.h" |
| #include "exec/log.h" |
| |
| |
| #define CPU_SINGLE_STEP 0x1 |
| #define CPU_BRANCH_STEP 0x2 |
| #define GDBSTUB_SINGLE_STEP 0x4 |
| |
| /* Include definitions for instructions classes and implementations flags */ |
| //#define PPC_DEBUG_DISAS |
| //#define DO_PPC_STATISTICS |
| |
| #ifdef PPC_DEBUG_DISAS |
| # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__) |
| #else |
| # define LOG_DISAS(...) do { } while (0) |
| #endif |
| /*****************************************************************************/ |
| /* Code translation helpers */ |
| |
| /* global register indexes */ |
/* global register indexes */
static TCGv_env cpu_env;
/*
 * Backing storage for the names of every TCG global created in
 * ppc_translate_init().  The size accounts for each formatted name
 * plus its NUL terminator: e.g. "r0".."r9" are 3 bytes each and
 * "r10".."r31" are 4 bytes each, hence 10*3 + 22*4 for the GPRs.
 */
static char cpu_reg_names[10*3 + 22*4 /* GPR */
    + 10*4 + 22*5 /* SPE GPRh */
    + 10*4 + 22*5 /* FPR */
    + 2*(10*6 + 22*7) /* AVRh, AVRl */
    + 10*5 + 22*6 /* VSR */
    + 8*5 /* CRF */];
static TCGv cpu_gpr[32];          /* general purpose registers */
static TCGv cpu_gprh[32];         /* upper GPR halves (SPE) */
static TCGv_i64 cpu_fpr[32];      /* floating point registers */
static TCGv_i64 cpu_avrh[32], cpu_avrl[32]; /* AltiVec register halves */
static TCGv_i64 cpu_vsr[32];      /* VSX registers */
static TCGv_i32 cpu_crf[8];       /* condition register fields */
static TCGv cpu_nip;              /* next instruction pointer */
static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
#if defined(TARGET_PPC64)
static TCGv cpu_cfar;
#endif
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca; /* XER and its cached bits */
static TCGv cpu_reserve;          /* lwarx/stwcx. reservation address */
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;
| |
| #include "exec/gen-icount.h" |
| |
/*
 * Create the TCG globals mirroring the CPUPPCState fields used by the
 * translator (GPRs, SPE GPR highs, FPRs, AVR halves, VSRs, CR fields,
 * nip/msr/ctr/lr, XER bits, reservation address, FPSCR, access type).
 * Idempotent: a second call returns immediately via done_init.
 *
 * Register names are formatted into cpu_reg_names; after each
 * tcg_global_mem_new*() call, p is advanced by the formatted length
 * including the NUL terminator (e.g. "r9" = 3 bytes, "r10" = 4), which
 * must stay in sync with the sizing arithmetic of cpu_reg_names above.
 */
void ppc_translate_init(void)
{
    int i;
    char* p;
    size_t cpu_reg_names_size;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }

    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUPPCState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        snprintf(p, cpu_reg_names_size, "fp%d", i);
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUPPCState, fpr[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        /* AVR registers are stored as two u64 halves; which half is the
           architectural "high" half depends on host byte order. */
        snprintf(p, cpu_reg_names_size, "avr%dH", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#else
        cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;

        snprintf(p, cpu_reg_names_size, "avr%dL", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#else
        cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;
        snprintf(p, cpu_reg_names_size, "vsr%d", i);
        cpu_vsr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUPPCState, vsr[i]), p);
        p += (i < 10) ? 5 : 6;
        cpu_reg_names_size -= (i < 10) ? 5 : 6;
    }

    cpu_nip = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    cpu_xer = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ca), "CA");

    cpu_reserve = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");

    cpu_fpscr = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPUPPCState, access_type), "access_type");

    done_init = 1;
}
| |
| /* internal defines */ |
/* internal defines */
/* Per-translation-block state carried through the decode/generate pass. */
struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong nip;          /* address of the next instruction */
    uint32_t opcode;           /* instruction currently being translated */
    uint32_t exception;        /* exception raised during translation, if any */
    /* MSR bits affecting how memory is accessed */
    bool pr, hv, dr, le_mode;
    bool lazy_tlb_flush;
    int mem_idx;
    int access_type;           /* last value written to cpu_access_type */
    /* Translation flags */
    TCGMemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
    bool sf_mode;              /* 64-bit mode active (see NARROW_MODE) */
    bool has_cfar;
#endif
    bool fpu_enabled;
    bool altivec_enabled;
    bool vsx_enabled;
    bool spe_enabled;
    bool tm_enabled;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;
    uint64_t insns_flags;      /* PPC_* instruction-class availability */
    uint64_t insns_flags2;
};
| |
| /* Return true iff byteswap is needed in a scalar memop */ |
| static inline bool need_byteswap(const DisasContext *ctx) |
| { |
| #if defined(TARGET_WORDS_BIGENDIAN) |
| return ctx->le_mode; |
| #else |
| return !ctx->le_mode; |
| #endif |
| } |
| |
/* True when active word size < size of target_long. */
#ifdef TARGET_PPC64
# define NARROW_MODE(C)  (!(C)->sf_mode)
#else
# define NARROW_MODE(C)  0
#endif

/* Decode-table entry describing one instruction form. */
struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
    const char *oname;         /* opcode name, for statistics/dump output */
#endif
#if defined(DO_PPC_STATISTICS)
    uint64_t count;            /* execution count, for statistics */
#endif
};
| |
/* Reset the softfloat exception status before an FP operation. */
static inline void gen_reset_fpstatus(void)
{
    gen_helper_reset_fpstatus(cpu_env);
}

/* Set FPSCR.FPRF from arg and check for pending FP exceptions. */
static inline void gen_compute_fprf(TCGv_i64 arg)
{
    gen_helper_compute_fprf(cpu_env, arg);
    gen_helper_float_check_status(cpu_env);
}
| |
| static inline void gen_set_access_type(DisasContext *ctx, int access_type) |
| { |
| if (ctx->access_type != access_type) { |
| tcg_gen_movi_i32(cpu_access_type, access_type); |
| ctx->access_type = access_type; |
| } |
| } |
| |
| static inline void gen_update_nip(DisasContext *ctx, target_ulong nip) |
| { |
| if (NARROW_MODE(ctx)) { |
| nip = (uint32_t)nip; |
| } |
| tcg_gen_movi_tl(cpu_nip, nip); |
| } |
| |
/* Externally-callable hook: flush the current translation nip into the
   cpu_nip global.  Takes void* so callers need not know DisasContext. */
void gen_update_current_nip(void *opaque)
{
    DisasContext *ctx = opaque;

    tcg_gen_movi_tl(cpu_nip, ctx->nip);
}
| |
| static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error) |
| { |
| TCGv_i32 t0, t1; |
| if (ctx->exception == POWERPC_EXCP_NONE) { |
| gen_update_nip(ctx, ctx->nip); |
| } |
| t0 = tcg_const_i32(excp); |
| t1 = tcg_const_i32(error); |
| gen_helper_raise_exception_err(cpu_env, t0, t1); |
| tcg_temp_free_i32(t0); |
| tcg_temp_free_i32(t1); |
| ctx->exception = (excp); |
| } |
| |
| static void gen_exception(DisasContext *ctx, uint32_t excp) |
| { |
| TCGv_i32 t0; |
| if (ctx->exception == POWERPC_EXCP_NONE) { |
| gen_update_nip(ctx, ctx->nip); |
| } |
| t0 = tcg_const_i32(excp); |
| gen_helper_raise_exception(cpu_env, t0); |
| tcg_temp_free_i32(t0); |
| ctx->exception = (excp); |
| } |
| |
/* Raise EXCP_DEBUG (gdbstub breakpoint/single-step).  nip is synced
   unless a branch or sync exception already redirected the flow. */
static void gen_debug_exception(DisasContext *ctx)
{
    TCGv_i32 t0;

    if ((ctx->exception != POWERPC_EXCP_BRANCH) &&
        (ctx->exception != POWERPC_EXCP_SYNC)) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
}
| |
/* Raise an invalid-instruction emulation exception. */
static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error);
}

/* Raise a privileged-instruction program check. */
static inline void gen_priv_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error);
}

/* Raise a hypervisor-privilege emulation exception. */
static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error);
}
| |
/* Stop translation */
static inline void gen_stop_exception(DisasContext *ctx)
{
    /* nip must be synced here since translation of this TB ends. */
    gen_update_nip(ctx, ctx->nip);
    ctx->exception = POWERPC_EXCP_STOP;
}

#ifndef CONFIG_USER_ONLY
/* No need to update nip here, as execution flow will change */
static inline void gen_sync_exception(DisasContext *ctx)
{
    ctx->exception = POWERPC_EXCP_SYNC;
}
#endif
| |
/* Convenience wrappers around GEN_OPCODE/GEN_OPCODE2 for declaring
   opcode-table entries; the _E variants take an extended type2 class
   and the 2 variants an explicit opcode name string. */
#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2) \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type) \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2) \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)

/* One opcode-table entry: the three opcode fields plus its handler. */
typedef struct opcode_t {
    unsigned char opc1, opc2, opc3;
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[5];
#else
    unsigned char pad[1];
#endif
    opc_handler_t handler;
    const char *oname;
} opcode_t;
| |
/* Helpers for priv. check */
/* Raise a privileged-opcode exception and return from the handler. */
#define GEN_PRIV                                                \
    do {                                                        \
        gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; \
    } while (0)

#if defined(CONFIG_USER_ONLY)
/* User mode: every privileged check fails unconditionally. */
#define CHK_HV GEN_PRIV
#define CHK_SV GEN_PRIV
#define CHK_HVRM GEN_PRIV
#else
/* CHK_HV: require hypervisor state (not problem state, HV set). */
#define CHK_HV                                                          \
    do {                                                                \
        if (unlikely(ctx->pr || !ctx->hv)) {                            \
            GEN_PRIV;                                                   \
        }                                                               \
    } while (0)
/* CHK_SV: require supervisor state (not problem state). */
#define CHK_SV                   \
    do {                         \
        if (unlikely(ctx->pr)) { \
            GEN_PRIV;            \
        }                        \
    } while (0)
/* CHK_HVRM: require hypervisor real mode (HV set, DR clear). */
#define CHK_HVRM                                            \
    do {                                                    \
        if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) {     \
            GEN_PRIV;                                       \
        }                                                   \
    } while (0)
#endif

#define CHK_NONE
| |
| |
| /*****************************************************************************/ |
| /*** Instruction decoding ***/ |
/* Define an inline extractor returning the nb-bit field of the opcode
   that starts at bit position shift (counting from the LSB). */
#define EXTRACT_HELPER(name, shift, nb)                                       \
static inline uint32_t name(uint32_t opcode)                                  \
{                                                                             \
    return (opcode >> (shift)) & ((1 << (nb)) - 1);                           \
}

/* Sign-extending variant.  NOTE(review): the int16_t cast means this is
   only correct for nb == 16 fields (its sole use below is SIMM). */
#define EXTRACT_SHELPER(name, shift, nb)                                      \
static inline int32_t name(uint32_t opcode)                                   \
{                                                                             \
    return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1));                \
}

/* Extractor for a field split across two opcode ranges: nb1 high bits
   at shift1 concatenated with nb2 low bits at shift2. */
#define EXTRACT_HELPER_SPLIT(name, shift1, nb1, shift2, nb2)                  \
static inline uint32_t name(uint32_t opcode)                                  \
{                                                                             \
    return (((opcode >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) |             \
            ((opcode >> (shift2)) & ((1 << (nb2)) - 1));                      \
}
/* Opcode part 1 */
EXTRACT_HELPER(opc1, 26, 6);
/* Opcode part 2 */
EXTRACT_HELPER(opc2, 1, 5);
/* Opcode part 3 */
EXTRACT_HELPER(opc3, 6, 5);
/* Update Cr0 flags */
EXTRACT_HELPER(Rc, 0, 1);
/* Update Cr6 flags (Altivec) */
EXTRACT_HELPER(Rc21, 10, 1);
/* Destination */
EXTRACT_HELPER(rD, 21, 5);
/* Source */
EXTRACT_HELPER(rS, 21, 5);
/* First operand */
EXTRACT_HELPER(rA, 16, 5);
/* Second operand */
EXTRACT_HELPER(rB, 11, 5);
/* Third operand */
EXTRACT_HELPER(rC, 6, 5);
/*** Get CRn ***/
EXTRACT_HELPER(crfD, 23, 3);
EXTRACT_HELPER(crfS, 18, 3);
EXTRACT_HELPER(crbD, 21, 5);
EXTRACT_HELPER(crbA, 16, 5);
EXTRACT_HELPER(crbB, 11, 5);
/* SPR / TBL */
EXTRACT_HELPER(_SPR, 11, 10);
/* SPR number: the instruction encodes the two 5-bit halves swapped,
   so exchange them to recover the architected SPR number. */
static inline uint32_t SPR(uint32_t opcode)
{
    uint32_t sprn = _SPR(opcode);

    return ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
}
/*** Get constants ***/
/* 16 bits signed immediate value */
EXTRACT_SHELPER(SIMM, 0, 16);
/* 16 bits unsigned immediate value */
EXTRACT_HELPER(UIMM, 0, 16);
/* 5 bits signed immediate value */
EXTRACT_HELPER(SIMM5, 16, 5);
/* 5 bits unsigned immediate value */
EXTRACT_HELPER(UIMM5, 16, 5);
/* Bit count */
EXTRACT_HELPER(NB, 11, 5);
/* Shift count */
EXTRACT_HELPER(SH, 11, 5);
/* Vector shift count */
EXTRACT_HELPER(VSH, 6, 4);
/* Mask start */
EXTRACT_HELPER(MB, 6, 5);
/* Mask end */
EXTRACT_HELPER(ME, 1, 5);
/* Trap operand */
EXTRACT_HELPER(TO, 21, 5);

EXTRACT_HELPER(CRM, 12, 8);

#ifndef CONFIG_USER_ONLY
EXTRACT_HELPER(SR, 16, 4);
#endif

/* mtfsf/mtfsfi */
EXTRACT_HELPER(FPBF, 23, 3);
EXTRACT_HELPER(FPIMM, 12, 4);
EXTRACT_HELPER(FPL, 25, 1);
EXTRACT_HELPER(FPFLM, 17, 8);
EXTRACT_HELPER(FPW, 16, 1);
| |
| /*** Jump target decoding ***/ |
| /* Immediate address */ |
| static inline target_ulong LI(uint32_t opcode) |
| { |
| return (opcode >> 0) & 0x03FFFFFC; |
| } |
| |
/* BD: 14-bit word-aligned conditional-branch displacement field. */
static inline uint32_t BD(uint32_t opcode)
{
    return opcode & 0xFFFC;
}
| |
/* Branch condition operand and condition-bit index */
EXTRACT_HELPER(BO, 21, 5);
EXTRACT_HELPER(BI, 16, 5);
/* Absolute/relative address */
EXTRACT_HELPER(AA, 1, 1);
/* Link */
EXTRACT_HELPER(LK, 0, 1);

/* DFP Z22-form */
EXTRACT_HELPER(DCM, 10, 6)

/* DFP Z23-form */
EXTRACT_HELPER(RMC, 9, 2)
| |
/* Create a mask between <start> and <end> bits */
/*
 * Bit numbering is the PowerPC (IBM) convention: bit 0 is the MSB.
 * When start > end the generated mask wraps around (complement of the
 * end+1..start-1 mask), as used by rotate-and-mask instructions.
 */
static inline target_ulong MASK(uint32_t start, uint32_t end)
{
    target_ulong ret;

    /* Fast paths for masks anchored at either end of the word. */
#if defined(TARGET_PPC64)
    if (likely(start == 0)) {
        ret = UINT64_MAX << (63 - end);
    } else if (likely(end == 63)) {
        ret = UINT64_MAX >> start;
    }
#else
    if (likely(start == 0)) {
        ret = UINT32_MAX << (31 - end);
    } else if (likely(end == 31)) {
        ret = UINT32_MAX >> start;
    }
#endif
    /* General case: difference of two right-aligned all-ones masks. */
    else {
        ret = (((target_ulong)(-1ULL)) >> (start)) ^
            (((target_ulong)(-1ULL) >> (end)) >> 1);
        if (unlikely(start > end))
            return ~ret;
    }

    return ret;
}
| |
/* VSX register numbers are 6 bits, split between a 5-bit field and an
   extension bit elsewhere in the opcode. */
EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xS, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xA, 2, 1, 16, 5);
EXTRACT_HELPER_SPLIT(xB, 1, 1, 11, 5);
EXTRACT_HELPER_SPLIT(xC, 3, 1, 6, 5);
EXTRACT_HELPER(DM, 8, 2);
EXTRACT_HELPER(UIM, 16, 2);
EXTRACT_HELPER(SHW, 8, 2);
EXTRACT_HELPER(SP, 19, 2);
/*****************************************************************************/
/* PowerPC instructions table */

#if defined(DO_PPC_STATISTICS)
/* Statistics builds store the opcode name in the handler too, so the
   per-opcode execution counters can be labelled. */
/* Single-form opcode entry (inval mask used for both Rc variants). */
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = stringify(name),                                             \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/* Dual-form entry with separate invalid-bit masks for Rc=0 / Rc=1. */
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = stringify(name),                                             \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/* Like GEN_OPCODE but with an explicit opcode-name string. */
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = onam,                                                        \
    },                                                                        \
    .oname = onam,                                                            \
}
#else
/* Single-form opcode entry (inval mask used for both Rc variants). */
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/* Dual-form entry with separate invalid-bit masks for Rc=0 / Rc=1. */
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/* Like GEN_OPCODE but with an explicit opcode-name string. */
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
#endif
| |
/* SPR load/store helpers */
/* Load SPR number reg from env into TCG value t. */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

/* Store TCG value t into SPR number reg in env. */
static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}
| |
/* Invalid instruction */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}

/* Fallback table entry for undecoded opcodes: every bit is "invalid"
   and the handler raises the invalid-instruction exception. */
static opc_handler_t invalid_handler = {
    .inval1  = 0xFFFFFFFF,
    .inval2  = 0xFFFFFFFF,
    .type    = PPC_NONE,
    .type2   = PPC_NONE,
    .handler = gen_invalid,
};
| |
| /*** Integer comparison ***/ |
| |
| static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf) |
| { |
| TCGv t0 = tcg_temp_new(); |
| TCGv_i32 t1 = tcg_temp_new_i32(); |
| |
| tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so); |
| |
| tcg_gen_setcond_tl((s ? TCG_COND_LT: TCG_COND_LTU), t0, arg0, arg1); |
| tcg_gen_trunc_tl_i32(t1, t0); |
| tcg_gen_shli_i32(t1, t1, CRF_LT); |
| tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1); |
| |
| tcg_gen_setcond_tl((s ? TCG_COND_GT: TCG_COND_GTU), t0, arg0, arg1); |
| tcg_gen_trunc_tl_i32(t1, t0); |
| tcg_gen_shli_i32(t1, t1, CRF_GT); |
| tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1); |
| |
| tcg_gen_setcond_tl(TCG_COND_EQ, t0, arg0, arg1); |
| tcg_gen_trunc_tl_i32(t1, t0); |
| tcg_gen_shli_i32(t1, t1, CRF_EQ); |
| tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1); |
| |
| tcg_temp_free(t0); |
| tcg_temp_free_i32(t1); |
| } |
| |
| static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf) |
| { |
| TCGv t0 = tcg_const_tl(arg1); |
| gen_op_cmp(arg0, t0, s, crf); |
| tcg_temp_free(t0); |
| } |
| |
| static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf) |
| { |
| TCGv t0, t1; |
| t0 = tcg_temp_new(); |
| t1 = tcg_temp_new(); |
| if (s) { |
| tcg_gen_ext32s_tl(t0, arg0); |
| tcg_gen_ext32s_tl(t1, arg1); |
| } else { |
| tcg_gen_ext32u_tl(t0, arg0); |
| tcg_gen_ext32u_tl(t1, arg1); |
| } |
| gen_op_cmp(t0, t1, s, crf); |
| tcg_temp_free(t1); |
| tcg_temp_free(t0); |
| } |
| |
/* Immediate-operand variant of gen_op_cmp32. */
static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp32(arg0, t0, s, crf);
    tcg_temp_free(t0);
}
| |
| static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg) |
| { |
| if (NARROW_MODE(ctx)) { |
| gen_op_cmpi32(reg, 0, 1, 0); |
| } else { |
| gen_op_cmpi(reg, 0, 1, 0); |
| } |
| } |
| |
/* cmp */
/* Opcode bit 21 (presumably the L field — verify against the ISA)
   together with 64-bit availability selects full-width vs 32-bit
   comparison in all four cmp* handlers below. */
static void gen_cmp(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   1, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     1, crfD(ctx->opcode));
    }
}

/* cmpi */
static void gen_cmpi(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                    1, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                      1, crfD(ctx->opcode));
    }
}

/* cmpl */
static void gen_cmpl(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   0, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     0, crfD(ctx->opcode));
    }
}

/* cmpli */
static void gen_cmpli(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                    0, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                      0, crfD(ctx->opcode));
    }
}
| |
/* isel (PowerPC 2.03 specification) */
/* rD = (CR bit bi set) ? rA : rB, where rA==0 reads as the value 0. */
static void gen_isel(DisasContext *ctx)
{
    uint32_t bi = rC(ctx->opcode);
    uint32_t mask = 0x08 >> (bi & 0x03);   /* bit position within the CR field */
    TCGv t0 = tcg_temp_new();
    TCGv zr;

    tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
    tcg_gen_andi_tl(t0, t0, mask);

    zr = tcg_const_tl(0);
    tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
                       rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
                       cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(zr);
    tcg_temp_free(t0);
}
| |
/* cmpb: PowerPC 2.05 specification */
/* Byte-wise compare of rS and rB into rA, done in a helper. */
static void gen_cmpb(DisasContext *ctx)
{
    gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
}
| |
| /*** Integer arithmetic ***/ |
| |
| static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0, |
| TCGv arg1, TCGv arg2, int sub) |
| { |
| TCGv t0 = tcg_temp_new(); |
| |
| tcg_gen_xor_tl(cpu_ov, arg0, arg2); |
| tcg_gen_xor_tl(t0, arg1, arg2); |
| if (sub) { |
| tcg_gen_and_tl(cpu_ov, cpu_ov, t0); |
| } else { |
| tcg_gen_andc_tl(cpu_ov, cpu_ov, t0); |
| } |
| tcg_temp_free(t0); |
| if (NARROW_MODE(ctx)) { |
| tcg_gen_ext32s_tl(cpu_ov, cpu_ov); |
| } |
| tcg_gen_shri_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1); |
| tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); |
| } |
| |
/* Common add function */
/*
 * ret = arg1 + arg2 [+ CA], optionally computing XER.CA, XER.OV and
 * CR0.  A scratch destination is used whenever flags are computed so
 * that ret may alias an input operand.
 */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, bool add_ca, bool compute_ca,
                                    bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        if (NARROW_MODE(ctx)) {
            /* Caution: a non-obvious corner case of the spec is that we
               must produce the *entire* 64-bit addition, but produce the
               carry into bit 32. */
            TCGv t1 = tcg_temp_new();
            tcg_gen_xor_tl(t1, arg1, arg2);         /* add without carry */
            tcg_gen_add_tl(t0, arg1, arg2);
            if (add_ca) {
                tcg_gen_add_tl(t0, t0, cpu_ca);
            }
            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);    /* extract bit 32 */
            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
        } else {
            /* Full-width add: let add2 produce the carry-out directly. */
            TCGv zero = tcg_const_tl(0);
            if (add_ca) {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, cpu_ca, zero);
                tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, arg2, zero);
            } else {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, arg2, zero);
            }
            tcg_temp_free(zero);
        }
    } else {
        tcg_gen_add_tl(t0, arg1, arg2);
        if (add_ca) {
            tcg_gen_add_tl(t0, t0, cpu_ca);
        }
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
/* Add functions with two operands */
#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov)         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}
/* Add functions with one operand and one immediate */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val,                        \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], t0,                            \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
    tcg_temp_free(t0);                                                        \
}

/* add add. addo addo. */
GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
/* addc addc. addco addco. */
GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
/* adde adde. addeo addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
/* addme addme. addmeo addmeo. */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
/* addze addze. addzeo addzeo.*/
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
| /* addi */ |
| static void gen_addi(DisasContext *ctx) |
| { |
| target_long simm = SIMM(ctx->opcode); |
| |
| if (rA(ctx->opcode) == 0) { |
| /* li case */ |
| tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm); |
| } else { |
| tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)], |
| cpu_gpr[rA(ctx->opcode)], simm); |
| } |
| } |
/* addic addic.*/
/* rD = rA + SIMM, always computing CA; compute_rc0 selects addic. */
static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
{
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                     c, 0, 1, 0, compute_rc0);
    tcg_temp_free(c);
}

static void gen_addic(DisasContext *ctx)
{
    gen_op_addic(ctx, 0);
}

static void gen_addic_(DisasContext *ctx)
{
    gen_op_addic(ctx, 1);
}
| |
/* addis */
/* rD = rA + (SIMM << 16), where rA == 0 means the literal value 0.
   NOTE(review): simm is signed, so `simm << 16` left-shifts a possibly
   negative value — relies on the compiler's two's-complement shift
   behavior rather than strict ISO C. */
static void gen_addis(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* lis case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], simm << 16);
    }
}
| |
/*
 * 32-bit divide: ret = arg1 / arg2, signed or unsigned per sign.
 * Division by zero — and, for signed division, INT32_MIN / -1 — takes
 * the l1 path, which produces the fallback result and sets OV/SO when
 * compute_ov is requested.
 */
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    TCGv_i32 t0 = tcg_temp_local_new_i32();
    TCGv_i32 t1 = tcg_temp_local_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    tcg_gen_brcondi_i32(TCG_COND_EQ, t1, 0, l1);
    if (sign) {
        TCGLabel *l3 = gen_new_label();
        /* Also trap the overflowing INT32_MIN / -1 case. */
        tcg_gen_brcondi_i32(TCG_COND_NE, t1, -1, l3);
        tcg_gen_brcondi_i32(TCG_COND_EQ, t0, INT32_MIN, l1);
        gen_set_label(l3);
        tcg_gen_div_i32(t0, t0, t1);
    } else {
        tcg_gen_divu_i32(t0, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 0);
    }
    tcg_gen_br(l2);
    gen_set_label(l1);
    /* Invalid division: result is sign-fill of the dividend (signed)
       or zero (unsigned). */
    if (sign) {
        tcg_gen_sari_i32(t0, t0, 31);
    } else {
        tcg_gen_movi_i32(t0, 0);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 1);
        tcg_gen_movi_tl(cpu_so, 1);
    }
    gen_set_label(l2);
    tcg_gen_extu_i32_tl(ret, t0);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}
/* Div functions */
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     sign, compute_ov);                                       \
}
/* divwu divwu. divwuo divwuo. */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw divw. divwo divwo. */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);

/* div[wd]eu[o][.] */
/* Extended divide: the whole operation lives in a helper, which also
   handles OV when requested via the t0 flag argument. */
#define GEN_DIVE(name, hlpr, compute_ov)                                      \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 t0 = tcg_const_i32(compute_ov);                                  \
    gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env,                      \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
    tcg_temp_free_i32(t0);                                                    \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);                           \
    }                                                                         \
}

GEN_DIVE(divweu, divweu, 0);
GEN_DIVE(divweuo, divweu, 1);
GEN_DIVE(divwe, divwe, 0);
GEN_DIVE(divweo, divwe, 1);
| |
#if defined(TARGET_PPC64)
/*
 * 64-bit divide: ret = arg1 / arg2, signed or unsigned per sign.
 * Division by zero — and, for signed division, INT64_MIN / -1 — takes
 * the l1 path, which produces the fallback result and sets OV/SO when
 * compute_ov is requested.
 */
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();

    tcg_gen_brcondi_i64(TCG_COND_EQ, arg2, 0, l1);
    if (sign) {
        TCGLabel *l3 = gen_new_label();
        /* Also trap the overflowing INT64_MIN / -1 case. */
        tcg_gen_brcondi_i64(TCG_COND_NE, arg2, -1, l3);
        tcg_gen_brcondi_i64(TCG_COND_EQ, arg1, INT64_MIN, l1);
        gen_set_label(l3);
        tcg_gen_div_i64(ret, arg1, arg2);
    } else {
        tcg_gen_divu_i64(ret, arg1, arg2);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 0);
    }
    tcg_gen_br(l2);
    gen_set_label(l1);
    /* Invalid division: result is sign-fill of the dividend (signed)
       or zero (unsigned). */
    if (sign) {
        tcg_gen_sari_i64(ret, arg1, 63);
    } else {
        tcg_gen_movi_i64(ret, 0);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 1);
        tcg_gen_movi_tl(cpu_so, 1);
    }
    gen_set_label(l2);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divdu divdu. divduo divduo. */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd divd. divdo divdo. */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

GEN_DIVE(divdeu, divdeu, 0);
GEN_DIVE(divdeuo, divdeu, 1);
GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
#endif
| |
/* mulhw mulhw. */
/* rD = high 32 bits of the signed 32x32 product of rA and rB. */
static void gen_mulhw(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
    /* t1 holds the high half of the product. */
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
| |
| /* mulhwu mulhwu. */ |
| static void gen_mulhwu(DisasContext *ctx) |
| { |
| TCGv_i32 t0 = tcg_temp_new_i32(); |
| TCGv_i32 t1 = tcg_temp_new_i32(); |
| |
| tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); |
| tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); |
| tcg_gen_mulu2_i32(t0, t1, t0, t1); |
| tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); |
| tcg_temp_free_i32(t0); |
| tcg_temp_free_i32(t1); |
| if (unlikely(Rc(ctx->opcode) != 0)) |
| gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
| } |
| |
| /* mullw mullw. */ |
static void gen_mullw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    /* 64-bit target: rD receives the full 64-bit product of the
       sign-extended low 32 bits of rA and rB. */
    TCGv_i64 t0, t1;
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
#else
    /* 32-bit target: plain low-part multiply. */
    tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
#endif
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
| |
| /* mullwo mullwo. */ |
static void gen_mullwo(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    /* t0 = low half of the 32x32 signed product, t1 = high half. */
    tcg_gen_muls2_i32(t0, t1, t0, t1);
#if defined(TARGET_PPC64)
    /* On 64-bit targets rD keeps the full 64-bit product. */
    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
#endif

    /* OV iff the high half is not the sign extension of the low half. */
    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
    tcg_gen_extu_i32_tl(cpu_ov, t0);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
| |
| /* mulli */ |
| static void gen_mulli(DisasContext *ctx) |
| { |
| tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], |
| SIMM(ctx->opcode)); |
| } |
| |
| #if defined(TARGET_PPC64) |
| /* mulhd mulhd. */ |
| static void gen_mulhd(DisasContext *ctx) |
| { |
| TCGv lo = tcg_temp_new(); |
| tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)], |
| cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); |
| tcg_temp_free(lo); |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
| } |
| } |
| |
| /* mulhdu mulhdu. */ |
| static void gen_mulhdu(DisasContext *ctx) |
| { |
| TCGv lo = tcg_temp_new(); |
| tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)], |
| cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); |
| tcg_temp_free(lo); |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
| } |
| } |
| |
| /* mulld mulld. */ |
| static void gen_mulld(DisasContext *ctx) |
| { |
| tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], |
| cpu_gpr[rB(ctx->opcode)]); |
| if (unlikely(Rc(ctx->opcode) != 0)) |
| gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
| } |
| |
| /* mulldo mulldo. */ |
static void gen_mulldo(DisasContext *ctx)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* t0 = low 64 bits of the signed product, t1 = high 64 bits. */
    tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);

    /* OV iff the high half differs from the sign extension of the low. */
    tcg_gen_sari_i64(t0, t0, 63);
    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
| #endif |
| |
| /* Common subf function */ |
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, bool add_ca, bool compute_ca,
                                     bool compute_ov, bool compute_rc0)
{
    /* ret = arg2 - arg1 (implemented as ~arg1 + arg2 + 1), optionally
     * consuming CA (add_ca), producing CA (compute_ca), OV/SO
     * (compute_ov) and CR0 (compute_rc0). */
    TCGv t0 = ret;

    /* Use a temp so that ret may alias arg1/arg2 while flags are built. */
    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        /* dest = ~arg1 + arg2 [+ ca]. */
        if (NARROW_MODE(ctx)) {
            /* Caution: a non-obvious corner case of the spec is that we
               must produce the *entire* 64-bit addition, but produce the
               carry into bit 32. */
            TCGv inv1 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            if (add_ca) {
                tcg_gen_add_tl(t0, arg2, cpu_ca);
            } else {
                tcg_gen_addi_tl(t0, arg2, 1);
            }
            tcg_gen_xor_tl(t1, arg2, inv1);         /* add without carry */
            tcg_gen_add_tl(t0, t0, inv1);
            tcg_temp_free(inv1);
            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changes w/ carry */
            tcg_temp_free(t1);
            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);    /* extract bit 32 */
            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
        } else if (add_ca) {
            /* Two double-width adds so the final CA falls out of add2. */
            TCGv zero, inv1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            zero = tcg_const_tl(0);
            tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
            tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
            tcg_temp_free(zero);
            tcg_temp_free(inv1);
        } else {
            /* Plain subtract: CA is simply arg2 >= arg1 (unsigned). */
            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
            tcg_gen_sub_tl(t0, arg2, arg1);
        }
    } else if (add_ca) {
        /* Since we're ignoring carry-out, we can simplify the
           standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1. */
        tcg_gen_sub_tl(t0, arg2, arg1);
        tcg_gen_add_tl(t0, t0, cpu_ca);
        tcg_gen_subi_tl(t0, t0, 1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    /* Copy back and release the temp if we did not write ret directly. */
    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
| /* Sub functions with Two operands functions */ |
/* Emit a register-register subf variant via gen_op_arith_subf (above). */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* Sub functions with one operand and one immediate */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], t0,                           \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
    tcg_temp_free(t0);                                                        \
}
/* subf subf. subfo subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc subfc. subfco subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe subfe. subfeo subfeo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme subfme. subfmeo subfmeo. */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze subfze. subfzeo subfzeo.*/
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
| |
| /* subfic */ |
| static void gen_subfic(DisasContext *ctx) |
| { |
| TCGv c = tcg_const_tl(SIMM(ctx->opcode)); |
| gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], |
| c, 0, 1, 0, 0); |
| tcg_temp_free(c); |
| } |
| |
| /* neg neg. nego nego. */ |
static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
{
    /* Negation is subf from zero: rD = 0 - rA; CA is never produced. */
    TCGv zero = tcg_const_tl(0);
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      zero, 0, 0, compute_ov, Rc(ctx->opcode));
    tcg_temp_free(zero);
}
| |
/* neg & neg.: negate rA without overflow tracking. */
static void gen_neg(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 0);
}
| |
/* nego & nego.: negate rA, updating OV/SO. */
static void gen_nego(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 1);
}
| |
| /*** Integer logical ***/ |
/* Two-source logical op: rA = rS <op> rB, optional CR0 (Rc bit). */
#define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],                \
           cpu_gpr[rB(ctx->opcode)]);                                         \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}

/* One-source logical op: rA = <op>(rS), optional CR0 (Rc bit). */
#define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);               \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}
| |
/* and & and. */
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
/* andc & andc. */
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);

/* andi. -- always records CR0 (the dot form is the only encoding). */
static void gen_andi_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode));
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* andis. -- like andi. but the immediate is shifted into the high
   halfword; CR0 is always recorded. */
static void gen_andis_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode) << 16);
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* cntlzw -- count leading zeros of the low word, via helper. */
static void gen_cntlzw(DisasContext *ctx)
{
    gen_helper_cntlzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* eqv & eqv. */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
| |
| #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) |
static void gen_pause(DisasContext *ctx)
{
    /* Store 0 into CPUState::halted, reached via a negative offset from
       the PPC env inside PowerPCCPU, then exit the translation loop with
       EXCP_HLT. */
    TCGv_i32 t0 = tcg_const_i32(0);
    tcg_gen_st_i32(t0, cpu_env,
                   -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
    tcg_temp_free_i32(t0);

    /* Stop translation, this gives other CPUs a chance to run */
    gen_exception_err(ctx, EXCP_HLT, 1);
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
| |
| /* or & or. */ |
static void gen_or(DisasContext *ctx)
{
    /* Three cases: a real or/mr, a CR0-recording no-op, and -- on PPC64 --
       the "or rx,rx,rx" encodings that act as program-priority hints. */
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for mr. ri case */
    if (rs != ra || rs != rb) {
        if (rs != rb)
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        else
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        if (unlikely(Rc(ctx->opcode) != 0))
            gen_set_Rc0(ctx, cpu_gpr[ra]);
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else if (rs != 0) { /* 0 is nop */
        int prio = 0;

        /* Map the register number to a PPR priority value (0 = ignore). */
        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (!ctx->pr) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (!ctx->pr) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (!ctx->pr) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->hv && !ctx->pr) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            break;
        }
        if (prio) {
            /* Update the priority field (bits 11:13) of SPR_PPR. */
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
            tcg_temp_free(t0);
        }
#if !defined(CONFIG_USER_ONLY)
        /* Pause out of TCG otherwise spin loops with smt_low eat too much
         * CPU and the kernel hangs. This applies to all encodings other
         * than no-op, e.g., miso(rs=26), yield(27), mdoio(29), mdoom(30),
         * and all currently undefined.
         */
        gen_pause(ctx);
#endif
#endif
    }
}
/* orc & orc.: rA = rS | ~rB */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
| |
| /* xor & xor. */ |
| static void gen_xor(DisasContext *ctx) |
| { |
| /* Optimisation for "set to zero" case */ |
| if (rS(ctx->opcode) != rB(ctx->opcode)) |
| tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); |
| else |
| tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0); |
| if (unlikely(Rc(ctx->opcode) != 0)) |
| gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
| } |
| |
/* ori */
static void gen_ori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP (ori 0,0,0 is the architected preferred no-op) */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* oris -- immediate shifted into the high halfword. */
static void gen_oris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
}

/* xori */
static void gen_xori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* xoris -- immediate shifted into the high halfword. */
static void gen_xoris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
}
| |
/* popcntb : PowerPC 2.03 specification -- per-byte population count. */
static void gen_popcntb(DisasContext *ctx)
{
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

/* popcntw -- per-word population count, via helper. */
static void gen_popcntw(DisasContext *ctx)
{
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

#if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification -- whole-doubleword popcount. */
static void gen_popcntd(DisasContext *ctx)
{
    gen_helper_popcntd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
#endif
| |
/* prtyw: PowerPC 2.05 specification */
static void gen_prtyw(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    /* XOR-fold the four bytes of each word together; bit 0 of each word
       then holds the XOR of its bytes' low bits. */
    tcg_gen_shri_tl(t0, rs, 16);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    /* Keep bit 0 of each word (the constant truncates to 1 on 32-bit). */
    tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
    tcg_temp_free(t0);
}
| |
| #if defined(TARGET_PPC64) |
/* prtyd: PowerPC 2.05 specification */
static void gen_prtyd(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    /* XOR-fold all eight bytes of the doubleword; bit 0 of the result is
       the XOR of each byte's low bit. */
    tcg_gen_shri_tl(t0, rs, 32);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 16);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, 1);
    tcg_temp_free(t0);
}
| #endif |
| |
| #if defined(TARGET_PPC64) |
/* bpermd -- bit permute doubleword, via helper. */
static void gen_bpermd(DisasContext *ctx)
{
    gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
}
| #endif |
| |
| #if defined(TARGET_PPC64) |
/* extsw & extsw. */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);

/* cntlzd -- count leading zeros of the doubleword, via helper. */
static void gen_cntlzd(DisasContext *ctx)
{
    gen_helper_cntlzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
| #endif |
| |
| /*** Integer rotate ***/ |
| |
/* rlwimi & rlwimi. */
static void gen_rlwimi(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    if (sh == (31-me) && mb <= me) {
        /* Rotate+mask+insert collapses to a simple bit-field deposit. */
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        target_ulong mask;
        TCGv t1;

#if defined(TARGET_PPC64)
        /* Word masks live in the low 32 bits of the 64-bit register. */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);

        t1 = tcg_temp_new();
        if (mask <= 0xffffffffu) {
            /* Mask confined to the low word: a 32-bit rotate suffices. */
            TCGv_i32 t0 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t0, t_rs);
            tcg_gen_rotli_i32(t0, t0, sh);
            tcg_gen_extu_i32_tl(t1, t0);
            tcg_temp_free_i32(t0);
        } else {
#if defined(TARGET_PPC64)
            /* Wrapping mask: replicate the word and rotate in 64 bits. */
            tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t1, t1, sh);
#else
            g_assert_not_reached();
#endif
        }

        /* Merge the rotated bits under the mask into rA. */
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
| |
/* rlwinm & rlwinm. */
static void gen_rlwinm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    if (mb == 0 && me == (31 - sh)) {
        /* slwi: plain left shift of the low word. */
        tcg_gen_shli_tl(t_ra, t_rs, sh);
        tcg_gen_ext32u_tl(t_ra, t_ra);
    } else if (sh != 0 && me == 31 && sh == (32 - mb)) {
        /* srwi: plain right shift of the low word. */
        tcg_gen_ext32u_tl(t_ra, t_rs);
        tcg_gen_shri_tl(t_ra, t_ra, mb);
    } else {
        target_ulong mask;
#if defined(TARGET_PPC64)
        /* Word masks live in the low 32 bits of the 64-bit register. */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);

        if (mask <= 0xffffffffu) {
            /* Mask confined to the low word: rotate and mask in 32 bits. */
            TCGv_i32 t0 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t0, t_rs);
            tcg_gen_rotli_i32(t0, t0, sh);
            tcg_gen_andi_i32(t0, t0, mask);
            tcg_gen_extu_i32_tl(t_ra, t0);
            tcg_temp_free_i32(t0);
        } else {
#if defined(TARGET_PPC64)
            /* Wrapping mask: replicate the word and rotate in 64 bits. */
            tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t_ra, t_ra, sh);
            tcg_gen_andi_i64(t_ra, t_ra, mask);
#else
            g_assert_not_reached();
#endif
        }
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
| |
/* rlwnm & rlwnm. */
static void gen_rlwnm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);
    target_ulong mask;

#if defined(TARGET_PPC64)
    /* Word masks live in the low 32 bits of the 64-bit register. */
    mb += 32;
    me += 32;
#endif
    mask = MASK(mb, me);

    if (mask <= 0xffffffffu) {
        /* Mask confined to the low word: variable 32-bit rotate. */
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t0, t_rb);
        tcg_gen_trunc_tl_i32(t1, t_rs);
        tcg_gen_andi_i32(t0, t0, 0x1f);   /* only the low 5 bits rotate */
        tcg_gen_rotl_i32(t1, t1, t0);
        tcg_gen_extu_i32_tl(t_ra, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    } else {
#if defined(TARGET_PPC64)
        /* Wrapping mask: replicate the word and rotate in 64 bits. */
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_andi_i64(t0, t_rb, 0x1f);
        tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
        tcg_gen_rotl_i64(t_ra, t_ra, t0);
        tcg_temp_free_i64(t0);
#else
        g_assert_not_reached();
#endif
    }

    tcg_gen_andi_tl(t_ra, t_ra, mask);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
| |
| #if defined(TARGET_PPC64) |
/* Expand the two variants of an md/mds-form insn whose high field bit is
   encoded in the opcode. */
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0);                                                       \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1);                                                       \
}
/* Expand the four variants of an md-form insn with two opcode-encoded
   high bits (mask-extension and shift-extension). */
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 1);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##2)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##3)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 1);                                                    \
}
| |
/* Common 64-bit rotate-immediate-and-mask: rA = rot64(rS, sh) & MASK(mb, me),
   with shift/mask special cases emitted as plain shifts. */
static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];

    if (sh != 0 && mb == 0 && me == (63 - sh)) {
        /* sldi */
        tcg_gen_shli_tl(t_ra, t_rs, sh);
    } else if (sh != 0 && me == 63 && sh == (64 - mb)) {
        /* srdi */
        tcg_gen_shri_tl(t_ra, t_rs, mb);
    } else {
        tcg_gen_rotli_tl(t_ra, t_rs, sh);
        tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
| |
/* rldicl - rldicl. */
static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    /* sh and mb are 6-bit values whose top bit (shn/mbn) is a separate
       opcode bit supplied by the GEN_PPC64_R4 expansion. */
    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63, sh);
}
GEN_PPC64_R4(rldicl, 0x1E, 0x00);

/* rldicr - rldicr. */
static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
{
    uint32_t sh, me;

    sh = SH(ctx->opcode) | (shn << 5);
    /* NOTE(review): MB() is used to extract "me" here -- presumably the
       md-form me field occupies the same bit positions as mb; confirm
       against the opcode field definitions. */
    me = MB(ctx->opcode) | (men << 5);
    gen_rldinm(ctx, 0, me, sh);
}
GEN_PPC64_R4(rldicr, 0x1E, 0x02);

/* rldic - rldic. */
static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63 - sh, sh);
}
GEN_PPC64_R4(rldic, 0x1E, 0x04);
| |
| static void gen_rldnm(DisasContext *ctx, int mb, int me) |
| { |
| TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; |
| TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; |
| TCGv t_rb = cpu_gpr[rB(ctx->opcode)]; |
| TCGv t0; |
| |
| t0 = tcg_temp_new(); |
| tcg_gen_andi_tl(t0, t_rb, 0x3f); |
| tcg_gen_rotl_tl(t_ra, t_rs, t0); |
| tcg_temp_free(t0); |
| |
| tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me)); |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, t_ra); |
| } |
| } |
| |
/* rldcl - rldcl. */
static inline void gen_rldcl(DisasContext *ctx, int mbn)
{
    uint32_t mb;

    /* 6-bit mb; its top bit (mbn) comes from the GEN_PPC64_R2 expansion. */
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldnm(ctx, mb, 63);
}
GEN_PPC64_R2(rldcl, 0x1E, 0x08);

/* rldcr - rldcr. */
static inline void gen_rldcr(DisasContext *ctx, int men)
{
    uint32_t me;

    /* NOTE(review): MB() extracts "me" -- the mds-form me field appears to
       share bit positions with mb; confirm against the field definitions. */
    me = MB(ctx->opcode) | (men << 5);
    gen_rldnm(ctx, 0, me);
}
GEN_PPC64_R2(rldcr, 0x1E, 0x09);
| |
/* rldimi - rldimi. */
static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode) | (shn << 5);
    uint32_t mb = MB(ctx->opcode) | (mbn << 5);
    uint32_t me = 63 - sh;

    if (mb <= me) {
        /* Contiguous mask: rotate+insert is a simple bit-field deposit. */
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        /* Wrapping mask: rotate, then merge under the mask into rA. */
        target_ulong mask = MASK(mb, me);
        TCGv t1 = tcg_temp_new();

        tcg_gen_rotli_tl(t1, t_rs, sh);
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
| #endif |
| |
| /*** Integer shift ***/ |
| |
/* slw & slw. */
static void gen_slw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    /* Shift bit 5 of rB into the sign position and replicate it: the
       resulting mask is all-ones when (rB & 0x20) != 0, and andc below
       then zeroes the source so the result is 0. */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    /* Only the low 5 bits of rB take part in the actual shift. */
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
| |
/* sraw & sraw. -- helper also updates CA in env. */
static void gen_sraw(DisasContext *ctx)
{
    gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
| |
/* srawi & srawi. */
static void gen_srawi(DisasContext *ctx)
{
    int sh = SH(ctx->opcode);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Shift by zero: sign-extend the word, CA cleared. */
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
    } else {
        TCGv t0;
        tcg_gen_ext32s_tl(dst, src);
        /* CA = source negative AND any 1-bits shifted out of the low
           sh bits. */
        tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_temp_free(t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        tcg_gen_sari_tl(dst, dst, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
| |
/* srw & srw. */
static void gen_srw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    /* Replicate bit 5 of rB into a mask (all-ones when shift >= 32). */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    /* Zero-extend first so the right shift cannot pull in high bits. */
    tcg_gen_ext32u_tl(t0, t0);
    t1 = tcg_temp_new();
    /* Only the low 5 bits of rB take part in the actual shift. */
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
| |
| #if defined(TARGET_PPC64) |
/* sld & sld. */
static void gen_sld(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    /* Replicate bit 6 of rB into a mask (all-ones when shift >= 64). */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    /* Only the low 6 bits of rB take part in the actual shift. */
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
| |
/* srad & srad. -- helper also updates CA in env. */
static void gen_srad(DisasContext *ctx)
{
    gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
| /* sradi & sradi. */ |
| static inline void gen_sradi(DisasContext *ctx, int n) |
| { |
| int sh = SH(ctx->opcode) + (n << 5); |
| TCGv dst = cpu_gpr[rA(ctx->opcode)]; |
| TCGv src = cpu_gpr[rS(ctx->opcode)]; |
| if (sh == 0) { |
| tcg_gen_mov_tl(dst, src); |
| tcg_gen_movi_tl(cpu_ca, 0); |
| } else { |
| TCGv t0; |
| tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1); |
| t0 = tcg_temp_new(); |
| tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1); |
| tcg_gen_and_tl(cpu_ca, cpu_ca, t0); |
| tcg_temp_free(t0); |
| tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0); |
| tcg_gen_sari_tl(dst, src, sh); |
| } |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, dst); |
| } |
| } |
| |
| static void gen_sradi0(DisasContext *ctx) |
| { |
| gen_sradi(ctx, 0); |
| } |
| |
| static void gen_sradi1(DisasContext *ctx) |
| { |
| gen_sradi(ctx, 1); |
| } |
| |
/* srd & srd. */
static void gen_srd(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    /* Replicate bit 6 of rB into a mask (all-ones when shift >= 64). */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    /* Only the low 6 bits of rB take part in the actual shift. */
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
| #endif |
| |
| #if defined(TARGET_PPC64) |
/* Copy FPSCR bits 0:3 (FX/FEX/VX/OX) into CR field 1 for Rc=1 FP insns. */
static void gen_set_cr1_from_fpscr(DisasContext *ctx)
{
    /* 64-bit target: truncate FPSCR to 32 bits before extracting. */
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(tmp, cpu_fpscr);
    tcg_gen_shri_i32(cpu_crf[1], tmp, 28);
    tcg_temp_free_i32(tmp);
}
#else
/* Copy FPSCR bits 0:3 (FX/FEX/VX/OX) into CR field 1 for Rc=1 FP insns. */
static void gen_set_cr1_from_fpscr(DisasContext *ctx)
{
    tcg_gen_shri_tl(cpu_crf[1], cpu_fpscr, 28);
}
| #endif |
| |
| /*** Floating-Point arithmetic ***/ |
| #define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \ |
| static void gen_f##name(DisasContext *ctx) \ |
| { \ |
| if (unlikely(!ctx->fpu_enabled)) { \ |
| gen_exception(ctx, POWERPC_EXCP_FPU); \ |
| return; \ |
| } \ |
| /* NIP cannot be restored if the memory exception comes from an helper */ \ |
| gen_update_nip(ctx, ctx->nip - 4); \ |
| gen_reset_fpstatus(); \ |
| gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \ |
| cpu_fpr[rA(ctx->opcode)], \ |
| cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \ |
| if (isfloat) { \ |
| gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \ |
| cpu_fpr[rD(ctx->opcode)]); \ |
| } \ |
| if (set_fprf) { \ |
| gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \ |
| } \ |
| if (unlikely(Rc(ctx->opcode) != 0)) { \ |
| gen_set_cr1_from_fpscr(ctx); \ |
| } \ |
| } |
| |
| #define GEN_FLOAT_ACB(name, op2, set_fprf, type) \ |
| _GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type); \ |
| _GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type); |
| |
| #define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \ |
| static void gen_f##name(DisasContext *ctx) \ |
| { \ |
| if (unlikely(!ctx->fpu_enabled)) { \ |
| gen_exception(ctx, POWERPC_EXCP_FPU); \ |
| return; \ |
| } \ |
| /* NIP cannot be restored if the memory exception comes from an helper */ \ |
| gen_update_nip(ctx, ctx->nip - 4); \ |
| gen_reset_fpstatus(); \ |
| gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \ |
| cpu_fpr[rA(ctx->opcode)], \ |
| cpu_fpr[rB(ctx->opcode)]); \ |
| if (isfloat) { \ |
| gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \ |
| cpu_fpr[rD(ctx->opcode)]); \ |
| } \ |
| if (set_fprf) { \ |
| gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \ |
| } \ |
| if (unlikely(Rc(ctx->opcode) != 0)) { \ |
| gen_set_cr1_from_fpscr(ctx); \ |
| } \ |
| } |
| #define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \ |
| _GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type); \ |
| _GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type); |
| |
| #define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \ |
| static void gen_f##name(DisasContext *ctx) \ |
| { \ |
| if (unlikely(!ctx->fpu_enabled)) { \ |
| gen_exception(ctx, POWERPC_EXCP_FPU); \ |
| return; \ |
| } \ |
| /* NIP cannot be restored if the memory exception comes from an helper */ \ |
| gen_update_nip(ctx, ctx->nip - 4); \ |
| gen_reset_fpstatus(); \ |
| gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \ |
| cpu_fpr[rA(ctx->opcode)], \ |
| cpu_fpr[rC(ctx->opcode)]); \ |
| if (isfloat) { \ |
| gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \ |
| cpu_fpr[rD(ctx->opcode)]); \ |
| } \ |
| if (set_fprf) { \ |
| gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \ |
| } \ |
| if (unlikely(Rc(ctx->opcode) != 0)) { \ |
| gen_set_cr1_from_fpscr(ctx); \ |
| } \ |
| } |
| #define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \ |
| _GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type); \ |
| _GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type); |
| |
/*
 * Emit the translator for a B-form FP instruction (single source operand
 * rB, e.g. the round/convert family).  op2/op3 are opcode-table fields
 * consumed by the registration macros elsewhere — presumably not used in
 * this body.
 */
#define GEN_FLOAT_B(name, op2, op3, set_fprf, type)                           \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from an helper */ \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env,                     \
                       cpu_fpr[rB(ctx->opcode)]);                             \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}
| |
/*
 * Emit the translator for a B-form FP estimate instruction (fre, fres,
 * frsqrte).  Body is identical to GEN_FLOAT_B; op1/op2 are opcode-table
 * fields used by the registration macros elsewhere — presumably not
 * referenced here.
 */
#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type)                          \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from an helper */ \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env,                     \
                       cpu_fpr[rB(ctx->opcode)]);                             \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}
| |
/*** Floating-point arithmetic translator instantiations ***/
/* fadd - fadds */
GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
/* fdiv - fdivs */
GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
/* fmul - fmuls */
GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);

/* fre */
GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);

/* fres */
GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);

/* frsqrte */
GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);
| |
| /* frsqrtes */ |
/* frsqrtes: single-precision reciprocal square-root estimate.
 * Open-coded (rather than via GEN_FLOAT_BS) because the frsqrte helper
 * result must additionally be rounded to single precision with frsp. */
static void gen_frsqrtes(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_env,
                       cpu_fpr[rB(ctx->opcode)]);
    /* Round the double-precision estimate down to single */
    gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
                    cpu_fpr[rD(ctx->opcode)]);
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        /* Rc=1: reflect FPSCR summary bits into CR1 */
        gen_set_cr1_from_fpscr(ctx);
    }
}
| |
/* fsel: A-form with rA/rC/rB operands; no FPRF update (set_fprf = 0) */
_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
/* fsub - fsubs */
GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
| /* Optional: */ |
| |
| /* fsqrt */ |
/* fsqrt: double-precision square root (optional instruction) */
static void gen_fsqrt(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
                     cpu_fpr[rB(ctx->opcode)]);
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        /* Rc=1: reflect FPSCR summary bits into CR1 */
        gen_set_cr1_from_fpscr(ctx);
    }
}
| |
/* fsqrts: single-precision square root — double-precision fsqrt helper
 * followed by a round-to-single via frsp */
static void gen_fsqrts(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
                     cpu_fpr[rB(ctx->opcode)]);
    gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
                    cpu_fpr[rD(ctx->opcode)]);
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        /* Rc=1: reflect FPSCR summary bits into CR1 */
        gen_set_cr1_from_fpscr(ctx);
    }
}
| |
/*** Floating-Point multiply-and-add ***/
/* fmadd - fmadds */
GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
/* fmsub - fmsubs */
GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
/* fnmadd - fnmadds */
GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
/* fnmsub - fnmsubs */
GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);

/*** Floating-Point round & convert ***/
/* fctiw */
GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
/* fctiwu */
GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206);
/* fctiwz */
GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
/* fctiwuz */
GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206);
/* frsp */
GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
/* fcfid */
GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64);
/* fcfids */
GEN_FLOAT_B(cfids, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206);
/* fcfidu */
GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
/* fcfidus */
GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
/* fctid */
GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC2_FP_CVT_S64);
/* fctidu */
GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206);
/* fctidz */
GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC2_FP_CVT_S64);
/* fctiduz */
GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206);

/* frin */
GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
/* friz */
GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
/* frip */
GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
/* frim */
GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
| |
/* ftdiv: floating test for software divide — the helper writes its result
 * directly into the target CR field; FPSCR is untouched, so no NIP update
 * or fpstatus reset is performed here */
static void gen_ftdiv(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
                     cpu_fpr[rB(ctx->opcode)]);
}
| |
/* ftsqrt: floating test for software square root — result goes straight
 * into the target CR field via the helper */
static void gen_ftsqrt(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
}
| |
| |
| |
| /*** Floating-Point compare ***/ |
| |
| /* fcmpo */ |
/* fcmpo: ordered compare — the helper raises VXVC on quiet NaNs in
 * addition to the unordered-compare behavior */
static void gen_fcmpo(DisasContext *ctx)
{
    TCGv_i32 crf;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    /* Pass the target CR field number to the helper as an immediate */
    crf = tcg_const_i32(crfD(ctx->opcode));
    gen_helper_fcmpo(cpu_env, cpu_fpr[rA(ctx->opcode)],
                     cpu_fpr[rB(ctx->opcode)], crf);
    tcg_temp_free_i32(crf);
    gen_helper_float_check_status(cpu_env);
}
| |
| /* fcmpu */ |
/* fcmpu: unordered compare */
static void gen_fcmpu(DisasContext *ctx)
{
    TCGv_i32 crf;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    /* Pass the target CR field number to the helper as an immediate */
    crf = tcg_const_i32(crfD(ctx->opcode));
    gen_helper_fcmpu(cpu_env, cpu_fpr[rA(ctx->opcode)],
                     cpu_fpr[rB(ctx->opcode)], crf);
    tcg_temp_free_i32(crf);
    gen_helper_float_check_status(cpu_env);
}
| |
| /*** Floating-point move ***/ |
| /* fabs */ |
| /* XXX: beware that fabs never checks for NaNs nor update FPSCR */ |
/* fabs */
/* XXX: beware that fabs never checks for NaNs nor update FPSCR */
static void gen_fabs(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* Pure bit operation: clear the IEEE754 sign bit (bit 63) */
    tcg_gen_andi_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
                     ~(1ULL << 63));
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}
| |
| /* fmr - fmr. */ |
| /* XXX: beware that fmr never checks for NaNs nor update FPSCR */ |
/* fmr - fmr. */
/* XXX: beware that fmr never checks for NaNs nor update FPSCR */
static void gen_fmr(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* Raw 64-bit register copy, no rounding or classification */
    tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}
| |
| /* fnabs */ |
| /* XXX: beware that fnabs never checks for NaNs nor update FPSCR */ |
/* fnabs */
/* XXX: beware that fnabs never checks for NaNs nor update FPSCR */
static void gen_fnabs(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* Pure bit operation: force the IEEE754 sign bit (bit 63) on */
    tcg_gen_ori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
                    1ULL << 63);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}
| |
| /* fneg */ |
| /* XXX: beware that fneg never checks for NaNs nor update FPSCR */ |
/* fneg */
/* XXX: beware that fneg never checks for NaNs nor update FPSCR */
static void gen_fneg(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* Pure bit operation: flip the IEEE754 sign bit (bit 63) */
    tcg_gen_xori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
                     1ULL << 63);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}
| |
| /* fcpsgn: PowerPC 2.05 specification */ |
| /* XXX: beware that fcpsgn never checks for NaNs nor update FPSCR */ |
/* fcpsgn: PowerPC 2.05 specification */
/* XXX: beware that fcpsgn never checks for NaNs nor update FPSCR */
static void gen_fcpsgn(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* Result = sign bit of rA (bit 63 kept from the base operand)
     * combined with the 63 magnitude bits deposited from rB */
    tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
                        cpu_fpr[rB(ctx->opcode)], 0, 63);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}
| |
/* fmrgew: merge even words — result is the high word of rA (kept in place)
 * with the high word of rB moved into the low word */
static void gen_fmrgew(DisasContext *ctx)
{
    TCGv_i64 b0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    b0 = tcg_temp_new_i64();
    /* Bring rB's high word down to bit 0 before depositing it */
    tcg_gen_shri_i64(b0, cpu_fpr[rB(ctx->opcode)], 32);
    tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
                        b0, 0, 32);
    tcg_temp_free_i64(b0);
}
| |
/* fmrgow: merge odd words — result is rA's low word deposited into the
 * high word, over rB's value (whose low word is kept) */
static void gen_fmrgow(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)],
                        cpu_fpr[rB(ctx->opcode)],
                        cpu_fpr[rA(ctx->opcode)],
                        32, 32);
}
| |
| /*** Floating-Point status & ctrl register ***/ |
| |
| /* mcrfs */ |
| static void gen_mcrfs(DisasContext *ctx) |
| { |
| TCGv tmp = tcg_temp_new(); |
| TCGv_i32 tmask; |
| TCGv_i64 tnew_fpscr = tcg_temp_new_i64(); |
| int bfa; |
| int nibble; |
| int shift; |
| |
| if (unlikely(!ctx->fpu_enabled)) { |
| gen_exception(ctx, POWERPC_EXCP_FPU); |
| return; |
| } |
| bfa = crfS(ctx->opcode); |
| nibble = 7 - bfa; |
| shift = 4 * nibble; |
| tcg_gen_shri_tl(tmp, cpu_fpscr, shift); |
| tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp); |
| tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf); |
| tcg_temp_free(tmp); |
| tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr); |
| /* Only the exception bits (including FX) should be cleared if read */ |
| tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, ~((0xF << shift) & FP_EX_CLEAR_BITS)); |
| /* FEX and VX need to be updated, so don't set fpscr directly */ |
| tmask = tcg_const_i32(1 << nibble); |
| gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask); |
| tcg_temp_free_i32(tmask); |
| tcg_temp_free_i64(tnew_fpscr); |
| } |
| |
| /* mffs */ |
/* mffs: move the whole FPSCR into the low bits of FPR rD */
static void gen_mffs(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_reset_fpstatus();
    tcg_gen_extu_tl_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpscr);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}
| |
| /* mtfsb0 */ |
/* mtfsb0: clear a single FPSCR bit */
static void gen_mtfsb0(DisasContext *ctx)
{
    uint8_t crb;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* Instruction numbers bits from the MSB; convert to LSB-based index */
    crb = 31 - crbD(ctx->opcode);
    gen_reset_fpstatus();
    /* FEX and VX are summary bits and cannot be cleared directly */
    if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
        TCGv_i32 t0;
        /* NIP cannot be restored if the memory exception comes from a helper */
        gen_update_nip(ctx, ctx->nip - 4);
        t0 = tcg_const_i32(crb);
        gen_helper_fpscr_clrbit(cpu_env, t0);
        tcg_temp_free_i32(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        /* Rc=1: copy FPSCR[FX,FEX,VX,OX] into CR1 */
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
}
| |
| /* mtfsb1 */ |
/* mtfsb1: set a single FPSCR bit */
static void gen_mtfsb1(DisasContext *ctx)
{
    uint8_t crb;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* Instruction numbers bits from the MSB; convert to LSB-based index */
    crb = 31 - crbD(ctx->opcode);
    gen_reset_fpstatus();
    /* XXX: we pretend we can only do IEEE floating-point computations */
    if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
        TCGv_i32 t0;
        /* NIP cannot be restored if the memory exception comes from a helper */
        gen_update_nip(ctx, ctx->nip - 4);
        t0 = tcg_const_i32(crb);
        gen_helper_fpscr_setbit(cpu_env, t0);
        tcg_temp_free_i32(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        /* Rc=1: copy FPSCR[FX,FEX,VX,OX] into CR1 */
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* Setting an exception enable/flag bit may raise a deferred exception */
    gen_helper_float_check_status(cpu_env);
}
| |
| /* mtfsf */ |
| static void gen_mtfsf(DisasContext *ctx) |
| { |
| TCGv_i32 t0; |
| int flm, l, w; |
| |
| if (unlikely(!ctx->fpu_enabled)) { |
| gen_exception(ctx, POWERPC_EXCP_FPU); |
| return; |
| } |
| flm = FPFLM(ctx->opcode); |
| l = FPL(ctx->opcode); |
| w = FPW(ctx->opcode); |
| if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) { |
| gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); |
| return; |
| } |
| /* NIP cannot be restored if the memory exception comes from an helper */ |
| gen_update_nip(ctx, ctx->nip - 4); |
| gen_reset_fpstatus(); |
| if (l) { |
| t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff); |
| } else { |
| t0 = tcg_const_i32(flm << (w * 8)); |
| } |
| gen_helper_store_fpscr(cpu_env, cpu_fpr[rB(ctx->opcode)], t0); |
| tcg_temp_free_i32(t0); |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr); |
| tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX); |
| } |
| /* We can raise a differed exception */ |
| gen_helper_float_check_status(cpu_env); |
| } |
| |
| /* mtfsfi */ |
| static void gen_mtfsfi(DisasContext *ctx) |
| { |
| int bf, sh, w; |
| TCGv_i64 t0; |
| TCGv_i32 t1; |
| |
| if (unlikely(!ctx->fpu_enabled)) { |
| gen_exception(ctx, POWERPC_EXCP_FPU); |
| return; |
| } |
| w = FPW(ctx->opcode); |
| bf = FPBF(ctx->opcode); |
| if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) { |
| gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); |
| return; |
| } |
| sh = (8 * w) + 7 - bf; |
| /* NIP cannot be restored if the memory exception comes from an helper */ |
| gen_update_nip(ctx, ctx->nip - 4); |
| gen_reset_fpstatus(); |
| t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh)); |
| t1 = tcg_const_i32(1 << sh); |
| gen_helper_store_fpscr(cpu_env, t0, t1); |
| tcg_temp_free_i64(t0); |
| tcg_temp_free_i32(t1); |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr); |
| tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX); |
| } |
| /* We can raise a differed exception */ |
| gen_helper_float_check_status(cpu_env); |
| } |
| |
| /*** Addressing modes ***/ |
| /* Register indirect with immediate index : EA = (rA|0) + SIMM */ |
| static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA, |
| target_long maskl) |
| { |
| target_long simm = SIMM(ctx->opcode); |
| |
| simm &= ~maskl; |
| if (rA(ctx->opcode) == 0) { |
| if (NARROW_MODE(ctx)) { |
| simm = (uint32_t)simm; |
| } |
| tcg_gen_movi_tl(EA, simm); |
| } else if (likely(simm != 0)) { |
| tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm); |
| if (NARROW_MODE(ctx)) { |
| tcg_gen_ext32u_tl(EA, EA); |
| } |
| } else { |
| if (NARROW_MODE(ctx)) { |
| tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]); |
| } else { |
| tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]); |
| } |
| } |
| } |
| |
| static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA) |
| { |
| if (rA(ctx->opcode) == 0) { |
| if (NARROW_MODE(ctx)) { |
| tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]); |
| |