| /* |
| * S/390 translation |
| * |
| * Copyright (c) 2009 Ulrich Hecht |
| * Copyright (c) 2010 Alexander Graf |
| * |
| * This library is free software; you can redistribute it and/or |
| * modify it under the terms of the GNU Lesser General Public |
| * License as published by the Free Software Foundation; either |
| * version 2 of the License, or (at your option) any later version. |
| * |
| * This library is distributed in the hope that it will be useful, |
| * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| * Lesser General Public License for more details. |
| * |
| * You should have received a copy of the GNU Lesser General Public |
| * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
| */ |
| |
| /* #define DEBUG_INLINE_BRANCHES */ |
| #define S390X_DEBUG_DISAS |
| /* #define S390X_DEBUG_DISAS_VERBOSE */ |
| |
| #ifdef S390X_DEBUG_DISAS_VERBOSE |
| # define LOG_DISAS(...) qemu_log(__VA_ARGS__) |
| #else |
| # define LOG_DISAS(...) do { } while (0) |
| #endif |
| |
| #include "qemu/osdep.h" |
| #include "cpu.h" |
| #include "disas/disas.h" |
| #include "exec/exec-all.h" |
| #include "tcg-op.h" |
| #include "qemu/log.h" |
| #include "qemu/host-utils.h" |
| #include "exec/cpu_ldst.h" |
| |
| /* global register indexes */ |
| static TCGv_env cpu_env; |
| |
| #include "exec/gen-icount.h" |
| #include "exec/helper-proto.h" |
| #include "exec/helper-gen.h" |
| |
| #include "trace-tcg.h" |
| #include "exec/log.h" |
| |
| |
/* Information that (most) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/* Per-translation state threaded through every decode/emit helper. */
struct DisasContext {
    struct TranslationBlock *tb;
    const DisasInsn *insn;      /* decode-table entry for the current insn */
    DisasFields *fields;        /* decoded operand fields of the current insn */
    uint64_t pc, next_pc;       /* address of the current / next instruction */
    enum cc_op cc_op;           /* how the condition code is currently encoded */
    bool singlestep_enabled;
};

/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;
    bool is_64;                 /* selects u.s64 vs u.s32 operands */
    bool g1;                    /* operand a is a TCG global: do not free */
    bool g2;                    /* operand b is a TCG global: do not free */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
| |
| #define DISAS_EXCP 4 |
| |
| #ifdef DEBUG_INLINE_BRANCHES |
| static uint64_t inline_branch_hit[CC_OP_MAX]; |
| static uint64_t inline_branch_miss[CC_OP_MAX]; |
| #endif |
| |
| static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc) |
| { |
| if (!(s->tb->flags & FLAG_MASK_64)) { |
| if (s->tb->flags & FLAG_MASK_32) { |
| return pc | 0x80000000; |
| } |
| } |
| return pc; |
| } |
| |
| void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, |
| int flags) |
| { |
| S390CPU *cpu = S390_CPU(cs); |
| CPUS390XState *env = &cpu->env; |
| int i; |
| |
| if (env->cc_op > 3) { |
| cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n", |
| env->psw.mask, env->psw.addr, cc_name(env->cc_op)); |
| } else { |
| cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n", |
| env->psw.mask, env->psw.addr, env->cc_op); |
| } |
| |
| for (i = 0; i < 16; i++) { |
| cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]); |
| if ((i % 4) == 3) { |
| cpu_fprintf(f, "\n"); |
| } else { |
| cpu_fprintf(f, " "); |
| } |
| } |
| |
| for (i = 0; i < 16; i++) { |
| cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll); |
| if ((i % 4) == 3) { |
| cpu_fprintf(f, "\n"); |
| } else { |
| cpu_fprintf(f, " "); |
| } |
| } |
| |
| for (i = 0; i < 32; i++) { |
| cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i, |
| env->vregs[i][0].ll, env->vregs[i][1].ll); |
| cpu_fprintf(f, (i % 2) ? "\n" : " "); |
| } |
| |
| #ifndef CONFIG_USER_ONLY |
| for (i = 0; i < 16; i++) { |
| cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]); |
| if ((i % 4) == 3) { |
| cpu_fprintf(f, "\n"); |
| } else { |
| cpu_fprintf(f, " "); |
| } |
| } |
| #endif |
| |
| #ifdef DEBUG_INLINE_BRANCHES |
| for (i = 0; i < CC_OP_MAX; i++) { |
| cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i), |
| inline_branch_miss[i], inline_branch_hit[i]); |
| } |
| #endif |
| |
| cpu_fprintf(f, "\n"); |
| } |
| |
/* TCG globals mapping the architectural state manipulated directly
   by generated code; allocated in s390x_translate_init(). */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;           /* breaking-event address, used by PER */

/* Deferred condition-code state; see gen_op_calc_cc(). */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* Names and handles for r0-r15 and f0-f15 (32 names, "rN"/"fN"). */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];
| |
/* Allocate the TCG globals that back the CPU state fields used by the
   translator.  Called once at start-up; cpu_env must be created first
   since every other global is addressed relative to it. */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;
    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* General registers r0-r15. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* Floating point registers f0-f15: mapped onto the first doubleword
       of the corresponding vector registers. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUS390XState, vregs[i][0].d),
                                      cpu_reg_names[i + 16]);
    }
}
| |
| static TCGv_i64 load_reg(int reg) |
| { |
| TCGv_i64 r = tcg_temp_new_i64(); |
| tcg_gen_mov_i64(r, regs[reg]); |
| return r; |
| } |
| |
| static TCGv_i64 load_freg32_i64(int reg) |
| { |
| TCGv_i64 r = tcg_temp_new_i64(); |
| tcg_gen_shri_i64(r, fregs[reg], 32); |
| return r; |
| } |
| |
/* Store all 64 bits of V into general register REG. */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store all 64 bits of V into floating point register REG. */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of V into the HIGH half of gpr REG,
   preserving the low half. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store a short-format float (low 32 bits of V) into the high half of
   fpr REG; matches the placement read by load_freg32_i64(). */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

/* Fetch the low 64 bits of a 128-bit helper result from env->retxl. */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
| |
/* Write the translation-time pc back into the psw.addr global. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}
| |
/* Record an unconditional branch for PER (program-event recording).
   Always updates gbea, the breaking-event address; when PER is active
   for this TB, also invokes the per_branch helper with the branch
   target: the next sequential instruction if TO_NEXT, otherwise the
   already-updated psw_addr.  No-op in user-only mode. */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->pc);

    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

/* As per_branch, but for a branch taken only when COND(arg1, arg2)
   holds.  With PER enabled the helper call is skipped on the
   not-taken path; without PER, gbea is updated conditionally via
   movcond instead. */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Branch over the helper when the condition does NOT hold. */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->pc);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->pc);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

/* Record the current instruction address as a breaking event. */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->pc);
}
| |
/* Flush the translation-time cc state into the cc_op global.  Nothing
   to do when the cc is DYNAMIC (already in env) or STATIC (cc_op
   global already holds the final value). */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

/* Synchronize psw.addr and cc_op before emitting an operation that
   may fault, so the exception path sees consistent state. */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
| |
/* Fetch a 2-byte instruction halfword at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch a 4-byte instruction word at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
| |
/* Map the PSW address-space-control bits recorded in the TB flags to
   a TCG MMU index: 0 = primary, 1 = secondary, 2 = home space. */
static int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        /* Access-register mode is not handled here. */
        tcg_abort();
        break;
    }
}
| |
/* Emit a helper call that raises exception number EXCP. */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
| |
/* Raise program exception CODE: record the code and the length of the
   current instruction in env, advance the PSW past the instruction,
   flush the cc state, and deliver EXCP_PGM. */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Remember the instruction length, in bytes. */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction. */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc. */
    update_cc_op(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);
}
| |
/* Raise a PGM_OPERATION (illegal opcode) program exception. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

/* Raise a PGM_DATA program exception after setting the DXC byte
   within the FPC to 0xff. */
static inline void gen_trap(DisasContext *s)
{
    TCGv_i32 t;

    /* Set DXC to 0xff. */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    gen_program_exception(s, PGM_DATA);
}
| |
#ifndef CONFIG_USER_ONLY
/* Raise PGM_PRIVILEGED when the TB was translated in problem (user)
   state; privileged instructions call this before doing any work. */
static void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
#endif
| |
/* Compute the effective address base(b2) + index(x2) + displacement d2
   into a fresh temporary, truncating to 31 bits when not in 64-bit
   addressing mode.  Register number 0 means "no base/index". */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends. */

    /* Note that addi optimizes the imm==0 case. */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        /* Constant address: mask at translation time, no runtime AND. */
        if (need_31) {
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}
| |
| static inline bool live_cc_data(DisasContext *s) |
| { |
| return (s->cc_op != CC_OP_DYNAMIC |
| && s->cc_op != CC_OP_STATIC |
| && s->cc_op > 3); |
| } |
| |
/* Set the condition code to the constant VAL (0-3), discarding any
   live cc data. */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

/* Defer cc computation: remember OP with one operand (in cc_dst). */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer cc computation: remember OP with two operands. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer cc computation: remember OP with three operands. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* cc = (val != 0), unsigned. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* cc from a 32-bit float result. */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

/* cc from a 64-bit float result. */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

/* cc from a 128-bit float result (high/low halves). */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
| |
/* calculates cc into cc_op */
/* Materialize the deferred condition code: emit code computing the cc
   value into the cc_op global from (s->cc_op, cc_src, cc_dst, cc_vr),
   then mark the cc as STATIC. */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);
    /* First pass: allocate the auxiliary temporaries each cc_op class
       needs (a dummy zero operand and/or the op number itself). */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed; no temporaries either. */
        break;
    }

    /* Second pass: emit the computation itself. */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
| |
/* Decide whether a direct jump to DEST may use goto_tb (chained TBs).
   Not allowed when single-stepping, when the TB ends with an I/O
   instruction, or when PER is active.  System emulation additionally
   requires DEST to lie on the same page as the TB start or the
   current pc, so chained jumps never cross a page boundary. */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->singlestep_enabled) ||
        (s->tb->cflags & CF_LAST_IO) ||
        (s->tb->flags & FLAG_MASK_PER)) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
| |
/* Branch-inlining statistics, dumped by s390_cpu_dump_state();
   both are no-ops unless DEBUG_INLINE_BRANCHES is defined. */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
| |
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.
   Indexed by the 4-bit branch mask; entries come in identical pairs
   because the low mask bit (which selects CC=3) is a don't-care. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible; the two low mask
   bits (CC=2 and CC=3) are don't-cares. */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
| |
| /* Interpret MASK in terms of S->CC_OP, and fill in C with all the |
| details required to generate a TCG comparison. */ |
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Mask 15 is "always", mask 0 is "never"; no operands needed.
       Point both at the cc_op global (flagged as globals) so that
       free_compare() has nothing to release. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care. */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is already computed; compare it against the
           mask directly, special-casing the single-cc and cc-parity
           masks so most branches are one comparison. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
| |
| static void free_compare(DisasCompare *c) |
| { |
| if (!c->g1) { |
| if (c->is_64) { |
| tcg_temp_free_i64(c->u.s64.a); |
| } else { |
| tcg_temp_free_i32(c->u.s32.a); |
| } |
| } |
| if (!c->g2) { |
| if (c->is_64) { |
| tcg_temp_free_i64(c->u.s64.b); |
| } else { |
| tcg_temp_free_i32(c->u.s32.b); |
| } |
| } |
| } |
| |
| /* ====================================================================== */ |
| /* Define the insn format enumeration. */ |
| #define F0(N) FMT_##N, |
| #define F1(N, X1) F0(N) |
| #define F2(N, X1, X2) F0(N) |
| #define F3(N, X1, X2, X3) F0(N) |
| #define F4(N, X1, X2, X3, X4) F0(N) |
| #define F5(N, X1, X2, X3, X4, X5) F0(N) |
| |
| typedef enum { |
| #include "insn-format.def" |
| } DisasFormat; |
| |
| #undef F0 |
| #undef F1 |
| #undef F2 |
| #undef F3 |
| #undef F4 |
| #undef F5 |
| |
| /* Define a structure to hold the decoded fields. We'll store each inside |
| an array indexed by an enum. In order to conserve memory, we'll arrange |
| for fields that do not exist at the same time to overlap, thus the "C" |
| for compact. For checking purposes there is an "O" for original index |
| as well that will be applied to availability bitmaps. */ |
| |
/* Original field indices: one per distinct field name appearing in the
   instruction formats; used for the presence bitmap (presentO). */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* Compact storage indices: fields that never coexist in the same
   instruction format share a slot, so 7 slots suffice. */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

/* A decoded instruction: raw bytes, opcode bytes, presence bitmaps
   (compact and original indexing), and the compact field values. */
struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};
| |
| /* This is the way fields are to be accessed out of DisasFields. */ |
| #define have_field(S, F) have_field1((S), FLD_O_##F) |
| #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F) |
| |
/* Test whether original-index field C was decoded for this insn. */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

/* Read a field value: O is used only to assert the field is present;
   C selects the compact storage slot actually holding the value. */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
| |
/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;         /* first bit of the field within the insn */
    unsigned int size:8;        /* field width in bits */
    unsigned int type:2;        /* extraction kind: values 0/1/2, assigned by
                                   the R/M/BD/I/L/BDL macros below */
    unsigned int indexC:6;      /* compact storage slot (FLD_C_*) */
    enum DisasFieldIndexO indexO:8;  /* original field index (FLD_O_*) */
} DisasField;

/* Per-format list of fields, generated from insn-format.def below. */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
| |
| #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N } |
| #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N } |
| #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \ |
| { BD, 12, 0, FLD_C_d##N, FLD_O_d##N } |
| #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \ |
| { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \ |
| { 20, 12, 0, FLD_C_d##N, FLD_O_d##N } |
| #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \ |
| { 20, 20, 2, FLD_C_d##N, FLD_O_d##N } |
| #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \ |
| { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \ |
| { 20, 20, 2, FLD_C_d##N, FLD_O_d##N } |
| #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N } |
| #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N } |
| |
| #define F0(N) { { } }, |
| #define F1(N, X1) { { X1 } }, |
| #define F2(N, X1, X2) { { X1, X2 } }, |
| #define F3(N, X1, X2, X3) { { X1, X2, X3 } }, |
| #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } }, |
| #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } }, |
| |
| static const DisasFormatInfo format_info[] = { |
| #include "insn-format.def" |
| }; |
| |
| #undef F0 |
| #undef F1 |
| #undef F2 |
| #undef F3 |
| #undef F4 |
| #undef F5 |
| #undef R |
| #undef M |
| #undef BD |
| #undef BXD |
| #undef BDL |
| #undef BXDL |
| #undef I |
| #undef L |
| |
| /* Generally, we'll extract operands into this structures, operate upon |
| them, and store them back. See the "in1", "in2", "prep", "wout" sets |
| of routines below for more details. */ |
typedef struct {
    /* g_* flag an operand as a TCG global that must not be freed
       (mirrors DisasCompare.g1/g2). */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;
| |
| /* Instructions can place constraints on their operands, raising specification |
| exceptions if they are violated. To make this easy to automate, each "in1", |
| "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one |
| of the following, or 0. To make this easy to document, we'll put the |
| SPEC_<name> defines next to <name>. */ |
| |
| #define SPEC_r1_even 1 |
| #define SPEC_r2_even 2 |
| #define SPEC_r3_even 4 |
| #define SPEC_r1_f128 8 |
| #define SPEC_r2_f128 16 |
| |
/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
    /* Continue the TB. */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required. */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB. */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  (Used e.g. after raising a
       program exception.) */
    EXIT_NORETURN,
} ExitStatus;
| |
/* Architecture facility an instruction belongs to; recorded per decode
   table entry in DisasInsn.fac. */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_MIE,                /* miscellaneous-instruction-extensions */
    FAC_LAT,                /* load-and-trap */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
    FAC_ILA,                /* interlocked access facility 1 */
} DisasFacility;
| |
/* One entry of the instruction decode table: opcode, format, required
   facility, specification-exception constraints, and the helper
   callbacks that load operands, perform the operation, and write the
   results back. */
struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:8;
    DisasFacility fac:8;
    unsigned spec:8;            /* SPEC_* constraint bits */

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
| |
| /* ====================================================================== */ |
| /* Miscellaneous helpers, used by several operations. */ |
| |
| static void help_l2_shift(DisasContext *s, DisasFields *f, |
| DisasOps *o, int mask) |
| { |
| int b2 = get_field(f, b2); |
| int d2 = get_field(f, d2); |
| |
| if (b2 == 0) { |
| o->in2 = tcg_const_i64(d2 & mask); |
| } else { |
| o->in2 = get_address(s, 0, b2, d2); |
| tcg_gen_andi_i64(o->in2, o->in2, mask); |
| } |
| } |
| |
/* Emit an unconditional branch to DEST.  Prefer direct TB chaining
   (goto_tb) when the target is reachable; otherwise fall back to a
   dynamic exit with the PSW address updated.  */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        /* Branch-to-next is a no-op except for PER tracing.  */
        per_branch(s, true);
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    }
}
| |
/* Emit code for a conditional branch.  *C describes the (already
   loaded) condition.  The taken target is either PC-relative (IS_IMM,
   with IMM in halfword units) or the dynamic address in CDEST.
   Chooses among TB chaining, a half-chained exit, and a fully dynamic
   movcond-based exit depending on which targets goto_tb can reach.
   Consumes *C on all paths.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next. */
            per_branch(s, true);
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* 32-bit compare: widen the setcond result so a single
               64-bit movcond can select the next PSW address.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
| |
| /* ====================================================================== */ |
| /* The operations. These perform the bulk of the work for any insn, |
| usually after the operands have been loaded and output initialized. */ |
| |
| static ExitStatus op_abs(DisasContext *s, DisasOps *o) |
| { |
| TCGv_i64 z, n; |
| z = tcg_const_i64(0); |
| n = tcg_temp_new_i64(); |
| tcg_gen_neg_i64(n, o->in2); |
| tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2); |
| tcg_temp_free_i64(n); |
| tcg_temp_free_i64(z); |
| return NO_EXIT; |
| } |
| |
/* LOAD POSITIVE (BFP): clear the sign bit of the float image.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* 128-bit variant: the sign lives in the high doubleword (in1);
   the low doubleword (in2) passes through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* Integer addition; any CC computation happens in a separate
   cout callback of the decode table entry.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
| |
/* ADD LOGICAL WITH CARRY: out = in1 + in2 + carry, where the carry
   is recovered from the current (lazily computed) condition code.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: widen the flag to 64 bits for the add.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
| |
/* ADD (BFP short): done in a helper so FP exceptions can be raised.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (BFP long).  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (BFP extended): 128-bit operands are passed as high/low pairs;
   the low half of the result comes back via return_low128.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Bitwise AND.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
| |
/* AND immediate against one field of a register (e.g. NIHH/NILL):
   insn->data packs the field's bit offset (low byte) and its width
   (next byte).  Bits outside the field are left untouched.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    /* Move the immediate into position and fill the rest with ones
       so the AND only affects the selected field.  */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
| |
| static ExitStatus op_bas(DisasContext *s, DisasOps *o) |
| { |
| tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc)); |
| if (!TCGV_IS_UNUSED_I64(o->in2)) { |
| tcg_gen_mov_i64(psw_addr, o->in2); |
| per_branch(s, false); |
| return EXIT_PC_UPDATED; |
| } else { |
| return NO_EXIT; |
| } |
| } |
| |
/* BRANCH RELATIVE AND SAVE: store the link, then take a PC-relative
   branch of I2 halfwords.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
| |
| static ExitStatus op_bc(DisasContext *s, DisasOps *o) |
| { |
| int m1 = get_field(s->fields, m1); |
| bool is_imm = have_field(s->fields, i2); |
| int imm = is_imm ? get_field(s->fields, i2) : 0; |
| DisasCompare c; |
| |
| disas_jcc(s, &c, m1); |
| return help_branch(s, &c, is_imm, imm, o->in2); |
| } |
| |
/* BRANCH ON COUNT (32-bit): decrement the low half of R1 and branch
   if the result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Decrement, write back only the low 32 bits, and compare the
       32-bit result against zero.  */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
| |
/* BRANCH ON COUNT HIGH: like op_bct32 but operating on the high half
   of R1; the target is always PC-relative.  */
static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Shift the high word down, decrement, and write it back to the
       high half of the register.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
| |
/* BRANCH ON COUNT (64-bit): decrement R1 in place and branch if the
   result is non-zero.  The register itself is used as the comparison
   operand (g1 = true), so it must not be freed by help_branch.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
| |
/* BRANCH ON INDEX (32-bit, BXH/BXLE family): add R3 to R1, compare
   the 32-bit sum against the odd register of the R3 pair, and branch
   accordingly.  insn->data selects the comparison sense (LE vs GT).  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    /* The comparand is the odd register of the R3 pair.  */
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
| |
/* BRANCH ON INDEX (64-bit, BXHG/BXLEG family): as op_bx32, but the
   whole register is updated and compared.  insn->data selects the
   comparison sense (LE vs GT).  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If R1 is also the comparand register, capture its value before
       the add below clobbers it.  */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
| |
/* COMPARE AND BRANCH (CRJ/CGRJ/CIJ/... family): compare in1 with in2
   using the relation encoded in M3, then branch.  insn->data selects
   the unsigned (logical) variants.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    /* Both comparison operands are globals owned by the caller.  */
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        /* Register-and-storage form: compute the branch address.  */
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
| |
/* COMPARE (BFP short/long/extended): the helpers compute the CC.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* 128-bit operands are passed as high/low register pairs.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
| |
/* CONVERT TO FIXED: BFP source (short/long/extended) to a 32-bit
   (cf*) or 64-bit (cg*) signed integer, with rounding mode M3.
   The CC is derived from the source operand afterwards.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
| |
/* CONVERT TO LOGICAL: like op_cf*/op_cg* above, but the result is an
   unsigned 32-bit (clf*) or 64-bit (clg*) integer.  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
| |
/* CONVERT FROM FIXED / FROM LOGICAL: integer source to BFP result
   (short/long/extended), rounding per M3.  The *lgb variants take an
   unsigned source.  Extended results return the low half via
   return_low128.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
| |
/* CHECKSUM: the helper accumulates the checksum and returns the number
   of bytes processed, which is then used to advance the R2 address and
   shrink the R2+1 length.  CC is set by the helper.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    /* Advance the source address and reduce the remaining length.  */
    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
| |
/* COMPARE LOGICAL (character): compare L1+1 bytes of storage.  Power-
   of-two sizes are inlined as a pair of loads with an unsigned compare
   CC; everything else goes through the byte-wise helper.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: the helper does the comparison and
           produces the CC itself.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
| |
/* COMPARE LOGICAL LONG EXTENDED: fully handled by a helper, which
   updates the register pairs and computes the CC.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
| |
/* COMPARE LOGICAL CHARACTERS UNDER MASK: the helper compares the
   register bytes selected by M3 against storage and sets the CC.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
| |
/* COMPARE LOGICAL STRING: the helper scans using the terminator in
   regs[0], sets the CC, and produces the updated operand addresses
   back in in1 (directly) and in2 (via the low-128 return slot) for
   write-back.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
| |
| static ExitStatus op_cps(DisasContext *s, DisasOps *o) |
| { |
| TCGv_i64 t = tcg_temp_new_i64(); |
| tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull); |
| tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull); |
| tcg_gen_or_i64(o->out, o->out, t); |
| tcg_temp_free_i64(t); |
| return NO_EXIT; |
| } |
| |
/* COMPARE AND SWAP (CS/CSY/CSG): compare R1 against storage; if equal,
   store R3; either way R1 receives the old storage contents and the CC
   reflects the comparison.  insn->data selects the 64-bit form.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    int is_64 = s->insn->data;
    TCGv_i64 addr, mem, cc, z;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    /* Load the memory into the (temporary) output.  While the PoO only talks
       about moving the memory to R1 on inequality, if we include equality it
       means that R1 is equal to the memory in all conditions.  */
    addr = get_address(s, 0, b2, d2);
    if (is_64) {
        tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
    }

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);

    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
       Recall that we are allowed to unconditionally issue the store (and
       thus any possible write trap), so (re-)store the original contents
       of MEM in case of inequality.  */
    z = tcg_const_i64(0);
    mem = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
    if (is_64) {
        tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(z);
    tcg_temp_free_i64(mem);
    tcg_temp_free_i64(addr);

    /* Store CC back to cc_op.  Wait until after the store so that any
       exception gets the old cc_op value.  */
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
| |
/* COMPARE DOUBLE AND SWAP (CDSG): 128-bit compare-and-swap built from
   two 64-bit halves.  All loads and stores are issued before any
   register state is updated, so a fault leaves state unchanged.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */

    addrh = get_address(s, 0, b2, d2);
    addrl = get_address(s, 0, b2, d2 + 8);
    outh = tcg_temp_new_i64();
    outl = tcg_temp_new_i64();

    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));

    /* Fold the double-word compare with arithmetic.  */
    cc = tcg_temp_new_i64();
    z = tcg_temp_new_i64();
    tcg_gen_xor_i64(cc, outh, regs[r1]);
    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
    tcg_gen_or_i64(cc, cc, z);
    tcg_gen_movi_i64(z, 0);
    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);

    /* On equality, store the new value; otherwise re-store what was
       read, so the write (and any write fault) is unconditional.  */
    memh = tcg_temp_new_i64();
    meml = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
    tcg_temp_free_i64(z);

    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
    tcg_temp_free_i64(memh);
    tcg_temp_free_i64(meml);
    tcg_temp_free_i64(addrh);
    tcg_temp_free_i64(addrl);

    /* Save back state now that we've passed all exceptions.  */
    tcg_gen_mov_i64(regs[r1], outh);
    tcg_gen_mov_i64(regs[r1 + 1], outl);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(outh);
    tcg_temp_free_i64(outl);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
| |
| #ifndef CONFIG_USER_ONLY |
/* COMPARE AND SWAP AND PURGE: privileged; the helper performs the
   operation and sets the CC.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
| #endif |
| |
/* CONVERT TO DECIMAL: convert the 32-bit value in in1 via helper and
   store the 8-byte result at the address in in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
| |
/* COMPARE AND TRAP: trap when the M3-encoded relation between in1 and
   in2 holds; insn->data selects the unsigned (logical) variants.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    /* Branch around the trap when the condition does NOT hold.  */
    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return NO_EXIT;
}
| |
| #ifndef CONFIG_USER_ONLY |
/* DIAGNOSE: privileged hypercall interface.  PSW address and CC are
   synced before the helper so it sees accurate machine state.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    update_psw_addr(s);
    gen_op_calc_cc(s);

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
| #endif |
| |
/* DIVIDE (integer): the helpers produce both halves of the
   remainder/quotient pair — one written directly, the other fetched
   via the low-128 return slot.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 64-bit unsigned divide takes a 128-bit dividend in out/out2.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE (BFP short/long/extended).  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
| |
/* EXTRACT ACCESS REGISTER: copy access register R2 to the output.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}
| |
/* EXTRACT CPU ATTRIBUTE: we model no cache topology, so always
   return -1 ("no cache information provided").  */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}
| |
/* EXTRACT FPC: read the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
| |
/* EXTRACT PSW: store the high half of the PSW mask into R1 and, if R2
   is non-zero, the low half into R2.  */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
| |
/* EXECUTE: run the target instruction (possibly modified by R1) via a
   helper.  PSW address and CC are synced first so the helper sees
   accurate state.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    return NO_EXIT;
}
| |
/* LOAD FP INTEGER (short/long/extended): round the BFP operand to an
   integral value, rounding mode per M3.  */
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Extended: 128-bit result; the low half arrives via return_low128.  */
static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
| |
/* FIND LEFTMOST ONE: R1 gets the bit number of the leftmost one bit
   (64 if the input is zero); R1+1 gets the input with that bit
   cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
| |
| static ExitStatus op_icm(DisasContext *s, DisasOps *o) |
| { |
| int m3 = get_field(s->fields, m3); |
| int pos, len, base = s->insn->data; |
| TCGv_i64 tmp = tcg_temp_new_i64(); |
| uint64_t ccm; |
| |
| switch (m3) { |
| case 0xf: |
| /* Effectively a 32-bit load. */ |
| tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s)); |
| len = 32; |
| goto one_insert; |
| |
| case 0xc: |
| case 0x6: |
| case 0x3: |
| /* Effectively a 16-bit load. */ |
| tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s)); |
| len = 16; |
| goto one_insert; |
| |
| case 0x8: |
| case 0x4: |
| case 0x2: |
| case 0x1: |
| /* Effectively an 8-bit load. */ |
| tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s)); |
| len = 8; |
| goto one_insert; |
| |
| one_insert: |
| pos = base + ctz32(m3) * 8; |
| tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len); |
| ccm = ((1ull << len) - 1) << pos; |
| break; |
| |
| default: |
| /* This is going to be a sequence of loads and inserts. */ |
| pos = base + 32 - 8; |
| ccm = 0; |
| while (m3) { |
| if (m3 & 0x8) { |
| tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s)); |
| tcg_gen_addi_i64(o->in2, o->in2, 1); |
| tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8); |
| ccm |= 0xff << pos; |
| } |
| m3 = (m3 << 1) & 0xf; |
| pos -= 8; |
| } |
| break; |
| } |
| |
| tcg_gen_movi_i64(tmp, ccm); |
| gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out); |
| tcg_temp_free_i64(tmp); |
| return NO_EXIT; |
| } |
| |
/* INSERT IMMEDIATE under a field of the register (e.g. IIHH/IILL):
   insn->data packs the field's bit offset (low byte) and width.  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
| |
/* INSERT PROGRAM MASK: deposit the CC and the PSW program-mask field
   into bits 24-31 of the output, leaving the rest unchanged.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    /* Clear the destination byte first.  */
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Extract the program-mask bits from the PSW into position.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Merge in the condition code.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
| |
| #ifndef CONFIG_USER_ONLY |
/* IPTE (invalidate page table entry) -- privileged; all work is done
   in the helper.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
| |
/* ISKE (insert storage key extended) -- privileged; key lookup is done
   in the helper.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
| #endif |
| |
/* LDEB: lengthen short BFP (in2) to long, via helper.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
| |
/* LEDB: round long BFP (in2) to short, via helper.  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
| |
/* LDXB: round extended BFP (in1:in2 pair) to long, via helper.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
| |
/* LEXB: round extended BFP (in1:in2 pair) to short, via helper.  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
| |
/* LXDB: lengthen long BFP (in2) to extended; low half of the 128-bit
   result is fetched from the helper's retxl slot.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
| |
/* LXEB: lengthen short BFP (in2) to extended; low half of the 128-bit
   result is fetched from the helper's retxl slot.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
| |
/* Load 31-bit value: mask in2 down to its low 31 bits (used for the
   LLGT family).  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
| |
/* Sign-extending 8-bit load from the address in in2.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
| |
/* Zero-extending 8-bit load from the address in in2.  */
static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
| |
/* Sign-extending 16-bit load from the address in in2.  */
static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
| |
/* Zero-extending 16-bit load from the address in in2.  */
static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
| |
/* Sign-extending 32-bit load from the address in in2.  */
static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
| |
/* Zero-extending 32-bit load from the address in in2.  */
static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
| |
/* 64-bit load from the address in in2.  */
static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
| |
/* LOAD AND TRAP (32-bit): store in2 into the low half of r1, then
   raise a trap if the loaded value was zero.  */
static ExitStatus op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
| |
/* LOAD AND TRAP (64-bit): load out from memory at in2, then raise a
   trap if the loaded value was zero.  */
static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
| |
/* LOAD HIGH AND TRAP: store in2 into the high half of r1, then raise
   a trap if the loaded value was zero.  */
static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
| |
/* LOAD LOGICAL AND TRAP: zero-extending 32-bit load into out, then
   raise a trap if the loaded value was zero.  */
static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
| |
/* LOAD LOGICAL 31-BIT AND TRAP: mask in2 to 31 bits, then raise a
   trap if the result was zero.  */
static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
| |
/* LOAD ON CONDITION: out = in2 if the condition encoded in m3 holds,
   otherwise out keeps its old value (passed in as in1).  */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        /* 64-bit comparands: a single movcond does the select.  */
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit comparands: materialize the condition as 0/1, widen
           it to 64 bits, and select against zero.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
| |
| #ifndef CONFIG_USER_ONLY |
/* LCTL (load control, 32-bit) -- privileged; may fault on the memory
   access, so the page-fault state is flushed first.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
| |
/* LCTLG (load control, 64-bit) -- privileged; may fault on the memory
   access, so the page-fault state is flushed first.  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LRA (load real address) -- privileged; translation result and CC
   come from the helper.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
| |
/* LPSW: load a short (two-word) PSW from memory at in2 and install it.
   Privileged; ends the TB since the PSW (and thus control flow)
   changes.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
| |
/* LPSWE: load an extended (two-doubleword) PSW from memory at in2 and
   install it.  Privileged; ends the TB since the PSW changes.  */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
| #endif |
| |
/* LAM: load access registers r1..r3 from memory at in2, via helper.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
| |
| static ExitStatus op_lm32(DisasContext *s, DisasOps *o) |
| { |
| int r1 = get_field(s->fields, r1); |
| int r3 = get_field(s->fields, r3); |
| TCGv_i64 t1, t2; |
| |
| /* Only one register to read. */ |
| t1 = tcg_temp_new_i64(); |
| if (unlikely(r1 == r3)) { |
| tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); |
| store_reg32_i64(r1, t1); |
| tcg_temp_free(t1); |
| return NO_EXIT; |
| } |
| |
| /* First load the values of the first and last registers to trigger |
| possible page faults. */ |
| t2 = tcg_temp_new_i64(); |
| tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); |
| tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15)); |
| tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s)); |
| store_reg32_i64(r1, t1); |
| store_reg32_i64(r3, t2); |
| |
| /* Only two registers to read. */ |
| if (((r1 + 1) & 15) == r3) { |
| tcg_temp_free(t2); |
| tcg_temp_free(t1); |
| return NO_EXIT; |
| } |
| |
| /* Then load the remaining registers. Page fault can't occur. */ |
| r3 = (r3 - 1) & 15; |
| tcg_gen_movi_i64(t2, 4); |
| while (r1 != r3) { |
| r1 = (r1 + 1) & 15; |
| tcg_gen_add_i64(o->in2, o->in2, t2); |
| tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); |
| store_reg32_i64(r1, t1); |
| } |
| tcg_temp_free(t2); |
| tcg_temp_free(t1); |
| |
| return NO_EXIT; |
| } |
| |
| static ExitStatus op_lmh(DisasContext *s, DisasOps *o) |
| { |
| int r1 = get_field(s->fields, r1); |
| int r3 = get_field(s->fields, r3); |
| TCGv_i64 t1, t2; |
| |
| /* Only one register to read. */ |
| t1 = tcg_temp_new_i64(); |
| if (unlikely(r1 == r3)) { |
| tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); |
| store_reg32h_i64(r1, t1); |
| tcg_temp_free(t1); |
| return NO_EXIT; |
| } |
| |
| /* First load the values of the first and last registers to trigger |
| possible page faults. */ |
| t2 = tcg_temp_new_i64(); |
| tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); |
| tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15)); |
| tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s)); |
| store_reg32h_i64(r1, t1); |
| store_reg32h_i64(r3, t2); |
| |
| /* Only two registers to read. */ |
| if (((r1 + 1) & 15) == r3) { |
| tcg_temp_free(t2); |
| tcg_temp_free(t1); |
| return NO_EXIT; |
| } |
| |
| /* Then load the remaining registers. Page fault can't occur. */ |
| r3 = (r3 - 1) & 15; |
| tcg_gen_movi_i64(t2, 4); |
| while (r1 != r3) { |
| r1 = (r1 + 1) & 15; |
| tcg_gen_add_i64(o->in2, o->in2, t2); |
| tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); |
| store_reg32h_i64(r1, t1); |
| } |
| tcg_temp_free(t2); |
| tcg_temp_free(t1); |
| |
| return NO_EXIT; |
| } |
| |
| static ExitStatus op_lm64(DisasContext *s, DisasOps *o) |
| { |
| int r1 = get_field(s->fields, r1); |
| int r3 = get_field(s->fields, r3); |
| TCGv_i64 t1, t2; |
| |
| /* Only one register to read. */ |
| if (unlikely(r1 == r3)) { |
| tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s)); |
| return NO_EXIT; |
| } |
| |
| /* First load the values of the first and last registers to trigger |
| possible page faults. */ |
| t1 = tcg_temp_new_i64(); |
| t2 = tcg_temp_new_i64(); |
| tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s)); |
| tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15)); |
| tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s)); |
| tcg_gen_mov_i64(regs[r1], t1); |
| tcg_temp_free(t2); |
| |
| /* Only two registers to read. */ |
| if (((r1 + 1) & 15) == r3) { |
| tcg_temp_free(t1); |
| return NO_EXIT; |
| } |
| |
| /* Then load the remaining registers. Page fault can't occur. */ |
| r3 = (r3 - 1) & 15; |
| tcg_gen_movi_i64(t1, 8); |
| while (r1 != r3) { |
| r1 = (r1 + 1) & 15; |
| tcg_gen_add_i64(o->in2, o->in2, t1); |
| tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s)); |
| } |
| tcg_temp_free(t1); |
| |
| return NO_EXIT; |
| } |
| |
| #ifndef CONFIG_USER_ONLY |
/* LURA (load using real address, 32-bit) -- privileged; the real-mode
   access is done in the helper.  */
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
| |
/* LURAG (load using real address, 64-bit) -- privileged; the real-mode
   access is done in the helper.  */
static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
| #endif |
| |
/* Generic move: steal the in2 temp as the output (no copy emitted),
   transferring its "is a global" flag and clearing in2 so the generic
   cleanup code does not free it twice.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}
| |
/* Move with access-register-1 update: like op_mov2, but additionally
   set access register 1 according to the current address-space control
   (ASC) mode from the TB flags.  */
static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    /* Steal in2 as the output, as in op_mov2.  */
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;

    /* The switch covers all four ASC modes, so ar1 is always set.  */
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> 32:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> 32:
        if (b2) {
            /* Copy the access register selected by the base field.  */
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> 32:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return NO_EXIT;
}
| |
/* Move a register pair: steal both input temps as the two outputs
   (no copies emitted), transferring their "is a global" flags and
   clearing the inputs so they are not freed twice.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
| |
/* MVC: move l1+1 bytes from the second operand (in2) to the first
   operand address (addr1), via helper.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
| |
/* MVCL: move long between the register pairs r1 and r2; the helper
   updates the registers and returns the CC.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
| |
/* MVCLE: move long extended; the helper updates the register pairs
   and returns the CC.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
| |
| #ifndef CONFIG_USER_ONLY |
/* MVCP (move to primary) -- privileged; the length register is taken
   from the l1 field, CC from the helper.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
| |
/* MVCS (move to secondary) -- privileged; the length register is taken
   from the l1 field, CC from the helper.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
| #endif |
| |
/* MVPG: move page; r0 carries the operation controls, CC comes from
   the helper.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
| |
/* MVST: move string terminated by the character in r0.  The helper
   returns the updated first-operand address in in1 and the updated
   second-operand address via the retxl slot (written back to in2).  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
| |
| static ExitStatus op_mul(DisasContext *s, DisasOps *o) |
| { |
| tcg_gen_mul_i64(o->out, o->in1, o->in2 |