blob: 9a50df9a88fbc7ccb6e56408eab6a51e2a6ce372 [file] [log] [blame]
/*
* TriCore emulation for qemu: main translation routines.
*
* Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "tricore-opcodes.h"
#include "exec/log.h"
/*
 * TCG registers
 */
/* Cached TCG values for the special-purpose core registers. */
static TCGv cpu_PC;          /* program counter */
static TCGv cpu_PCXI;        /* previous context information */
static TCGv cpu_PSW;         /* program status word (flags cached separately) */
static TCGv cpu_ICR;         /* interrupt control register */
/* GPR registers */
static TCGv cpu_gpr_a[16];   /* address registers a0..a15 (a10 = sp) */
static TCGv cpu_gpr_d[16];   /* data registers d0..d15 */
/* PSW Flag cache */
/* Individual PSW status bits kept in their own TCG values so that
   arithmetic ops can update them without read-modify-write on PSW. */
static TCGv cpu_PSW_C;       /* carry */
static TCGv cpu_PSW_V;       /* overflow (bit 31 significant) */
static TCGv cpu_PSW_SV;      /* sticky overflow */
static TCGv cpu_PSW_AV;      /* advance overflow */
static TCGv cpu_PSW_SAV;     /* sticky advance overflow */
/* CPU env */
static TCGv_env cpu_env;
#include "exec/gen-icount.h"
/* Register names used when creating the TCG globals. */
static const char *regnames_a[] = {
      "a0"  , "a1"  , "a2"  , "a3" , "a4"  , "a5" ,
      "a6"  , "a7"  , "a8"  , "a9" , "sp" , "a11" ,
      "a12" , "a13" , "a14" , "a15",
    };
static const char *regnames_d[] = {
      "d0"  , "d1"  , "d2"  , "d3" , "d4"  , "d5"  ,
      "d6"  , "d7"  , "d8"  , "d9" , "d10" , "d11" ,
      "d12" , "d13" , "d14" , "d15",
    };
/* Per-translation-block decoder state. */
typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong pc, saved_pc, next_pc;  /* current / saved / following insn */
    uint32_t opcode;                     /* raw instruction word */
    int singlestep_enabled;
    /* Routine used to access memory */
    int mem_idx;                         /* MMU index for qemu_ld/st ops */
    uint32_t hflags, saved_hflags;       /* translation flags (see TRICORE_HFLAG_*) */
    int bstate;                          /* block termination state (BS_*) */
} DisasContext;
/* Translation-block termination states. */
enum {
    BS_NONE   = 0,
    BS_STOP   = 1,
    BS_BRANCH = 2,
    BS_EXCP   = 3,
};
/* Halfword-selection modes for packed multiply/MAC instructions:
   first letter selects the half of the first operand, second letter
   the half of the second operand (L = lower 16 bits, U = upper). */
enum {
    MODE_LL = 0,
    MODE_LU = 1,
    MODE_UL = 2,
    MODE_UU = 3,
};
/* Dump the architectural CPU state (special registers and both GPR
   banks) to the given stream via cpu_fprintf.  'flags' is unused. */
void tricore_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    TriCoreCPU *cpu = TRICORE_CPU(cs);
    CPUTriCoreState *env = &cpu->env;
    uint32_t psw = psw_read(env);
    int reg;

    cpu_fprintf(f, "PC: " TARGET_FMT_lx, env->PC);
    cpu_fprintf(f, " PSW: " TARGET_FMT_lx, psw);
    cpu_fprintf(f, " ICR: " TARGET_FMT_lx, env->ICR);
    cpu_fprintf(f, "\nPCXI: " TARGET_FMT_lx, env->PCXI);
    cpu_fprintf(f, " FCX: " TARGET_FMT_lx, env->FCX);
    cpu_fprintf(f, " LCX: " TARGET_FMT_lx, env->LCX);

    /* Address registers, four per line. */
    for (reg = 0; reg < 16; reg++) {
        if (reg % 4 == 0) {
            cpu_fprintf(f, "\nGPR A%02d:", reg);
        }
        cpu_fprintf(f, " " TARGET_FMT_lx, env->gpr_a[reg]);
    }
    /* Data registers, four per line. */
    for (reg = 0; reg < 16; reg++) {
        if (reg % 4 == 0) {
            cpu_fprintf(f, "\nGPR D%02d:", reg);
        }
        cpu_fprintf(f, " " TARGET_FMT_lx, env->gpr_d[reg]);
    }
    cpu_fprintf(f, "\n");
}
/*
 * Functions to generate micro-ops
 */
/* Makros for generating helpers */
/* Wrap an immediate in a temporary TCGv_i32 and pass it (together with
   cpu_env) to a one-argument helper. */
#define gen_helper_1arg(name, arg) do {                           \
    TCGv_i32 helper_tmp = tcg_const_i32(arg);                     \
    gen_helper_##name(cpu_env, helper_tmp);                       \
    tcg_temp_free_i32(helper_tmp);                                \
    } while (0)
/* GEN_HELPER_<mode> split the two 32-bit operands into sign-extended
   16-bit halves and call the 4-operand halfword helper.  The mode
   letters name which halves are selected (L = low, U = upper).  For
   the LL and UU cases the same half of arg1 is passed twice. */
#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
#define GEN_HELPER_LU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg10, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg10);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
#define GEN_HELPER_UL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg10, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg10);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg01, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg00, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
/* Call a helper taking a 64-bit register pair (al1:ah1) plus arg2 and
   returning a 64-bit result split back into rl/rh. */
#define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do {    \
    TCGv_i64 ret = tcg_temp_new_i64();                       \
    TCGv_i64 arg1 = tcg_temp_new_i64();                      \
                                                             \
    tcg_gen_concat_i32_i64(arg1, al1, ah1);                  \
    gen_helper_##name(ret, arg1, arg2);                      \
    tcg_gen_extr_i64_i32(rl, rh, ret);                       \
                                                             \
    tcg_temp_free_i64(ret);                                  \
    tcg_temp_free_i64(arg1);                                 \
} while (0)
/* Call a helper taking cpu_env and two 32-bit args, returning a 64-bit
   result split back into rl/rh. */
#define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do {        \
    TCGv_i64 ret = tcg_temp_new_i64();                      \
                                                            \
    gen_helper_##name(ret, cpu_env, arg1, arg2);            \
    tcg_gen_extr_i64_i32(rl, rh, ret);                      \
                                                            \
    tcg_temp_free_i64(ret);                                 \
} while (0)
/* Build an effective address from the ABS instruction-format constant. */
#define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
/* Build an absolute branch target from the B instruction-format offset. */
#define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
                           ((offset & 0x0fffff) << 1))
/* For two 32-bit registers used a 64-bit register, the first
    registernumber needs to be even. Otherwise we trap. */
static inline void generate_trap(DisasContext *ctx, int class, int tin);
#define CHECK_REG_PAIR(reg) do {                      \
    if (reg & 0x1) {                                  \
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_OPD); \
    }                                                 \
} while (0)
/* Functions for load/save to/from memory */
/* Load into r1 from address r2 + con with the given memory op. */
static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
                                 int16_t con, TCGMemOp mop)
{
    TCGv addr = tcg_temp_new();

    tcg_gen_addi_tl(addr, r2, con);
    tcg_gen_qemu_ld_tl(r1, addr, ctx->mem_idx, mop);
    tcg_temp_free(addr);
}
/* Store r1 to address r2 + con with the given memory op. */
static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
                                 int16_t con, TCGMemOp mop)
{
    TCGv addr = tcg_temp_new();

    tcg_gen_addi_tl(addr, r2, con);
    tcg_gen_qemu_st_tl(r1, addr, ctx->mem_idx, mop);
    tcg_temp_free(addr);
}
/* Store the register pair rh:rl as one little-endian 64-bit value. */
static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
{
    TCGv_i64 pair = tcg_temp_new_i64();

    tcg_gen_concat_i32_i64(pair, rl, rh);
    tcg_gen_qemu_st_i64(pair, address, ctx->mem_idx, MO_LEQ);
    tcg_temp_free_i64(pair);
}
/* Store the pair rh:rl at base + con. */
static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
                                DisasContext *ctx)
{
    TCGv addr = tcg_temp_new();

    tcg_gen_addi_tl(addr, base, con);
    gen_st_2regs_64(rh, rl, addr, ctx);
    tcg_temp_free(addr);
}
/* Load a little-endian 64-bit value and split it into the pair rh:rl. */
static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
{
    TCGv_i64 pair = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(pair, address, ctx->mem_idx, MO_LEQ);
    /* write back to two 32 bit regs */
    tcg_gen_extr_i64_i32(rl, rh, pair);
    tcg_temp_free_i64(pair);
}
/* Load the pair rh:rl from base + con. */
static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
                                DisasContext *ctx)
{
    TCGv addr = tcg_temp_new();

    tcg_gen_addi_tl(addr, base, con);
    gen_ld_2regs_64(rh, rl, addr, ctx);
    tcg_temp_free(addr);
}
/* Pre-increment store: r2 += off, then store r1 to the new address. */
static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
                           TCGMemOp mop)
{
    TCGv new_addr = tcg_temp_new();

    tcg_gen_addi_tl(new_addr, r2, off);
    tcg_gen_qemu_st_tl(r1, new_addr, ctx->mem_idx, mop);
    /* write the incremented address back to the base register */
    tcg_gen_mov_tl(r2, new_addr);
    tcg_temp_free(new_addr);
}
/* Pre-increment load: r2 += off, then load r1 from the new address. */
static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
                           TCGMemOp mop)
{
    TCGv new_addr = tcg_temp_new();

    tcg_gen_addi_tl(new_addr, r2, off);
    tcg_gen_qemu_ld_tl(r1, new_addr, ctx->mem_idx, mop);
    /* write the incremented address back to the base register */
    tcg_gen_mov_tl(r2, new_addr);
    tcg_temp_free(new_addr);
}
/* LDMST: load-modify-store under mask.
   M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]);
   E[a] must be an even/odd register pair (trap otherwise). */
static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea)
{
    TCGv mem_val = tcg_temp_new();
    TCGv ins_val = tcg_temp_new();

    CHECK_REG_PAIR(ereg);
    /* mem_val = M(EA, word) */
    tcg_gen_qemu_ld_tl(mem_val, ea, ctx->mem_idx, MO_LEUL);
    /* clear the masked bits: mem_val &= ~E[a][63:32] */
    tcg_gen_andc_tl(mem_val, mem_val, cpu_gpr_d[ereg+1]);
    /* ins_val = E[a][31:0] & E[a][63:32] */
    tcg_gen_and_tl(ins_val, cpu_gpr_d[ereg], cpu_gpr_d[ereg+1]);
    /* merge and write back */
    tcg_gen_or_tl(mem_val, mem_val, ins_val);
    tcg_gen_qemu_st_tl(mem_val, ea, ctx->mem_idx, MO_LEUL);

    tcg_temp_free(mem_val);
    tcg_temp_free(ins_val);
}
/* SWAP.W: exchange D[a] with the word at EA.
   tmp = M(EA, word);
   M(EA, word) = D[a];
   D[a] = tmp[31:0]; */
static void gen_swap(DisasContext *ctx, int reg, TCGv ea)
{
    TCGv old_val = tcg_temp_new();

    tcg_gen_qemu_ld_tl(old_val, ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_qemu_st_tl(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_mov_tl(cpu_gpr_d[reg], old_val);
    tcg_temp_free(old_val);
}
/* CMPSWAP.W: if M(EA) == D[reg+1], store D[reg] to EA, else store the
   loaded value back; D[reg] always receives the old memory value. */
static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea)
{
    TCGv old_val = tcg_temp_new();
    TCGv store_val = tcg_temp_new();

    tcg_gen_qemu_ld_tl(old_val, ea, ctx->mem_idx, MO_LEUL);
    /* store_val = (D[reg+1] == old_val) ? D[reg] : old_val */
    tcg_gen_movcond_tl(TCG_COND_EQ, store_val, cpu_gpr_d[reg+1], old_val,
                       cpu_gpr_d[reg], old_val);
    tcg_gen_qemu_st_tl(store_val, ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_mov_tl(cpu_gpr_d[reg], old_val);

    tcg_temp_free(old_val);
    tcg_temp_free(store_val);
}
/* SWAPMSK.W: merge D[reg] into the word at EA under mask D[reg+1];
   D[reg] receives the old memory value. */
static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea)
{
    TCGv old_val = tcg_temp_new();
    TCGv merged = tcg_temp_new();
    TCGv kept = tcg_temp_new();

    tcg_gen_qemu_ld_tl(old_val, ea, ctx->mem_idx, MO_LEUL);
    /* bits taken from the register, selected by the mask */
    tcg_gen_and_tl(merged, cpu_gpr_d[reg], cpu_gpr_d[reg+1]);
    /* bits kept from memory, outside the mask */
    tcg_gen_andc_tl(kept, old_val, cpu_gpr_d[reg+1]);
    tcg_gen_or_tl(merged, merged, kept);
    tcg_gen_qemu_st_tl(merged, ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_mov_tl(cpu_gpr_d[reg], old_val);

    tcg_temp_free(old_val);
    tcg_temp_free(merged);
    tcg_temp_free(kept);
}
/* We generate loads and store to core special function register (csfr) through
    the function gen_mfcr and gen_mtcr. To handle access permissions, we use 3
    makros R, A and E, which allow read-only, all and endinit protected access.
    These makros also specify in which ISA version the csfr was introduced. */
/* Read variant: every csfr listed in csfr.def becomes a switch case that
   loads the field from CPUTriCoreState, guarded by its feature bit. */
#define R(ADDRESS, REG, FEATURE)                                         \
    case ADDRESS:                                                        \
        if (tricore_feature(env, FEATURE)) {                             \
            tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
        }                                                                \
        break;
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
#define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
/* MFCR: move the csfr at 'offset' into ret. */
static inline void gen_mfcr(CPUTriCoreState *env, TCGv ret, int32_t offset)
{
    /* since we're caching PSW make this a special case */
    if (offset == 0xfe04) {
        gen_helper_psw_read(ret, cpu_env);
    } else {
        switch (offset) {
#include "csfr.def"
        }
    }
}
#undef R
#undef A
#undef E
/* Write variants: read-only registers expand to nothing; all-access
   registers become a guarded store into CPUTriCoreState. */
#define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
                                    since no execption occurs */
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)                \
    case ADDRESS:                                                        \
        if (tricore_feature(env, FEATURE)) {                             \
            tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG));  \
        }                                                                \
        break;
/* Endinit protected registers
   TODO: Since the endinit bit is in a register of a not yet implemented
         watchdog device, we handle endinit protected registers like
         all-access registers for now. */
#define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
/* MTCR: move r1 into the csfr at 'offset'; only allowed in supervisor
   mode (PSW.IO), otherwise a privilege trap should be raised (TODO). */
static inline void gen_mtcr(CPUTriCoreState *env, DisasContext *ctx, TCGv r1,
                            int32_t offset)
{
    if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) {
        /* since we're caching PSW make this a special case */
        if (offset == 0xfe04) {
            gen_helper_psw_write(cpu_env, r1);
        } else {
            switch (offset) {
#include "csfr.def"
            }
        }
    } else {
        /* generate privilege trap */
    }
}
/* Functions for arithmetic instructions */
/* ret = r1 + r2, updating the cached PSW V/SV/AV/SAV bits.
   V is computed as signed-overflow-in-bit-31; AV as bit31 ^ bit30 of
   the result; SV/SAV accumulate (sticky). */
static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();
    /* Addition and set V/SV bits */
    tcg_gen_add_tl(result, r1, r2);
    /* calc V bit */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result (after flag calc, so ret may alias r1/r2) */
    tcg_gen_mov_tl(ret, result);
    tcg_temp_free(result);
    tcg_temp_free(t0);
}
/* 64-bit signed add: ret = r1 + r2, with PSW V/SV/AV/SAV derived from
   the high 32 bits of the 64-bit result. */
static inline void
gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv temp = tcg_temp_new();
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 result = tcg_temp_new_i64();
    tcg_gen_add_i64(result, r1, r2);
    /* calc v bit (signed overflow of the 64-bit add) */
    tcg_gen_xor_i64(t1, result, r1);
    tcg_gen_xor_i64(t0, r1, r2);
    tcg_gen_andc_i64(t1, t1, t0);
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* calc AV/SAV bits (from the high word of the result) */
    tcg_gen_extrh_i64_i32(temp, result);
    tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
    tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_i64(ret, result);
    tcg_temp_free(temp);
    tcg_temp_free_i64(result);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
/* Apply op1 to (r1_low, r2) and op2 to (r1_high, r3) -- each op is
   tcg_gen_add_tl or tcg_gen_sub_tl -- producing a 2x32-bit packed
   result.  V is the OR of the per-word overflow bits (add uses andc,
   sub uses and, matching each op's overflow rule); AV combines both
   words' bit31^bit30; SV/SAV are sticky. */
static inline void
gen_addsub64_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
               TCGv r3, void(*op1)(TCGv, TCGv, TCGv),
               void(*op2)(TCGv, TCGv, TCGv))
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv temp4 = tcg_temp_new();
    (*op1)(temp, r1_low, r2);
    /* calc V0 bit */
    tcg_gen_xor_tl(temp2, temp, r1_low);
    tcg_gen_xor_tl(temp3, r1_low, r2);
    if (op1 == tcg_gen_add_tl) {
        tcg_gen_andc_tl(temp2, temp2, temp3);
    } else {
        tcg_gen_and_tl(temp2, temp2, temp3);
    }
    (*op2)(temp3, r1_high, r3);
    /* calc V1 bit */
    tcg_gen_xor_tl(cpu_PSW_V, temp3, r1_high);
    tcg_gen_xor_tl(temp4, r1_high, r3);
    if (op2 == tcg_gen_add_tl) {
        tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, temp4);
    } else {
        tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp4);
    }
    /* combine V0/V1 bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp2);
    /* calc sv bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* write result */
    tcg_gen_mov_tl(ret_low, temp);
    tcg_gen_mov_tl(ret_high, temp3);
    /* calc AV bit */
    tcg_gen_add_tl(temp, ret_low, ret_low);
    tcg_gen_xor_tl(temp, temp, ret_low);
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, ret_high);
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free(temp4);
}
/* ret = r2 + (r1 * r3);
   Computed in 64 bits so that V can be detected by range-checking the
   exact result against the signed 32-bit range. */
static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(t1, r1);
    tcg_gen_ext_i32_i64(t2, r2);
    tcg_gen_ext_i32_i64(t3, r3);
    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_add_i64(t1, t2, t1);
    tcg_gen_extrl_i64_i32(ret, t1);
    /* calc V
       t1 > 0x7fffffff */
    tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
    /* t1 < -0x80000000 */
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
    tcg_gen_or_i64(t2, t2, t3);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
    /* move the 0/1 condition into bit 31, where PSW.V is cached */
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
/* Immediate form of gen_madd32_d: ret = r2 + (r1 * con). */
static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv imm = tcg_const_i32(con);

    gen_madd32_d(ret, r1, r2, imm);
    tcg_temp_free(imm);
}
/* 64-bit MAC: ret_high:ret_low = r2_high:r2_low + (r1 * r3), signed.
   The 32x32->64 multiply cannot overflow, so V is derived from the
   final 64-bit add only. */
static inline void
gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    TCGv t3 = tcg_temp_new();
    TCGv t4 = tcg_temp_new();
    tcg_gen_muls2_tl(t1, t2, r1, r3);
    /* only the add can overflow */
    tcg_gen_add2_tl(t3, t4, r2_low, r2_high, t1, t2);
    /* calc V bit (signed overflow of the high-word add) */
    tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
    tcg_gen_xor_tl(t1, r2_high, t2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t1);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits (from the high word) */
    tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
    tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back the result */
    tcg_gen_mov_tl(ret_low, t3);
    tcg_gen_mov_tl(ret_high, t4);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(t3);
    tcg_temp_free(t4);
}
/* Unsigned 64-bit MAC: ret_high:ret_low = r2_high:r2_low + (r1 * r3).
   The unsigned add overflows exactly when the sum is smaller than
   either addend, so V = (result < product). */
static inline void
gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t1, r1);
    tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
    tcg_gen_extu_i32_i64(t3, r3);
    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_add_i64(t2, t2, t1);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, t2);
    /* only the add overflows, if t2 < t1
       calc V bit */
    tcg_gen_setcond_i64(TCG_COND_LTU, t2, t2, t1);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
/* Immediate form of gen_madd64_d. */
static inline void
gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              int32_t con)
{
    TCGv imm = tcg_const_i32(con);

    gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, imm);
    tcg_temp_free(imm);
}
/* Immediate form of gen_maddu64_d (unsigned MAC). */
static inline void
gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
               int32_t con)
{
    TCGv imm = tcg_const_i32(con);

    gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, imm);
    tcg_temp_free(imm);
}
/* Packed halfword MAC: multiply the mode-selected halves of r2/r3 via
   the mul_h helper (two 32-bit products in temp64), then add them to
   the r1_high:r1_low pair word-wise with flag computation. */
static inline void
gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
           TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    /* split the two products; temp is reused for the low word */
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_add_tl, tcg_gen_add_tl);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
/* Packed halfword MAC, mixed: subtract the low product from r1_low and
   add the high product to r1_high (MADDSU variant). */
static inline void
gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_sub_tl, tcg_gen_add_tl);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
/* MADDSUM: form the 64-bit value ((high_product - low_product) << 16)
   and add it to the r1_high:r1_low pair with 64-bit flag computation. */
static inline void
gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high);
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);
    gen_add64_d(temp64_2, temp64_3, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);
    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
    tcg_temp_free_i64(temp64_3);
}
/* Forward declaration: saturating add, defined later in this file. */
static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2);
/* Saturating packed halfword MAC: each word of r1 is saturating-added
   to its product.  V/AV from the two gen_adds calls are OR-combined so
   either word's overflow is reported. */
static inline void
gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_adds(ret_low, r1_low, temp);
    /* save V/AV of the low-word add before the high-word add clobbers them */
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_adds(ret_high, r1_high, temp2);
    /* combine v bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(temp64);
}
/* Forward declaration: saturating subtract, defined later in this file. */
static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2);
/* Saturating mixed packed MAC: low word = r1_low -sat low_product,
   high word = r1_high +sat high_product; V/AV OR-combined. */
static inline void
gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_subs(ret_low, r1_low, temp);
    /* save V/AV of the subtract before the add clobbers them */
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_adds(ret_high, r1_high, temp2);
    /* combine v bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(temp64);
}
/* Saturating MADDSUM: ((high_product - low_product) << 16) added to the
   r1 pair with 64-bit saturation via the add64_ssov helper. */
static inline void
gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
               TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
}
/* MADDM: multiply via the mulm_h helper (64-bit product) and add the
   result to the r1 pair with 64-bit flag computation. */
static inline void
gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_add64_d(temp64_3, temp64_2, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);
    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
    tcg_temp_free_i64(temp64_3);
}
/* Saturating MADDM: like gen_maddm_h but the 64-bit add saturates via
   the add64_ssov helper. */
static inline void
gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
}
/* MADDR with a 64-bit accumulator pair: products via mul_h, rounding
   add via the addr_h helper. */
static inline void
gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
              uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    gen_helper_addr_h(ret, cpu_env, temp64, r1_low, r1_high);
    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
}
/* 32-bit MADDR: expand r1 into a pseudo 64-bit accumulator --
   low word = r1 << 16, high word = r1 & 0xffff0000 -- and reuse the
   64-bit variant. */
static inline void
gen_maddr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv acc_lo = tcg_temp_new();
    TCGv acc_hi = tcg_temp_new();

    tcg_gen_andi_tl(acc_hi, r1, 0xffff0000);
    tcg_gen_shli_tl(acc_lo, r1, 16);
    gen_maddr64_h(ret, acc_lo, acc_hi, r2, r3, n, mode);

    tcg_temp_free(acc_lo);
    tcg_temp_free(acc_hi);
}
/* MADDSUR: expand r1 into the pseudo accumulator pair (r1<<16,
   r1 & 0xffff0000) and apply the rounding sub/add helper addsur_h. */
static inline void
gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    /* temp is reused as the low accumulator word after the helper call */
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_addsur_h(ret, cpu_env, temp64, temp, temp2);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
/* Saturating MADDR with a 64-bit accumulator pair (addr_h_ssov helper). */
static inline void
gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
               uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    gen_helper_addr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);
    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
}
/* Saturating 32-bit MADDR: expand r1 into the pseudo accumulator pair
   (r1<<16, r1 & 0xffff0000) and reuse the saturating 64-bit variant. */
static inline void
gen_maddr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv acc_lo = tcg_temp_new();
    TCGv acc_hi = tcg_temp_new();

    tcg_gen_andi_tl(acc_hi, r1, 0xffff0000);
    tcg_gen_shli_tl(acc_lo, r1, 16);
    gen_maddr64s_h(ret, acc_lo, acc_hi, r2, r3, n, mode);

    tcg_temp_free(acc_lo);
    tcg_temp_free(acc_hi);
}
/* Saturating MADDSUR: like gen_maddsur32_h but using the saturating
   addsur_h_ssov helper. */
static inline void
gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    /* temp is reused as the low accumulator word after the helper call */
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_addsur_h_ssov(ret, cpu_env, temp64, temp, temp2);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
/* MADDR.Q: thin wrapper passing the shift count n to the maddr_q helper. */
static inline void
gen_maddr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
    TCGv shift = tcg_const_i32(n);

    gen_helper_maddr_q(ret, cpu_env, r1, r2, r3, shift);
    tcg_temp_free(shift);
}
/* MADDRS.Q: saturating variant, via the maddr_q_ssov helper. */
static inline void
gen_maddrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
    TCGv shift = tcg_const_i32(n);

    gen_helper_maddr_q_ssov(ret, cpu_env, r1, r2, r3, shift);
    tcg_temp_free(shift);
}
/* MADD.Q (32-bit result): ret = arg1 + ((arg2 * arg3) << n >> up_shift),
   computed in 64 bits; V set by range-checking against the signed
   32-bit range, with a correction for the host-visible overflow of
   (0x80000000 * 0x80000000) << 1 when n == 1. */
static inline void
gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
             uint32_t up_shift, CPUTriCoreState *env)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);
    tcg_gen_mul_i64(t2, t2, t3);
    tcg_gen_shli_i64(t2, t2, n);
    tcg_gen_ext_i32_i64(t1, arg1);
    tcg_gen_sari_i64(t2, t2, up_shift);
    tcg_gen_add_i64(t3, t1, t2);
    tcg_gen_extrl_i64_i32(temp3, t3);
    /* calc v bit */
    tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
    tcg_gen_or_i64(t1, t1, t2);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* We produce an overflow on the host if the mul before was
       (0x80000000 * 0x80000000) << 1). If this is the
       case, we negate the ovf. */
    if (n == 1) {
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
        tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
        tcg_gen_and_tl(temp, temp, temp2);
        tcg_gen_shli_tl(temp, temp, 31);
        /* negate v bit, if special condition */
        tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
    }
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
    tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, temp3);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
/* 16x16 MADD.Q: ret = arg1 + (arg2 * arg3) [<< 1 if n == 1], with
   saturation of the doubled product for arg2 == arg3 == 0x8000; flags
   computed by gen_add_d. */
static inline void
gen_m16add32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    gen_add_d(ret, arg1, temp);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
/* Saturating 16x16 MADD.Q: like gen_m16add32_q but the final add
   saturates (gen_adds). */
static inline void
gen_m16adds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    gen_adds(ret, arg1, temp);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
/* 16x16 MADD.Q with 64-bit accumulator: the (possibly doubled and
   saturated) product is widened, shifted left by 16, and added to the
   arg1_high:arg1_low pair with 64-bit flag computation. */
static inline void
gen_m16add64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
               TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    tcg_gen_ext_i32_i64(t2, temp);
    tcg_gen_shli_i64(t2, t2, 16);
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    gen_add64_d(t3, t1, t2);
    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t3);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
/*
 * Saturating 64-bit variant of gen_m16add64_q: the accumulation is done
 * by the add64_ssov helper, which saturates and updates the PSW.
 */
static inline void
gen_m16adds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
                TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000: clamp the wrapped product
           0x80000000 down to 0x7fffffff */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    tcg_gen_ext_i32_i64(t2, temp);
    tcg_gen_shli_i64(t2, t2, 16);
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    gen_helper_add64_ssov(t1, cpu_env, t1, t2);
    tcg_gen_extr_i64_i32(rl, rh, t1);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
/*
 * MADD.Q with 32x32 product and 64-bit accumulator:
 * rh:rl = arg1_high:arg1_low + (arg2 * arg3) << n, n = 0 or 1.
 * PSW_V is derived from the sign analysis of the 64-bit addition.
 * NOTE(review): env is not referenced in this function -- presumably
 * kept so all gen_*64_q variants share a signature; confirm with callers.
 */
static inline void
gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
             TCGv arg3, uint32_t n, CPUTriCoreState *env)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();
    TCGv temp, temp2;
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);
    tcg_gen_mul_i64(t2, t2, t3);
    if (n != 0) {
        tcg_gen_shli_i64(t2, t2, 1);
    }
    tcg_gen_add_i64(t4, t1, t2);
    /* calc v bit: overflow iff operands have equal signs and the result
       sign differs; taken from the top 32 bits */
    tcg_gen_xor_i64(t3, t4, t1);
    tcg_gen_xor_i64(t2, t1, t2);
    tcg_gen_andc_i64(t3, t3, t2);
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
    /* We produce an overflow on the host if the mul before was
       (0x80000000 * 0x80000000) << 1). If this is the
       case, we negate the ovf. */
    if (n == 1) {
        temp = tcg_temp_new();
        temp2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
        tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
        tcg_gen_and_tl(temp, temp, temp2);
        tcg_gen_shli_tl(temp, temp, 31);
        /* negate v bit, if special condition */
        tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
        tcg_temp_free(temp);
        tcg_temp_free(temp2);
    }
    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t4);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
    tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t4);
}
/*
 * Saturating 32-bit q-format MADD: the full 64-bit product is shifted
 * down by (up_shift - n) and then added/saturated by the helper.
 */
static inline void
gen_madds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
              uint32_t up_shift)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(t1, arg1);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);
    tcg_gen_mul_i64(t2, t2, t3);
    tcg_gen_sari_i64(t2, t2, up_shift - n);
    gen_helper_madd32_q_add_ssov(ret, cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
/*
 * Saturating 64-bit q-format MADD: pack the accumulator pair and
 * delegate the multiply-add-saturate (and PSW update) to the helper.
 */
static inline void
gen_madds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
              TCGv arg3, uint32_t n)
{
    TCGv t_n = tcg_const_i32(n);
    TCGv_i64 acc = tcg_temp_new_i64();

    tcg_gen_concat_i32_i64(acc, arg1_low, arg1_high);
    gen_helper_madd64_q_ssov(acc, cpu_env, acc, arg2, arg3, t_n);
    tcg_gen_extr_i64_i32(rl, rh, acc);

    tcg_temp_free(t_n);
    tcg_temp_free_i64(acc);
}
/* ret = r2 - (r1 * r3);
   32-bit MSUB: overflow is detected on the exact 64-bit result before
   truncation. */
static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(t1, r1);
    tcg_gen_ext_i32_i64(t2, r2);
    tcg_gen_ext_i32_i64(t3, r3);
    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_sub_i64(t1, t2, t1);
    tcg_gen_extrl_i64_i32(ret, t1);
    /* calc V
       result (t1) > 0x7fffffff */
    tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
    /* result < -0x80000000 */
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
    tcg_gen_or_i64(t2, t2, t3);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
/* MSUB with constant multiplier: materialize con, reuse register form. */
static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv imm = tcg_const_i32(con);
    gen_msub32_d(ret, r1, r2, imm);
    tcg_temp_free(imm);
}
/*
 * 64-bit MSUB: ret_high:ret_low = r2_high:r2_low - (r1 * r3).
 * The signed 64-bit product cannot overflow, so only the subtraction
 * contributes to PSW_V.
 */
static inline void
gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    TCGv t3 = tcg_temp_new();
    TCGv t4 = tcg_temp_new();
    tcg_gen_muls2_tl(t1, t2, r1, r3);
    /* only the sub can overflow */
    tcg_gen_sub2_tl(t3, t4, r2_low, r2_high, t1, t2);
    /* calc V bit: overflow iff the minuend and subtrahend high parts
       differ in sign and the result sign flipped */
    tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
    tcg_gen_xor_tl(t1, r2_high, t2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, t1);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
    tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back the result */
    tcg_gen_mov_tl(ret_low, t3);
    tcg_gen_mov_tl(ret_high, t4);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(t3);
    tcg_temp_free(t4);
}
/* 64-bit MSUB with constant multiplier: wraps gen_msub64_d. */
static inline void
gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              int32_t con)
{
    TCGv imm = tcg_const_i32(con);
    gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, imm);
    tcg_temp_free(imm);
}
/*
 * Unsigned 64-bit MSUB: ret_high:ret_low = r2_high:r2_low - (r1 * r3).
 * For unsigned operands the subtraction overflows exactly when the
 * product exceeds the accumulator (a borrow occurs).
 */
static inline void
gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t1, r1);
    tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
    tcg_gen_extu_i32_i64(t3, r3);
    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_sub_i64(t3, t2, t1);
    tcg_gen_extr_i64_i32(ret_low, ret_high, t3);
    /* calc V bit, only the sub can overflow, if t1 > t2 */
    tcg_gen_setcond_i64(TCG_COND_GTU, t1, t1, t2);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
/* Unsigned 64-bit MSUB with constant multiplier: wraps gen_msubu64_d. */
static inline void
gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
               int32_t con)
{
    TCGv imm = tcg_const_i32(con);
    gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, imm);
    tcg_temp_free(imm);
}
/* ADD with immediate operand: materialize the constant, reuse gen_add_d. */
static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2)
{
    TCGv imm = tcg_const_i32(r2);
    gen_add_d(ret, r1, imm);
    tcg_temp_free(imm);
}
/* calculate the carry bit too */
/*
 * ret = r1 + r2, setting PSW C (carry-out via add2), V (signed overflow),
 * and the sticky SV plus AV/SAV advance-overflow bits.
 */
static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();
    tcg_gen_movi_tl(t0, 0);
    /* Addition and set C/V/SV bits */
    tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, r2, t0);
    /* calc V bit: set iff operands agree in sign but the result differs */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);
    tcg_temp_free(result);
    tcg_temp_free(t0);
}
/* Flag-setting ADD with immediate operand: wraps gen_add_CC. */
static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)
{
    TCGv imm = tcg_const_i32(con);
    gen_add_CC(ret, r1, imm);
    tcg_temp_free(imm);
}
/*
 * ret = r1 + r2 + carry-in, where the carry-in is PSW_C normalized to
 * 0/1.  PSW_C receives the combined carry-out of both add2 steps; V and
 * the sticky bits are computed as in gen_add_CC.
 */
static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv carry = tcg_temp_new_i32();
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();
    tcg_gen_movi_tl(t0, 0);
    /* normalize PSW_C to a 0/1 carry-in */
    tcg_gen_setcondi_tl(TCG_COND_NE, carry, cpu_PSW_C, 0);
    /* Addition, carry and set C/V/SV bits */
    tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, carry, t0);
    tcg_gen_add2_i32(result, cpu_PSW_C, result, cpu_PSW_C, r2, t0);
    /* calc V bit */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);
    tcg_temp_free(result);
    tcg_temp_free(t0);
    tcg_temp_free(carry);
}
/* ADD-with-carry with immediate operand: wraps gen_addc_CC. */
static inline void gen_addci_CC(TCGv ret, TCGv r1, int32_t con)
{
    TCGv imm = tcg_const_i32(con);
    gen_addc_CC(ret, r1, imm);
    tcg_temp_free(imm);
}
/*
 * Conditional add: r3 = (r4 cond 0) ? r1 + r2 : r1.
 * PSW_V/AV are only replaced when the condition holds (movcond keeps the
 * old value otherwise); the sticky SV/SAV bits are ORed through a
 * 0/0x80000000 mask built from the condition.
 */
static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
                                TCGv r4)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv result = tcg_temp_new();
    TCGv mask = tcg_temp_new();
    TCGv t0 = tcg_const_i32(0);
    /* create mask for sticky bits */
    tcg_gen_setcond_tl(cond, mask, r4, t0);
    tcg_gen_shli_tl(mask, mask, 31);
    tcg_gen_add_tl(result, r1, r2);
    /* Calc PSW_V */
    tcg_gen_xor_tl(temp, result, r1);
    tcg_gen_xor_tl(temp2, r1, r2);
    tcg_gen_andc_tl(temp, temp, temp2);
    tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
    /* Set PSW_SV */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
    /* calc AV bit */
    tcg_gen_add_tl(temp, result, result);
    tcg_gen_xor_tl(temp, temp, result);
    tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
    /* write back result */
    tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);
    tcg_temp_free(t0);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(result);
    tcg_temp_free(mask);
}
/* Conditional add with immediate second operand: wraps gen_cond_add. */
static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2,
                                 TCGv r3, TCGv r4)
{
    TCGv imm = tcg_const_i32(r2);
    gen_cond_add(cond, r1, imm, r3, r4);
    tcg_temp_free(imm);
}
/*
 * ret = r1 - r2, setting PSW V (signed overflow of the subtraction)
 * plus the sticky SV and the AV/SAV advance-overflow bits.
 */
static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();
    tcg_gen_sub_tl(result, r1, r2);
    /* calc V bit: overflow iff operand signs differ and the result sign
       differs from r1 */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);
    tcg_temp_free(temp);
    tcg_temp_free(result);
}
/*
 * 64-bit subtract with PSW update: ret = r1 - r2.  V is taken from the
 * top 32 bits of the sign analysis; AV/SAV from the result's high half.
 */
static inline void
gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv temp = tcg_temp_new();
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 result = tcg_temp_new_i64();
    tcg_gen_sub_i64(result, r1, r2);
    /* calc v bit */
    tcg_gen_xor_i64(t1, result, r1);
    tcg_gen_xor_i64(t0, r1, r2);
    tcg_gen_and_i64(t1, t1, t0);
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* calc AV/SAV bits */
    tcg_gen_extrh_i64_i32(temp, result);
    tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
    tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_i64(ret, result);
    tcg_temp_free(temp);
    tcg_temp_free_i64(result);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
/*
 * ret = r1 - r2 with full flag update; PSW_C is set when no borrow
 * occurs (r1 >= r2 unsigned).
 */
static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv result = tcg_temp_new();
    TCGv temp = tcg_temp_new();
    tcg_gen_sub_tl(result, r1, r2);
    /* calc C bit */
    tcg_gen_setcond_tl(TCG_COND_GEU, cpu_PSW_C, r1, r2);
    /* calc V bit */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);
    tcg_temp_free(result);
    tcg_temp_free(temp);
}
/*
 * Subtract with borrow, expressed through the add-with-carry generator:
 * r1 - r2 - !C == r1 + ~r2 + C.
 */
static inline void gen_subc_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv not_r2 = tcg_temp_new();
    tcg_gen_not_tl(not_r2, r2);
    gen_addc_CC(ret, r1, not_r2);
    tcg_temp_free(not_r2);
}
/*
 * Conditional subtract: r3 = (r4 cond 0) ? r1 - r2 : r1.
 * Mirrors gen_cond_add: V/AV are only replaced when the condition holds,
 * the sticky SV/SAV bits are ORed through a 0/0x80000000 mask.
 */
static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
                                TCGv r4)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv result = tcg_temp_new();
    TCGv mask = tcg_temp_new();
    TCGv t0 = tcg_const_i32(0);
    /* create mask for sticky bits */
    tcg_gen_setcond_tl(cond, mask, r4, t0);
    tcg_gen_shli_tl(mask, mask, 31);
    tcg_gen_sub_tl(result, r1, r2);
    /* Calc PSW_V */
    tcg_gen_xor_tl(temp, result, r1);
    tcg_gen_xor_tl(temp2, r1, r2);
    tcg_gen_and_tl(temp, temp, temp2);
    tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
    /* Set PSW_SV */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
    /* calc AV bit */
    tcg_gen_add_tl(temp, result, result);
    tcg_gen_xor_tl(temp, temp, result);
    tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
    /* write back result */
    tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);
    tcg_temp_free(t0);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(result);
    tcg_temp_free(mask);
}
/*
 * MSUB.H: two 16x16 products selected by mode (LL/LU/UL/UU helpers),
 * both halves subtracted from the r1_high:r1_low pair via
 * gen_addsub64_h with sub/sub ops.
 */
static inline void
gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
           TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    /* temp = low product, temp2 = high product */
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_sub_tl, tcg_gen_sub_tl);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
/*
 * Saturating MSUB.H: each half goes through gen_subs; the V and AV bits
 * produced for the low half are saved and ORed with those of the high
 * half so both contribute to the final flags.
 */
static inline void
gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_subs(ret_low, r1_low, temp);
    /* save flags of the low-half subtraction */
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_subs(ret_high, r1_high, temp2);
    /* combine v bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp)
;
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(temp64);
}
/*
 * MSUBM.H: multiply with the mulm_h helper (mode selects the half-word
 * combination), then a flag-setting 64-bit subtract from the
 * accumulator pair.
 */
static inline void
gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_sub64_d(temp64_3, temp64_2, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);
    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
    tcg_temp_free_i64(temp64_3);
}
/*
 * Saturating MSUBM.H: like gen_msubm_h but the 64-bit subtraction is
 * done by the sub64_ssov helper, which saturates and updates the PSW.
 */
static inline void
gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
}
/*
 * MSUBR.H with 64-bit accumulator halves: the packed products are
 * subtracted and rounded by the subr_h helper.
 */
static inline void
gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
              uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    gen_helper_subr_h(ret, cpu_env, temp64, r1_low, r1_high);
    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
}
/*
 * 32-bit MSUBR.H: split r1 into two half-word accumulators (the low
 * half aligned at bit 16, the high half masked in place) and reuse the
 * 64-bit variant.
 */
static inline void
gen_msubr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv acc_lo = tcg_temp_new();
    TCGv acc_hi = tcg_temp_new();

    tcg_gen_shli_tl(acc_lo, r1, 16);
    tcg_gen_andi_tl(acc_hi, r1, 0xffff0000);
    gen_msubr64_h(ret, acc_lo, acc_hi, r2, r3, n, mode);

    tcg_temp_free(acc_hi);
    tcg_temp_free(acc_lo);
}
/*
 * Saturating MSUBR.H with 64-bit accumulator halves: delegates to the
 * subr_h_ssov helper.
 */
static inline void
gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
               uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    gen_helper_subr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);
    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
}
/*
 * Saturating 32-bit MSUBR.H: split r1 into two half-word accumulators
 * and reuse the saturating 64-bit variant.
 */
static inline void
gen_msubr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv acc_lo = tcg_temp_new();
    TCGv acc_hi = tcg_temp_new();

    tcg_gen_shli_tl(acc_lo, r1, 16);
    tcg_gen_andi_tl(acc_hi, r1, 0xffff0000);
    gen_msubr64s_h(ret, acc_lo, acc_hi, r2, r3, n, mode);

    tcg_temp_free(acc_hi);
    tcg_temp_free(acc_lo);
}
/* MSUBR.Q: the whole operation is implemented in the helper. */
static inline void
gen_msubr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
    TCGv t_n = tcg_const_i32(n);
    gen_helper_msubr_q(ret, cpu_env, r1, r2, r3, t_n);
    tcg_temp_free(t_n);
}
/* Saturating MSUBR.Q: the whole operation is implemented in the helper. */
static inline void
gen_msubrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
    TCGv t_n = tcg_const_i32(n);
    gen_helper_msubr_q_ssov(ret, cpu_env, r1, r2, r3, t_n);
    tcg_temp_free(t_n);
}
/*
 * 32-bit q-format MSUB: ret = arg1 - round((arg2 * arg3) >> (up_shift - n)).
 * The product is computed in 64 bit; any fraction bits shifted out cause
 * the shifted value to be rounded up before the subtraction.  V is set
 * from the exact 64-bit result, then SV/AV/SAV follow.
 * NOTE(review): env is not referenced here -- presumably kept so the
 * gen_*32_q variants share a signature; confirm with callers.
 *
 * Fix: the original allocated and freed two TCGv temporaries (temp,
 * temp2) that were never used; they are removed.
 */
static inline void
gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
             uint32_t up_shift, CPUTriCoreState *env)
{
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);
    tcg_gen_mul_i64(t2, t2, t3);
    tcg_gen_ext_i32_i64(t1, arg1);
    /* if we shift part of the fraction out, we need to round up */
    tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1);
    tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0);
    tcg_gen_sari_i64(t2, t2, up_shift - n);
    tcg_gen_add_i64(t2, t2, t4);
    tcg_gen_sub_i64(t3, t1, t2);
    tcg_gen_extrl_i64_i32(temp3, t3);
    /* calc v bit: exact result out of the 32-bit signed range */
    tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
    tcg_gen_or_i64(t1, t1, t2);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
    tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, temp3);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t4);
}
/*
 * MSUB.Q with 16-bit q-format product: ret = arg1 - (arg2 * arg3) << n,
 * n is 0 or 1.  PSW flags are set by gen_sub_d.
 */
static inline void
gen_m16sub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000: clamp the wrapped product
           0x80000000 down to 0x7fffffff */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    gen_sub_d(ret, arg1, temp);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
/*
 * Saturating variant of gen_m16sub32_q: the final subtraction goes
 * through gen_subs.
 */
static inline void
gen_m16subs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000: clamp the wrapped product
           0x80000000 down to 0x7fffffff */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    gen_subs(ret, arg1, temp);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
/*
 * MSUB.Q with 64-bit accumulator: the 16-bit q-format product is
 * sign-extended, aligned at bit 16 and subtracted from
 * arg1_high:arg1_low by gen_sub64_d (which also sets the PSW flags).
 */
static inline void
gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
               TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000: clamp the wrapped product
           0x80000000 down to 0x7fffffff */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    tcg_gen_ext_i32_i64(t2, temp);
    tcg_gen_shli_i64(t2, t2, 16);
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    gen_sub64_d(t3, t1, t2);
    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t3);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
/*
 * Saturating 64-bit variant of gen_m16sub64_q: the subtraction is done
 * by the sub64_ssov helper, which saturates and updates the PSW.
 */
static inline void
gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
                TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000: clamp the wrapped product
           0x80000000 down to 0x7fffffff */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    tcg_gen_ext_i32_i64(t2, temp);
    tcg_gen_shli_i64(t2, t2, 16);
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    gen_helper_sub64_ssov(t1, cpu_env, t1, t2);
    tcg_gen_extr_i64_i32(rl, rh, t1);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
/*
 * MSUB.Q with 32x32 product and 64-bit accumulator:
 * rh:rl = arg1_high:arg1_low - (arg2 * arg3) << n, n = 0 or 1.
 * PSW_V is derived from the sign analysis of the 64-bit subtraction.
 * NOTE(review): env is not referenced in this function -- presumably
 * kept so all gen_*64_q variants share a signature; confirm with callers.
 */
static inline void
gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
             TCGv arg3, uint32_t n, CPUTriCoreState *env)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();
    TCGv temp, temp2;
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);
    tcg_gen_mul_i64(t2, t2, t3);
    if (n != 0) {
        tcg_gen_shli_i64(t2, t2, 1);
    }
    tcg_gen_sub_i64(t4, t1, t2);
    /* calc v bit */
    tcg_gen_xor_i64(t3, t4, t1);
    tcg_gen_xor_i64(t2, t1, t2);
    tcg_gen_and_i64(t3, t3, t2);
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
    /* We produce an overflow on the host if the mul before was
       (0x80000000 * 0x80000000) << 1). If this is the
       case, we negate the ovf. */
    if (n == 1) {
        temp = tcg_temp_new();
        temp2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
        tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
        tcg_gen_and_tl(temp, temp, temp2);
        tcg_gen_shli_tl(temp, temp, 31);
        /* negate v bit, if special condition */
        tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
        tcg_temp_free(temp);
        tcg_temp_free(temp2);
    }
    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t4);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
    tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t4);
}
/*
 * Saturating 32-bit q-format MSUB: the 64-bit product is shifted down by
 * (up_shift - n), rounded up if fraction bits were shifted out, and the
 * subtraction is saturated by the helper.
 */
static inline void
gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
              uint32_t up_shift)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(t1, arg1);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);
    tcg_gen_mul_i64(t2, t2, t3);
    /* if we shift part of the fraction out, we need to round up */
    tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1);
    tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0);
    tcg_gen_sari_i64(t3, t2, up_shift - n);
    tcg_gen_add_i64(t3, t3, t4);
    gen_helper_msub32_q_sub_ssov(ret, cpu_env, t1, t3);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t4);
}
/*
 * Saturating 64-bit q-format MSUB: pack the accumulator pair and
 * delegate the multiply-subtract-saturate (and PSW update) to the
 * helper.
 */
static inline void
gen_msubs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
              TCGv arg3, uint32_t n)
{
    TCGv t_n = tcg_const_i32(n);
    TCGv_i64 acc = tcg_temp_new_i64();

    tcg_gen_concat_i32_i64(acc, arg1_low, arg1_high);
    gen_helper_msub64_q_ssov(acc, cpu_env, acc, arg2, arg3, t_n);
    tcg_gen_extr_i64_i32(rl, rh, acc);

    tcg_temp_free(t_n);
    tcg_temp_free_i64(acc);
}
/*
 * MSUBAD.H: mixed add/subtract of the packed 16x16 products -- the low
 * half is added to and the high half subtracted from the accumulator
 * pair (see the op pair passed to gen_addsub64_h).
 */
static inline void
gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_add_tl, tcg_gen_sub_tl);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
/*
 * MSUBADM.H: form (high_product - low_product) << 16 as a 64-bit value
 * and subtract it from the accumulator pair with gen_sub64_d.
 */
static inline void
gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high);
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);
    gen_sub64_d(temp64_2, temp64_3, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);
    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
    tcg_temp_free_i64(temp64_3);
}
/*
 * MSUBADR.H: r1 is split into two half-word accumulators (low half
 * aligned at bit 16, high half masked in place); the mixed
 * add/subtract plus rounding is done by the subadr_h helper.
 */
static inline void
gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_subadr_h(ret, cpu_env, temp64, temp, temp2);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
/*
 * Saturating MSUBAD.H: the low half is added with gen_adds, the high
 * half subtracted with gen_subs; the V and AV bits of the first
 * operation are saved and ORed with those of the second.
 */
static inline void
gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_adds(ret_low, r1_low, temp);
    /* save flags of the low-half addition */
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_subs(ret_high, r1_high, temp2);
    /* combine v bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(temp64);
}
/*
 * Saturating MSUBADM.H: like gen_msubadm_h, but the 64-bit subtraction
 * is done by the sub64_ssov helper, which saturates and updates the PSW.
 */
static inline void
gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
               TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
}
/*
 * Saturating MSUBADR.H: like gen_msubadr32_h, but the mixed
 * add/subtract with rounding is done by the subadr_h_ssov helper.
 */
static inline void
gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_subadr_h_ssov(ret, cpu_env, temp64, temp, temp2);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
/*
 * ret = |r1| via negate + movcond.  The only overflowing input is
 * INT32_MIN, which sets PSW_V; SV/AV/SAV are updated as usual.
 */
static inline void gen_abs(TCGv ret, TCGv r1)
{
    TCGv temp = tcg_temp_new();
    TCGv t0 = tcg_const_i32(0);
    tcg_gen_neg_tl(temp, r1);
    tcg_gen_movcond_tl(TCG_COND_GE, ret, r1, t0, r1, temp);
    /* overflow can only happen, if r1 = 0x80000000 */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, r1, 0x80000000);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    tcg_temp_free(temp);
    tcg_temp_free(t0);
}
/*
 * ret = |r1 - r2|: both difference directions are computed and movcond
 * selects the non-negative one; PSW_V is selected from the matching
 * direction the same way.
 */
static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();
    tcg_gen_sub_tl(result, r1, r2);
    tcg_gen_sub_tl(temp, r2, r1);
    tcg_gen_movcond_tl(TCG_COND_GT, result, r1, r2, result, temp);
    /* calc V bit */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, result, r2);
    tcg_gen_movcond_tl(TCG_COND_GT, cpu_PSW_V, r1, r2, cpu_PSW_V, temp);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);
    tcg_temp_free(temp);
    tcg_temp_free(result);
}
static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con)
{
    /* ABSDIF against a constant: materialize con and reuse gen_absdif. */
    TCGv imm = tcg_const_i32(con);

    gen_absdif(ret, r1, imm);
    tcg_temp_free(imm);
}
static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con)
{
    /* Saturating ABSDIF against a constant: delegate to the ssov helper. */
    TCGv imm = tcg_const_i32(con);

    gen_helper_absdif_ssov(ret, cpu_env, r1, imm);
    tcg_temp_free(imm);
}
static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
{
    /*
     * 32 x 32 -> 32 signed multiply; V flags whether the product
     * overflowed the 32-bit result.
     */
    TCGv hi = tcg_temp_new();
    TCGv lo = tcg_temp_new();

    tcg_gen_muls2_tl(lo, hi, r1, r2);
    tcg_gen_mov_tl(ret, lo);
    /* V: set iff the high half differs from the low half's sign extension */
    tcg_gen_sari_tl(lo, lo, 31);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_PSW_V, hi, lo);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* sticky SV */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* AV = bit 31 ^ bit 30 of the result, kept in bit 31 */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* sticky SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(hi);
    tcg_temp_free(lo);
}
static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con)
{
    /* Signed 32-bit multiply by a constant: reuse gen_mul_i32s. */
    TCGv imm = tcg_const_i32(con);

    gen_mul_i32s(ret, r1, imm);
    tcg_temp_free(imm);
}
static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
{
    /*
     * 32 x 32 -> 64 signed multiply; the full product always fits,
     * so V is simply cleared.  AV is derived from the high word.
     */
    tcg_gen_muls2_tl(ret_low, ret_high, r1, r2);
    /* clear V bit */
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc SV bit (OR with 0: no-op, kept for the common flag pattern) */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit: bit 31 ^ bit 30 of the high word, kept in bit 31 */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    /* calc SAV bit (sticky AV) */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1,
                                 int32_t con)
{
    /* Signed 64-bit-result multiply by a constant. */
    TCGv imm = tcg_const_i32(con);

    gen_mul_i64s(ret_low, ret_high, r1, imm);
    tcg_temp_free(imm);
}
static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
{
    /*
     * 32 x 32 -> 64 unsigned multiply; the full product always fits,
     * so V is simply cleared.  AV is derived from the high word.
     */
    tcg_gen_mulu2_tl(ret_low, ret_high, r1, r2);
    /* clear V bit */
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc SV bit (OR with 0: no-op, kept for the common flag pattern) */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit: bit 31 ^ bit 30 of the high word, kept in bit 31 */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    /* calc SAV bit (sticky AV) */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1,
                                 int32_t con)
{
    /* Unsigned 64-bit-result multiply by a constant. */
    TCGv imm = tcg_const_i32(con);

    gen_mul_i64u(ret_low, ret_high, r1, imm);
    tcg_temp_free(imm);
}
static inline void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con)
{
    /* Saturating signed multiply with a constant operand. */
    TCGv imm = tcg_const_i32(con);

    gen_helper_mul_ssov(ret, cpu_env, r1, imm);
    tcg_temp_free(imm);
}
static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con)
{
    /* Saturating unsigned multiply with a constant operand. */
    TCGv imm = tcg_const_i32(con);

    gen_helper_mul_suov(ret, cpu_env, r1, imm);
    tcg_temp_free(imm);
}
/* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
static inline void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    /* Saturating 32-bit multiply-add with a constant multiplier. */
    TCGv imm = tcg_const_i32(con);

    gen_helper_madd32_ssov(ret, cpu_env, r1, r2, imm);
    tcg_temp_free(imm);
}
static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    /* Saturating unsigned 32-bit multiply-add with a constant multiplier. */
    TCGv imm = tcg_const_i32(con);

    gen_helper_madd32_suov(ret, cpu_env, r1, r2, imm);
    tcg_temp_free(imm);
}
static void
gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift)
{
    /*
     * Q-format (fixed-point) multiply.
     *   n        - 1 selects the "<< 1" fractional variant, 0 the plain
     *              product
     *   up_shift - how far the 64-bit product is shifted down before being
     *              split into rl/rh (0 keeps the full 64-bit result)
     * Updates the PSW flag cache (V, sticky SV, AV, sticky SAV).
     * Fix: removed the unused 32-bit TCG temporary that was allocated and
     * freed but never referenced in the body.
     */
    TCGv_i64 temp_64 = tcg_temp_new_i64();
    TCGv_i64 temp2_64 = tcg_temp_new_i64();

    if (n == 0) {
        if (up_shift == 32) {
            /* rl receives the high half of the product */
            tcg_gen_muls2_tl(rh, rl, arg1, arg2);
        } else if (up_shift == 16) {
            tcg_gen_ext_i32_i64(temp_64, arg1);
            tcg_gen_ext_i32_i64(temp2_64, arg2);
            tcg_gen_mul_i64(temp_64, temp_64, temp2_64);
            tcg_gen_shri_i64(temp_64, temp_64, up_shift);
            tcg_gen_extr_i64_i32(rl, rh, temp_64);
        } else {
            tcg_gen_muls2_tl(rl, rh, arg1, arg2);
        }
        /* reset v bit */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
    } else { /* n is expected to be 1 */
        tcg_gen_ext_i32_i64(temp_64, arg1);
        tcg_gen_ext_i32_i64(temp2_64, arg2);
        tcg_gen_mul_i64(temp_64, temp_64, temp2_64);
        if (up_shift == 0) {
            tcg_gen_shli_i64(temp_64, temp_64, 1);
        } else {
            /* shift by one less: the "<< 1" is folded into the down-shift */
            tcg_gen_shri_i64(temp_64, temp_64, up_shift - 1);
        }
        tcg_gen_extr_i64_i32(rl, rh, temp_64);
        /* overflow only occurs if r1 = r2 = 0x8000 */
        if (up_shift == 0) {/* result is 64 bit */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rh,
                                0x80000000);
        } else { /* result is 32 bit */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rl,
                                0x80000000);
        }
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* calc sv overflow bit */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    }
    /* calc av overflow bit (from whichever word carries the result) */
    if (up_shift == 0) {
        tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
        tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
    } else {
        tcg_gen_add_tl(cpu_PSW_AV, rl, rl);
        tcg_gen_xor_tl(cpu_PSW_AV, rl, cpu_PSW_AV);
    }
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    tcg_temp_free_i64(temp_64);
    tcg_temp_free_i64(temp2_64);
}
static void
gen_mul_q_16(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
{
    /*
     * 16-bit Q-format multiply.  n == 1 selects the "<< 1" fractional
     * variant; its only overflow case (both operands 0x8000) is folded
     * back into range with a setcond/sub.  Clears V, updates AV/SAV.
     */
    if (n == 0) {
        tcg_gen_mul_tl(ret, arg1, arg2);
    } else { /* n is expected to be 1 */
        TCGv sat = tcg_temp_new();

        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_shli_tl(ret, ret, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, sat, ret, 0x80000000);
        tcg_gen_sub_tl(ret, ret, sat);
        tcg_temp_free(sat);
    }
    /* reset v bit */
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
{
    /*
     * Q-format multiply with rounding: 0x8000 is added so that cutting
     * off the low halfword at the end rounds to nearest.  n == 1 selects
     * the "<< 1" fractional variant.  Clears V, updates AV/SAV.
     */
    TCGv temp = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_addi_tl(ret, ret, 0x8000);   /* round */
    } else {
        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_shli_tl(ret, ret, 1);
        tcg_gen_addi_tl(ret, ret, 0x8000);   /* round */
        /* catch special case r1 = r2 = 0x8000: only then is ret
           0x80008000; subtracting 1 * 0x8001 yields 0x7fffffff, i.e.
           0x7fff0000 after the final mask (saturated) */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80008000);
        tcg_gen_muli_tl(temp, temp, 0x8001);
        tcg_gen_sub_tl(ret, ret, temp);
    }
    /* reset v bit */
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* cut halfword off */
    tcg_gen_andi_tl(ret, ret, 0xffff0000);
    tcg_temp_free(temp);
}
static inline void
gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    /* Saturating 64-bit MADD: pack the addend, run the helper, unpack. */
    TCGv_i64 acc = tcg_temp_new_i64();

    tcg_gen_concat_i32_i64(acc, r2_low, r2_high);
    gen_helper_madd64_ssov(acc, cpu_env, r1, acc, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, acc);
    tcg_temp_free_i64(acc);
}
static inline void
gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              int32_t con)
{
    /* Saturating 64-bit MADD with a constant multiplier. */
    TCGv imm = tcg_const_i32(con);

    gen_madds_64(ret_low, ret_high, r1, r2_low, r2_high, imm);
    tcg_temp_free(imm);
}
static inline void
gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    /* Saturating unsigned 64-bit MADD: pack, call helper, unpack. */
    TCGv_i64 acc = tcg_temp_new_i64();

    tcg_gen_concat_i32_i64(acc, r2_low, r2_high);
    gen_helper_madd64_suov(acc, cpu_env, r1, acc, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, acc);
    tcg_temp_free_i64(acc);
}
static inline void
gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
               int32_t con)
{
    /* Saturating unsigned 64-bit MADD with a constant multiplier. */
    TCGv imm = tcg_const_i32(con);

    gen_maddsu_64(ret_low, ret_high, r1, r2_low, r2_high, imm);
    tcg_temp_free(imm);
}
static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    /* Saturating 32-bit multiply-subtract with a constant multiplier. */
    TCGv imm = tcg_const_i32(con);

    gen_helper_msub32_ssov(ret, cpu_env, r1, r2, imm);
    tcg_temp_free(imm);
}
static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    /* Saturating unsigned 32-bit multiply-subtract, constant multiplier. */
    TCGv imm = tcg_const_i32(con);

    gen_helper_msub32_suov(ret, cpu_env, r1, r2, imm);
    tcg_temp_free(imm);
}
static inline void
gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    /* Saturating 64-bit MSUB: pack the minuend, run the helper, unpack. */
    TCGv_i64 acc = tcg_temp_new_i64();

    tcg_gen_concat_i32_i64(acc, r2_low, r2_high);
    gen_helper_msub64_ssov(acc, cpu_env, r1, acc, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, acc);
    tcg_temp_free_i64(acc);
}
static inline void
gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              int32_t con)
{
    /* Saturating 64-bit MSUB with a constant multiplier. */
    TCGv imm = tcg_const_i32(con);

    gen_msubs_64(ret_low, ret_high, r1, r2_low, r2_high, imm);
    tcg_temp_free(imm);
}
static inline void
gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    /* Saturating unsigned 64-bit MSUB: pack, call helper, unpack. */
    TCGv_i64 acc = tcg_temp_new_i64();

    tcg_gen_concat_i32_i64(acc, r2_low, r2_high);
    gen_helper_msub64_suov(acc, cpu_env, r1, acc, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, acc);
    tcg_temp_free_i64(acc);
}
static inline void
gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
               int32_t con)
{
    /* Saturating unsigned 64-bit MSUB with a constant multiplier. */
    TCGv imm = tcg_const_i32(con);

    gen_msubsu_64(ret_low, ret_high, r1, r2_low, r2_high, imm);
    tcg_temp_free(imm);
}
static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low)
{
    /* Clamp arg into [low, up]: ret = min(max(arg, low), up). */
    TCGv lower = tcg_const_i32(low);
    TCGv upper = tcg_const_i32(up);

    /* lower = max(arg, low) */
    tcg_gen_movcond_tl(TCG_COND_LT, lower, arg, lower, lower, arg);
    /* ret = min(lower, up) */
    tcg_gen_movcond_tl(TCG_COND_GT, ret, lower, upper, upper, lower);

    tcg_temp_free(lower);
    tcg_temp_free(upper);
}
static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up)
{
    /* Unsigned upper clamp: ret = min(arg, up), compared unsigned. */
    TCGv upper = tcg_const_i32(up);

    tcg_gen_movcond_tl(TCG_COND_GTU, ret, arg, upper, upper, arg);
    tcg_temp_free(upper);
}
static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
{
    /*
     * SH with immediate count: non-negative counts shift left, negative
     * counts shift logically right; -32 clears the register outright
     * (a host shift by 32 would be out of range).
     */
    if (shift_count >= 0) {
        tcg_gen_shli_tl(ret, r1, shift_count);
    } else if (shift_count == -32) {
        tcg_gen_movi_tl(ret, 0);
    } else {
        tcg_gen_shri_tl(ret, r1, -shift_count);
    }
}
static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount)
{
    TCGv lo_half, hi_half;

    /* Per-halfword shift; a count of -16 clears both halves. */
    if (shiftcount == -16) {
        tcg_gen_movi_tl(ret, 0);
        return;
    }
    lo_half = tcg_temp_new();
    hi_half = tcg_temp_new();
    /* shift each 16-bit half independently, then recombine */
    tcg_gen_andi_tl(lo_half, r1, 0xffff);
    tcg_gen_andi_tl(hi_half, r1, 0xffff0000);
    gen_shi(lo_half, lo_half, shiftcount);
    gen_shi(ret, hi_half, shiftcount);
    tcg_gen_deposit_tl(ret, ret, lo_half, 0, 16);
    tcg_temp_free(lo_half);
    tcg_temp_free(hi_half);
}
static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
{
uint32_t msk, msk_start;
TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv t_0 = tcg_const_i32(0);
if (shift_count == 0) {
/* Clear PSW.C and PSW.V */
tcg_gen_movi_tl(cpu_PSW_C, 0);
tcg_gen_mov_tl(cpu_PSW_V, cpu_PSW_C);
tcg_gen_mov_tl(ret, r1);
} else if (shift_count == -32) {
/* set PSW.C */
tcg_gen_mov_tl(cpu_PSW_C, r1);
/* fill ret completely with sign bit */
tcg_gen_sari_tl(ret, r1, 31);
/* clear PSW.V */
tcg_gen_movi_tl(cpu_PSW_V, 0);
} else if (shift_count > 0) {
TCGv t_max = tcg_const_i32(0x7FFFFFFF >> shift_count);
TCGv t_min = tcg_const_i32(((int32_t) -0x80000000) >> shift_count);
/* calc carry */
msk_start = 32 - shift_count;
msk = ((1 << shift_count) - 1) << msk_start;
tcg_gen_andi_tl(cpu_PSW_C, r1, ms