/*
* MIPS32 emulation for qemu: main translation routines.
*
* Copyright (c) 2004-2005 Jocelyn Mayer
* Copyright (c) 2006 Marius Groeger (FPU operations)
* Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu-common.h"
#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"
//#define MIPS_DEBUG_DISAS
//#define MIPS_DEBUG_SIGN_EXTENSIONS
/* MIPS major opcodes */
#define MASK_OP_MAJOR(op) (op & (0x3F << 26))
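/* The major opcode occupies instruction bits 31..26; MASK_OP_MAJOR keeps
   only that field so a raw instruction word can be compared directly
   against the OPC_* values below.  For example, 0x8C430004 (lw v1, 4(v0))
   masks to 0x23 << 26, i.e. OPC_LW. */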
enum {
/* indirect opcode tables */
OPC_SPECIAL = (0x00 << 26),
OPC_REGIMM = (0x01 << 26),
OPC_CP0 = (0x10 << 26),
OPC_CP1 = (0x11 << 26),
OPC_CP2 = (0x12 << 26),
OPC_CP3 = (0x13 << 26),
OPC_SPECIAL2 = (0x1C << 26),
OPC_SPECIAL3 = (0x1F << 26),
/* arithmetic with immediate */
OPC_ADDI = (0x08 << 26),
OPC_ADDIU = (0x09 << 26),
OPC_SLTI = (0x0A << 26),
OPC_SLTIU = (0x0B << 26),
/* logic with immediate */
OPC_ANDI = (0x0C << 26),
OPC_ORI = (0x0D << 26),
OPC_XORI = (0x0E << 26),
OPC_LUI = (0x0F << 26),
/* arithmetic with immediate */
OPC_DADDI = (0x18 << 26),
OPC_DADDIU = (0x19 << 26),
/* Jump and branches */
OPC_J = (0x02 << 26),
OPC_JAL = (0x03 << 26),
OPC_BEQ = (0x04 << 26), /* Unconditional if rs = rt = 0 (B) */
OPC_BEQL = (0x14 << 26),
OPC_BNE = (0x05 << 26),
OPC_BNEL = (0x15 << 26),
OPC_BLEZ = (0x06 << 26),
OPC_BLEZL = (0x16 << 26),
OPC_BGTZ = (0x07 << 26),
OPC_BGTZL = (0x17 << 26),
OPC_JALX = (0x1D << 26), /* MIPS 16 only */
/* Load and stores */
OPC_LDL = (0x1A << 26),
OPC_LDR = (0x1B << 26),
OPC_LB = (0x20 << 26),
OPC_LH = (0x21 << 26),
OPC_LWL = (0x22 << 26),
OPC_LW = (0x23 << 26),
OPC_LBU = (0x24 << 26),
OPC_LHU = (0x25 << 26),
OPC_LWR = (0x26 << 26),
OPC_LWU = (0x27 << 26),
OPC_SB = (0x28 << 26),
OPC_SH = (0x29 << 26),
OPC_SWL = (0x2A << 26),
OPC_SW = (0x2B << 26),
OPC_SDL = (0x2C << 26),
OPC_SDR = (0x2D << 26),
OPC_SWR = (0x2E << 26),
OPC_LL = (0x30 << 26),
OPC_LLD = (0x34 << 26),
OPC_LD = (0x37 << 26),
OPC_SC = (0x38 << 26),
OPC_SCD = (0x3C << 26),
OPC_SD = (0x3F << 26),
/* Floating point load/store */
OPC_LWC1 = (0x31 << 26),
OPC_LWC2 = (0x32 << 26),
OPC_LDC1 = (0x35 << 26),
OPC_LDC2 = (0x36 << 26),
OPC_SWC1 = (0x39 << 26),
OPC_SWC2 = (0x3A << 26),
OPC_SDC1 = (0x3D << 26),
OPC_SDC2 = (0x3E << 26),
/* MDMX ASE specific */
OPC_MDMX = (0x1E << 26),
/* Cache and prefetch */
OPC_CACHE = (0x2F << 26),
OPC_PREF = (0x33 << 26),
/* Reserved major opcode */
OPC_MAJOR3B_RESERVED = (0x3B << 26),
};
/* MIPS special opcodes */
#define MASK_SPECIAL(op) MASK_OP_MAJOR(op) | (op & 0x3F)
enum {
/* Shifts */
OPC_SLL = 0x00 | OPC_SPECIAL,
/* NOP is SLL r0, r0, 0 */
/* SSNOP is SLL r0, r0, 1 */
/* EHB is SLL r0, r0, 3 */
OPC_SRL = 0x02 | OPC_SPECIAL, /* also ROTR */
OPC_SRA = 0x03 | OPC_SPECIAL,
OPC_SLLV = 0x04 | OPC_SPECIAL,
OPC_SRLV = 0x06 | OPC_SPECIAL, /* also ROTRV */
OPC_SRAV = 0x07 | OPC_SPECIAL,
OPC_DSLLV = 0x14 | OPC_SPECIAL,
OPC_DSRLV = 0x16 | OPC_SPECIAL, /* also DROTRV */
OPC_DSRAV = 0x17 | OPC_SPECIAL,
OPC_DSLL = 0x38 | OPC_SPECIAL,
OPC_DSRL = 0x3A | OPC_SPECIAL, /* also DROTR */
OPC_DSRA = 0x3B | OPC_SPECIAL,
OPC_DSLL32 = 0x3C | OPC_SPECIAL,
OPC_DSRL32 = 0x3E | OPC_SPECIAL, /* also DROTR32 */
OPC_DSRA32 = 0x3F | OPC_SPECIAL,
/* Multiplication / division */
OPC_MULT = 0x18 | OPC_SPECIAL,
OPC_MULTU = 0x19 | OPC_SPECIAL,
OPC_DIV = 0x1A | OPC_SPECIAL,
OPC_DIVU = 0x1B | OPC_SPECIAL,
OPC_DMULT = 0x1C | OPC_SPECIAL,
OPC_DMULTU = 0x1D | OPC_SPECIAL,
OPC_DDIV = 0x1E | OPC_SPECIAL,
OPC_DDIVU = 0x1F | OPC_SPECIAL,
/* 2 registers arithmetic / logic */
OPC_ADD = 0x20 | OPC_SPECIAL,
OPC_ADDU = 0x21 | OPC_SPECIAL,
OPC_SUB = 0x22 | OPC_SPECIAL,
OPC_SUBU = 0x23 | OPC_SPECIAL,
OPC_AND = 0x24 | OPC_SPECIAL,
OPC_OR = 0x25 | OPC_SPECIAL,
OPC_XOR = 0x26 | OPC_SPECIAL,
OPC_NOR = 0x27 | OPC_SPECIAL,
OPC_SLT = 0x2A | OPC_SPECIAL,
OPC_SLTU = 0x2B | OPC_SPECIAL,
OPC_DADD = 0x2C | OPC_SPECIAL,
OPC_DADDU = 0x2D | OPC_SPECIAL,
OPC_DSUB = 0x2E | OPC_SPECIAL,
OPC_DSUBU = 0x2F | OPC_SPECIAL,
/* Jumps */
OPC_JR = 0x08 | OPC_SPECIAL, /* Also JR.HB */
OPC_JALR = 0x09 | OPC_SPECIAL, /* Also JALR.HB */
/* Traps */
OPC_TGE = 0x30 | OPC_SPECIAL,
OPC_TGEU = 0x31 | OPC_SPECIAL,
OPC_TLT = 0x32 | OPC_SPECIAL,
OPC_TLTU = 0x33 | OPC_SPECIAL,
OPC_TEQ = 0x34 | OPC_SPECIAL,
OPC_TNE = 0x36 | OPC_SPECIAL,
/* HI / LO registers load & stores */
OPC_MFHI = 0x10 | OPC_SPECIAL,
OPC_MTHI = 0x11 | OPC_SPECIAL,
OPC_MFLO = 0x12 | OPC_SPECIAL,
OPC_MTLO = 0x13 | OPC_SPECIAL,
/* Conditional moves */
OPC_MOVZ = 0x0A | OPC_SPECIAL,
OPC_MOVN = 0x0B | OPC_SPECIAL,
OPC_MOVCI = 0x01 | OPC_SPECIAL,
/* Special */
OPC_PMON = 0x05 | OPC_SPECIAL, /* unofficial */
OPC_SYSCALL = 0x0C | OPC_SPECIAL,
OPC_BREAK = 0x0D | OPC_SPECIAL,
OPC_SPIM = 0x0E | OPC_SPECIAL, /* unofficial */
OPC_SYNC = 0x0F | OPC_SPECIAL,
OPC_SPECIAL15_RESERVED = 0x15 | OPC_SPECIAL,
OPC_SPECIAL28_RESERVED = 0x28 | OPC_SPECIAL,
OPC_SPECIAL29_RESERVED = 0x29 | OPC_SPECIAL,
OPC_SPECIAL35_RESERVED = 0x35 | OPC_SPECIAL,
OPC_SPECIAL37_RESERVED = 0x37 | OPC_SPECIAL,
OPC_SPECIAL39_RESERVED = 0x39 | OPC_SPECIAL,
OPC_SPECIAL3D_RESERVED = 0x3D | OPC_SPECIAL,
};
/* Multiplication variants of the vr54xx. */
#define MASK_MUL_VR54XX(op) MASK_SPECIAL(op) | (op & (0x1F << 6))
enum {
OPC_VR54XX_MULS = (0x03 << 6) | OPC_MULT,
OPC_VR54XX_MULSU = (0x03 << 6) | OPC_MULTU,
OPC_VR54XX_MACC = (0x05 << 6) | OPC_MULT,
OPC_VR54XX_MACCU = (0x05 << 6) | OPC_MULTU,
OPC_VR54XX_MSAC = (0x07 << 6) | OPC_MULT,
OPC_VR54XX_MSACU = (0x07 << 6) | OPC_MULTU,
OPC_VR54XX_MULHI = (0x09 << 6) | OPC_MULT,
OPC_VR54XX_MULHIU = (0x09 << 6) | OPC_MULTU,
OPC_VR54XX_MULSHI = (0x0B << 6) | OPC_MULT,
OPC_VR54XX_MULSHIU = (0x0B << 6) | OPC_MULTU,
OPC_VR54XX_MACCHI = (0x0D << 6) | OPC_MULT,
OPC_VR54XX_MACCHIU = (0x0D << 6) | OPC_MULTU,
OPC_VR54XX_MSACHI = (0x0F << 6) | OPC_MULT,
OPC_VR54XX_MSACHIU = (0x0F << 6) | OPC_MULTU,
};
/* REGIMM (rt field) opcodes */
#define MASK_REGIMM(op) MASK_OP_MAJOR(op) | (op & (0x1F << 16))
enum {
OPC_BLTZ = (0x00 << 16) | OPC_REGIMM,
OPC_BLTZL = (0x02 << 16) | OPC_REGIMM,
OPC_BGEZ = (0x01 << 16) | OPC_REGIMM,
OPC_BGEZL = (0x03 << 16) | OPC_REGIMM,
OPC_BLTZAL = (0x10 << 16) | OPC_REGIMM,
OPC_BLTZALL = (0x12 << 16) | OPC_REGIMM,
OPC_BGEZAL = (0x11 << 16) | OPC_REGIMM,
OPC_BGEZALL = (0x13 << 16) | OPC_REGIMM,
OPC_TGEI = (0x08 << 16) | OPC_REGIMM,
OPC_TGEIU = (0x09 << 16) | OPC_REGIMM,
OPC_TLTI = (0x0A << 16) | OPC_REGIMM,
OPC_TLTIU = (0x0B << 16) | OPC_REGIMM,
OPC_TEQI = (0x0C << 16) | OPC_REGIMM,
OPC_TNEI = (0x0E << 16) | OPC_REGIMM,
OPC_SYNCI = (0x1F << 16) | OPC_REGIMM,
};
/* Special2 opcodes */
#define MASK_SPECIAL2(op) MASK_OP_MAJOR(op) | (op & 0x3F)
enum {
/* Multiply & xxx operations */
OPC_MADD = 0x00 | OPC_SPECIAL2,
OPC_MADDU = 0x01 | OPC_SPECIAL2,
OPC_MUL = 0x02 | OPC_SPECIAL2,
OPC_MSUB = 0x04 | OPC_SPECIAL2,
OPC_MSUBU = 0x05 | OPC_SPECIAL2,
/* Misc */
OPC_CLZ = 0x20 | OPC_SPECIAL2,
OPC_CLO = 0x21 | OPC_SPECIAL2,
OPC_DCLZ = 0x24 | OPC_SPECIAL2,
OPC_DCLO = 0x25 | OPC_SPECIAL2,
/* Special */
OPC_SDBBP = 0x3F | OPC_SPECIAL2,
};
/* Special3 opcodes */
#define MASK_SPECIAL3(op) MASK_OP_MAJOR(op) | (op & 0x3F)
enum {
OPC_EXT = 0x00 | OPC_SPECIAL3,
OPC_DEXTM = 0x01 | OPC_SPECIAL3,
OPC_DEXTU = 0x02 | OPC_SPECIAL3,
OPC_DEXT = 0x03 | OPC_SPECIAL3,
OPC_INS = 0x04 | OPC_SPECIAL3,
OPC_DINSM = 0x05 | OPC_SPECIAL3,
OPC_DINSU = 0x06 | OPC_SPECIAL3,
OPC_DINS = 0x07 | OPC_SPECIAL3,
OPC_FORK = 0x08 | OPC_SPECIAL3,
OPC_YIELD = 0x09 | OPC_SPECIAL3,
OPC_BSHFL = 0x20 | OPC_SPECIAL3,
OPC_DBSHFL = 0x24 | OPC_SPECIAL3,
OPC_RDHWR = 0x3B | OPC_SPECIAL3,
};
/* BSHFL opcodes */
#define MASK_BSHFL(op) MASK_SPECIAL3(op) | (op & (0x1F << 6))
enum {
OPC_WSBH = (0x02 << 6) | OPC_BSHFL,
OPC_SEB = (0x10 << 6) | OPC_BSHFL,
OPC_SEH = (0x18 << 6) | OPC_BSHFL,
};
/* DBSHFL opcodes */
#define MASK_DBSHFL(op) MASK_SPECIAL3(op) | (op & (0x1F << 6))
enum {
OPC_DSBH = (0x02 << 6) | OPC_DBSHFL,
OPC_DSHD = (0x05 << 6) | OPC_DBSHFL,
};
/* Coprocessor 0 (rs field) */
#define MASK_CP0(op) MASK_OP_MAJOR(op) | (op & (0x1F << 21))
enum {
OPC_MFC0 = (0x00 << 21) | OPC_CP0,
OPC_DMFC0 = (0x01 << 21) | OPC_CP0,
OPC_MTC0 = (0x04 << 21) | OPC_CP0,
OPC_DMTC0 = (0x05 << 21) | OPC_CP0,
OPC_MFTR = (0x08 << 21) | OPC_CP0,
OPC_RDPGPR = (0x0A << 21) | OPC_CP0,
OPC_MFMC0 = (0x0B << 21) | OPC_CP0,
OPC_MTTR = (0x0C << 21) | OPC_CP0,
OPC_WRPGPR = (0x0E << 21) | OPC_CP0,
OPC_C0 = (0x10 << 21) | OPC_CP0,
OPC_C0_FIRST = (0x10 << 21) | OPC_CP0,
OPC_C0_LAST = (0x1F << 21) | OPC_CP0,
};
/* MFMC0 opcodes */
#define MASK_MFMC0(op) MASK_CP0(op) | (op & 0xFFFF)
enum {
OPC_DMT = 0x01 | (0 << 5) | (0x0F << 6) | (0x01 << 11) | OPC_MFMC0,
OPC_EMT = 0x01 | (1 << 5) | (0x0F << 6) | (0x01 << 11) | OPC_MFMC0,
OPC_DVPE = 0x01 | (0 << 5) | OPC_MFMC0,
OPC_EVPE = 0x01 | (1 << 5) | OPC_MFMC0,
OPC_DI = (0 << 5) | (0x0C << 11) | OPC_MFMC0,
OPC_EI = (1 << 5) | (0x0C << 11) | OPC_MFMC0,
};
/* Coprocessor 0 (with rs == C0) */
#define MASK_C0(op) MASK_CP0(op) | (op & 0x3F)
enum {
OPC_TLBR = 0x01 | OPC_C0,
OPC_TLBWI = 0x02 | OPC_C0,
OPC_TLBWR = 0x06 | OPC_C0,
OPC_TLBP = 0x08 | OPC_C0,
OPC_RFE = 0x10 | OPC_C0,
OPC_ERET = 0x18 | OPC_C0,
OPC_DERET = 0x1F | OPC_C0,
OPC_WAIT = 0x20 | OPC_C0,
};
/* Coprocessor 1 (rs field) */
#define MASK_CP1(op) MASK_OP_MAJOR(op) | (op & (0x1F << 21))
enum {
OPC_MFC1 = (0x00 << 21) | OPC_CP1,
OPC_DMFC1 = (0x01 << 21) | OPC_CP1,
OPC_CFC1 = (0x02 << 21) | OPC_CP1,
OPC_MFHC1 = (0x03 << 21) | OPC_CP1,
OPC_MTC1 = (0x04 << 21) | OPC_CP1,
OPC_DMTC1 = (0x05 << 21) | OPC_CP1,
OPC_CTC1 = (0x06 << 21) | OPC_CP1,
OPC_MTHC1 = (0x07 << 21) | OPC_CP1,
OPC_BC1 = (0x08 << 21) | OPC_CP1, /* bc */
OPC_BC1ANY2 = (0x09 << 21) | OPC_CP1,
OPC_BC1ANY4 = (0x0A << 21) | OPC_CP1,
OPC_S_FMT = (0x10 << 21) | OPC_CP1, /* 16: fmt=single fp */
OPC_D_FMT = (0x11 << 21) | OPC_CP1, /* 17: fmt=double fp */
OPC_E_FMT = (0x12 << 21) | OPC_CP1, /* 18: fmt=extended fp */
OPC_Q_FMT = (0x13 << 21) | OPC_CP1, /* 19: fmt=quad fp */
OPC_W_FMT = (0x14 << 21) | OPC_CP1, /* 20: fmt=32bit fixed */
OPC_L_FMT = (0x15 << 21) | OPC_CP1, /* 21: fmt=64bit fixed */
OPC_PS_FMT = (0x16 << 21) | OPC_CP1, /* 22: fmt=paired single fp */
};
#define MASK_CP1_FUNC(op) MASK_CP1(op) | (op & 0x3F)
#define MASK_BC1(op) MASK_CP1(op) | (op & (0x3 << 16))
enum {
OPC_BC1F = (0x00 << 16) | OPC_BC1,
OPC_BC1T = (0x01 << 16) | OPC_BC1,
OPC_BC1FL = (0x02 << 16) | OPC_BC1,
OPC_BC1TL = (0x03 << 16) | OPC_BC1,
};
enum {
OPC_BC1FANY2 = (0x00 << 16) | OPC_BC1ANY2,
OPC_BC1TANY2 = (0x01 << 16) | OPC_BC1ANY2,
};
enum {
OPC_BC1FANY4 = (0x00 << 16) | OPC_BC1ANY4,
OPC_BC1TANY4 = (0x01 << 16) | OPC_BC1ANY4,
};
#define MASK_CP2(op) MASK_OP_MAJOR(op) | (op & (0x1F << 21))
enum {
OPC_MFC2 = (0x00 << 21) | OPC_CP2,
OPC_DMFC2 = (0x01 << 21) | OPC_CP2,
OPC_CFC2 = (0x02 << 21) | OPC_CP2,
OPC_MFHC2 = (0x03 << 21) | OPC_CP2,
OPC_MTC2 = (0x04 << 21) | OPC_CP2,
OPC_DMTC2 = (0x05 << 21) | OPC_CP2,
OPC_CTC2 = (0x06 << 21) | OPC_CP2,
OPC_MTHC2 = (0x07 << 21) | OPC_CP2,
OPC_BC2 = (0x08 << 21) | OPC_CP2,
};
#define MASK_CP3(op) MASK_OP_MAJOR(op) | (op & 0x3F)
enum {
OPC_LWXC1 = 0x00 | OPC_CP3,
OPC_LDXC1 = 0x01 | OPC_CP3,
OPC_LUXC1 = 0x05 | OPC_CP3,
OPC_SWXC1 = 0x08 | OPC_CP3,
OPC_SDXC1 = 0x09 | OPC_CP3,
OPC_SUXC1 = 0x0D | OPC_CP3,
OPC_PREFX = 0x0F | OPC_CP3,
OPC_ALNV_PS = 0x1E | OPC_CP3,
OPC_MADD_S = 0x20 | OPC_CP3,
OPC_MADD_D = 0x21 | OPC_CP3,
OPC_MADD_PS = 0x26 | OPC_CP3,
OPC_MSUB_S = 0x28 | OPC_CP3,
OPC_MSUB_D = 0x29 | OPC_CP3,
OPC_MSUB_PS = 0x2E | OPC_CP3,
OPC_NMADD_S = 0x30 | OPC_CP3,
OPC_NMADD_D = 0x31 | OPC_CP3,
OPC_NMADD_PS= 0x36 | OPC_CP3,
OPC_NMSUB_S = 0x38 | OPC_CP3,
OPC_NMSUB_D = 0x39 | OPC_CP3,
OPC_NMSUB_PS= 0x3E | OPC_CP3,
};
/* global register indices */
static TCGv_ptr cpu_env;
static TCGv cpu_gpr[32], cpu_PC;
static TCGv cpu_HI[MIPS_DSP_ACC], cpu_LO[MIPS_DSP_ACC], cpu_ACX[MIPS_DSP_ACC];
static TCGv cpu_dspctrl, btarget, bcond;
static TCGv_i32 hflags;
static TCGv_i32 fpu_fcr0, fpu_fcr31;
static uint32_t gen_opc_hflags[OPC_BUF_SIZE];
#include "exec/gen-icount.h"
#define gen_helper_0i(name, arg) do { \
TCGv_i32 helper_tmp = tcg_const_i32(arg); \
gen_helper_##name(helper_tmp); \
tcg_temp_free_i32(helper_tmp); \
} while(0)
#define gen_helper_1i(name, arg1, arg2) do { \
TCGv_i32 helper_tmp = tcg_const_i32(arg2); \
gen_helper_##name(arg1, helper_tmp); \
tcg_temp_free_i32(helper_tmp); \
} while(0)
#define gen_helper_2i(name, arg1, arg2, arg3) do { \
TCGv_i32 helper_tmp = tcg_const_i32(arg3); \
gen_helper_##name(arg1, arg2, helper_tmp); \
tcg_temp_free_i32(helper_tmp); \
} while(0)
#define gen_helper_3i(name, arg1, arg2, arg3, arg4) do { \
TCGv_i32 helper_tmp = tcg_const_i32(arg4); \
gen_helper_##name(arg1, arg2, arg3, helper_tmp); \
tcg_temp_free_i32(helper_tmp); \
} while(0)
#define gen_helper_4i(name, arg1, arg2, arg3, arg4, arg5) do { \
TCGv_i32 helper_tmp = tcg_const_i32(arg5); \
gen_helper_##name(arg1, arg2, arg3, arg4, helper_tmp); \
tcg_temp_free_i32(helper_tmp); \
} while(0)
typedef struct DisasContext {
struct TranslationBlock *tb;
target_ulong pc, saved_pc;
uint32_t opcode;
int singlestep_enabled;
/* Routine used to access memory */
int mem_idx;
uint32_t hflags, saved_hflags;
int bstate;
target_ulong btarget;
} DisasContext;
enum {
BS_NONE = 0, /* We go out of the TB without reaching a branch or an
* exception condition */
BS_STOP = 1, /* We want to stop translation for any reason */
BS_BRANCH = 2, /* We reached a branch condition */
BS_EXCP = 3, /* We reached an exception condition */
};
static const char *regnames[] =
{ "r0", "at", "v0", "v1", "a0", "a1", "a2", "a3",
"t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
"t8", "t9", "k0", "k1", "gp", "sp", "s8", "ra", };
static const char *regnames_HI[] =
{ "HI0", "HI1", "HI2", "HI3", };
static const char *regnames_LO[] =
{ "LO0", "LO1", "LO2", "LO3", };
static const char *regnames_ACX[] =
{ "ACX0", "ACX1", "ACX2", "ACX3", };
static const char *fregnames[] =
{ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
"f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
"f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
"f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", };
#ifdef MIPS_DEBUG_DISAS
#define MIPS_DEBUG(fmt, ...) \
qemu_log_mask(CPU_LOG_TB_IN_ASM, \
TARGET_FMT_lx ": %08x " fmt "\n", \
ctx->pc, ctx->opcode , ## __VA_ARGS__)
#define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#define MIPS_DEBUG(fmt, ...) do { } while(0)
#define LOG_DISAS(...) do { } while (0)
#endif
#define MIPS_INVAL(op) \
do { \
MIPS_DEBUG("Invalid %s %03x %03x %03x", op, ctx->opcode >> 26, \
ctx->opcode & 0x3F, ((ctx->opcode >> 16) & 0x1F)); \
} while (0)
/* General purpose registers moves. */
static inline void gen_load_gpr (TCGv t, int reg)
{
if (reg == 0)
tcg_gen_movi_tl(t, 0);
else
tcg_gen_mov_tl(t, cpu_gpr[reg]);
}
static inline void gen_store_gpr (TCGv t, int reg)
{
if (reg != 0)
tcg_gen_mov_tl(cpu_gpr[reg], t);
}
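/* GPR 0 is hard-wired to zero: loads from it produce the constant 0 and
   stores to it are discarded, so the guest's $zero semantics hold. */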
/* Moves to/from ACX register. */
static inline void gen_load_ACX (TCGv t, int reg)
{
tcg_gen_mov_tl(t, cpu_ACX[reg]);
}
static inline void gen_store_ACX (TCGv t, int reg)
{
tcg_gen_mov_tl(cpu_ACX[reg], t);
}
/* Moves to/from shadow registers. */
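/* The previous shadow register set is taken from SRSCtl.PSS; the set index
   is scaled by the size of one 32-register bank (32 * sizeof(target_ulong))
   and added to cpu_env as a byte offset, from which the requested GPR of
   that bank is then loaded or stored. */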
static inline void gen_load_srsgpr (int from, int to)
{
TCGv t0 = tcg_temp_new();
if (from == 0)
tcg_gen_movi_tl(t0, 0);
else {
TCGv_i32 t2 = tcg_temp_new_i32();
TCGv_ptr addr = tcg_temp_new_ptr();
tcg_gen_ld_i32(t2, cpu_env, offsetof(CPUMIPSState, CP0_SRSCtl));
tcg_gen_shri_i32(t2, t2, CP0SRSCtl_PSS);
tcg_gen_andi_i32(t2, t2, 0xf);
tcg_gen_muli_i32(t2, t2, sizeof(target_ulong) * 32);
tcg_gen_ext_i32_ptr(addr, t2);
tcg_gen_add_ptr(addr, cpu_env, addr);
tcg_gen_ld_tl(t0, addr, sizeof(target_ulong) * from);
tcg_temp_free_ptr(addr);
tcg_temp_free_i32(t2);
}
gen_store_gpr(t0, to);
tcg_temp_free(t0);
}
static inline void gen_store_srsgpr (int from, int to)
{
if (to != 0) {
TCGv t0 = tcg_temp_new();
TCGv_i32 t2 = tcg_temp_new_i32();
TCGv_ptr addr = tcg_temp_new_ptr();
gen_load_gpr(t0, from);
tcg_gen_ld_i32(t2, cpu_env, offsetof(CPUMIPSState, CP0_SRSCtl));
tcg_gen_shri_i32(t2, t2, CP0SRSCtl_PSS);
tcg_gen_andi_i32(t2, t2, 0xf);
tcg_gen_muli_i32(t2, t2, sizeof(target_ulong) * 32);
tcg_gen_ext_i32_ptr(addr, t2);
tcg_gen_add_ptr(addr, cpu_env, addr);
tcg_gen_st_tl(t0, addr, sizeof(target_ulong) * to);
tcg_temp_free_ptr(addr);
tcg_temp_free_i32(t2);
tcg_temp_free(t0);
}
}
/* Floating point register moves. */
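/* Each 64-bit FPR is stored as a union; FP_ENDIAN_IDX names the 32-bit half
   that holds the single-precision value, and the "h" accessors touch the
   other half (MTHC1/MFHC1, paired singles).  When the CPU runs with 32-bit
   FPRs (FR = 0), 64-bit values are split across an even/odd register pair,
   as gen_load_fpr64/gen_store_fpr64 do below. */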
static inline void gen_load_fpr32 (TCGv_i32 t, int reg)
{
tcg_gen_ld_i32(t, cpu_env, offsetof(CPUMIPSState, active_fpu.fpr[reg].w[FP_ENDIAN_IDX]));
}
static inline void gen_store_fpr32 (TCGv_i32 t, int reg)
{
tcg_gen_st_i32(t, cpu_env, offsetof(CPUMIPSState, active_fpu.fpr[reg].w[FP_ENDIAN_IDX]));
}
static inline void gen_load_fpr32h (TCGv_i32 t, int reg)
{
tcg_gen_ld_i32(t, cpu_env, offsetof(CPUMIPSState, active_fpu.fpr[reg].w[!FP_ENDIAN_IDX]));
}
static inline void gen_store_fpr32h (TCGv_i32 t, int reg)
{
tcg_gen_st_i32(t, cpu_env, offsetof(CPUMIPSState, active_fpu.fpr[reg].w[!FP_ENDIAN_IDX]));
}
static inline void gen_load_fpr64 (DisasContext *ctx, TCGv_i64 t, int reg)
{
if (ctx->hflags & MIPS_HFLAG_F64) {
tcg_gen_ld_i64(t, cpu_env, offsetof(CPUMIPSState, active_fpu.fpr[reg].d));
} else {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_new_i32();
gen_load_fpr32(t0, reg & ~1);
gen_load_fpr32(t1, reg | 1);
tcg_gen_concat_i32_i64(t, t0, t1);
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
}
}
static inline void gen_store_fpr64 (DisasContext *ctx, TCGv_i64 t, int reg)
{
if (ctx->hflags & MIPS_HFLAG_F64) {
tcg_gen_st_i64(t, cpu_env, offsetof(CPUMIPSState, active_fpu.fpr[reg].d));
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i32 t1 = tcg_temp_new_i32();
tcg_gen_trunc_i64_i32(t1, t);
gen_store_fpr32(t1, reg & ~1);
tcg_gen_shri_i64(t0, t, 32);
tcg_gen_trunc_i64_i32(t1, t0);
gen_store_fpr32(t1, reg | 1);
tcg_temp_free_i32(t1);
tcg_temp_free_i64(t0);
}
}
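/* get_fp_bit maps a floating-point condition code to its bit position in
   FCR31: condition code 0 lives in bit 23, codes 1..7 in bits 25..31. */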
static inline int get_fp_bit (int cc)
{
if (cc)
return 24 + cc;
else
return 23;
}
#define FOP_CONDS(type, fmt, bits) \
static inline void gen_cmp ## type ## _ ## fmt(int n, TCGv_i##bits a, \
TCGv_i##bits b, int cc) \
{ \
switch (n) { \
case 0: gen_helper_3i(cmp ## type ## _ ## fmt ## _f, cpu_env, a, b, cc); break;\
case 1: gen_helper_3i(cmp ## type ## _ ## fmt ## _un, cpu_env, a, b, cc); break;\
case 2: gen_helper_3i(cmp ## type ## _ ## fmt ## _eq, cpu_env, a, b, cc); break;\
case 3: gen_helper_3i(cmp ## type ## _ ## fmt ## _ueq, cpu_env, a, b, cc); break;\
case 4: gen_helper_3i(cmp ## type ## _ ## fmt ## _olt, cpu_env, a, b, cc); break;\
case 5: gen_helper_3i(cmp ## type ## _ ## fmt ## _ult, cpu_env, a, b, cc); break;\
case 6: gen_helper_3i(cmp ## type ## _ ## fmt ## _ole, cpu_env, a, b, cc); break;\
case 7: gen_helper_3i(cmp ## type ## _ ## fmt ## _ule, cpu_env, a, b, cc); break;\
case 8: gen_helper_3i(cmp ## type ## _ ## fmt ## _sf, cpu_env, a, b, cc); break;\
case 9: gen_helper_3i(cmp ## type ## _ ## fmt ## _ngle, cpu_env, a, b, cc); break;\
case 10: gen_helper_3i(cmp ## type ## _ ## fmt ## _seq, cpu_env, a, b, cc); break;\
case 11: gen_helper_3i(cmp ## type ## _ ## fmt ## _ngl, cpu_env, a, b, cc); break;\
case 12: gen_helper_3i(cmp ## type ## _ ## fmt ## _lt, cpu_env, a, b, cc); break;\
case 13: gen_helper_3i(cmp ## type ## _ ## fmt ## _nge, cpu_env, a, b, cc); break;\
case 14: gen_helper_3i(cmp ## type ## _ ## fmt ## _le, cpu_env, a, b, cc); break;\
case 15: gen_helper_3i(cmp ## type ## _ ## fmt ## _ngt, cpu_env, a, b, cc); break;\
default: abort(); \
} \
}
FOP_CONDS(, d, 64)
FOP_CONDS(abs, d, 64)
FOP_CONDS(, s, 32)
FOP_CONDS(abs, s, 32)
FOP_CONDS(, ps, 64)
FOP_CONDS(abs, ps, 64)
#undef FOP_CONDS
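/* Each FOP_CONDS expansion above emits a dispatcher over the sixteen
   comparison conditions of c.cond.fmt (the low four bits of the function
   field), forwarding to the matching cmp helper; the "abs" variants
   correspond to the MIPS-3D CABS.cond.fmt instructions, which compare
   absolute values. */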
/* Tests */
#define OP_COND(name, cond) \
static inline void glue(gen_op_, name) (TCGv ret, TCGv t0, TCGv t1) \
{ \
int l1 = gen_new_label(); \
int l2 = gen_new_label(); \
\
tcg_gen_brcond_tl(cond, t0, t1, l1); \
tcg_gen_movi_tl(ret, 0); \
tcg_gen_br(l2); \
gen_set_label(l1); \
tcg_gen_movi_tl(ret, 1); \
gen_set_label(l2); \
}
OP_COND(eq, TCG_COND_EQ);
OP_COND(ne, TCG_COND_NE);
OP_COND(ge, TCG_COND_GE);
OP_COND(geu, TCG_COND_GEU);
OP_COND(lt, TCG_COND_LT);
OP_COND(ltu, TCG_COND_LTU);
#undef OP_COND
#define OP_CONDI(name, cond) \
static inline void glue(gen_op_, name) (TCGv ret, TCGv t0, target_ulong val) \
{ \
int l1 = gen_new_label(); \
int l2 = gen_new_label(); \
\
tcg_gen_brcondi_tl(cond, t0, val, l1); \
tcg_gen_movi_tl(ret, 0); \
tcg_gen_br(l2); \
gen_set_label(l1); \
tcg_gen_movi_tl(ret, 1); \
gen_set_label(l2); \
}
OP_CONDI(lti, TCG_COND_LT);
OP_CONDI(ltiu, TCG_COND_LTU);
#undef OP_CONDI
#define OP_CONDZ(name, cond) \
static inline void glue(gen_op_, name) (TCGv ret, TCGv t0) \
{ \
int l1 = gen_new_label(); \
int l2 = gen_new_label(); \
\
tcg_gen_brcondi_tl(cond, t0, 0, l1); \
tcg_gen_movi_tl(ret, 0); \
tcg_gen_br(l2); \
gen_set_label(l1); \
tcg_gen_movi_tl(ret, 1); \
gen_set_label(l2); \
}
OP_CONDZ(gez, TCG_COND_GE);
OP_CONDZ(gtz, TCG_COND_GT);
OP_CONDZ(lez, TCG_COND_LE);
OP_CONDZ(ltz, TCG_COND_LT);
#undef OP_CONDZ
static inline void gen_save_pc(target_ulong pc)
{
tcg_gen_movi_tl(cpu_PC, pc);
}
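/* save_cpu_state flushes the translation-time copies of PC and hflags to
   the CPU state only when they differ from the values last written, and
   materialises the branch target when a branch is pending; this keeps the
   generated TCG code minimal on the fast path. */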
static inline void save_cpu_state (DisasContext *ctx, int do_save_pc)
{
LOG_DISAS("hflags %08x saved %08x\n", ctx->hflags, ctx->saved_hflags);
if (do_save_pc && ctx->pc != ctx->saved_pc) {
gen_save_pc(ctx->pc);
ctx->saved_pc = ctx->pc;
}
if (ctx->hflags != ctx->saved_hflags) {
tcg_gen_movi_i32(hflags, ctx->hflags);
ctx->saved_hflags = ctx->hflags;
switch (ctx->hflags & MIPS_HFLAG_BMASK) {
case MIPS_HFLAG_BR:
break;
case MIPS_HFLAG_BC:
case MIPS_HFLAG_BL:
case MIPS_HFLAG_B:
tcg_gen_movi_tl(btarget, ctx->btarget);
break;
}
}
}
static inline void restore_cpu_state (CPUMIPSState *env, DisasContext *ctx)
{
ctx->saved_hflags = ctx->hflags;
switch (ctx->hflags & MIPS_HFLAG_BMASK) {
case MIPS_HFLAG_BR:
break;
case MIPS_HFLAG_BC:
case MIPS_HFLAG_BL:
case MIPS_HFLAG_B:
ctx->btarget = env->btarget;
break;
}
}
static inline void
generate_exception_err (DisasContext *ctx, int excp, int err)
{
TCGv_i32 texcp = tcg_const_i32(excp);
TCGv_i32 terr = tcg_const_i32(err);
save_cpu_state(ctx, 1);
gen_helper_raise_exception_err(cpu_env, texcp, terr);
tcg_temp_free_i32(terr);
tcg_temp_free_i32(texcp);
}
static inline void
generate_exception (DisasContext *ctx, int excp)
{
save_cpu_state(ctx, 1);
gen_helper_1i(raise_exception, cpu_env, excp);
}
/* Addresses computation */
static inline void gen_op_addr_add (DisasContext *ctx, TCGv ret, TCGv arg0, TCGv arg1)
{
tcg_gen_add_tl(ret, arg0, arg1);
#if defined(TARGET_MIPS64)
    /* For compatibility with 32-bit code, data references in user mode
       with Status_UX = 0 should be cast to 32 bits and sign extended.
       See the MIPS64 PRA manual, section 4.10. */
if (((ctx->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) &&
!(ctx->hflags & MIPS_HFLAG_UX)) {
tcg_gen_ext32s_i64(ret, ret);
}
#endif
}
static inline void check_cp0_enabled(DisasContext *ctx)
{
if (unlikely(!(ctx->hflags & MIPS_HFLAG_CP0)))
generate_exception_err(ctx, EXCP_CpU, 0);
}
static inline void check_cp1_enabled(DisasContext *ctx)
{
if (unlikely(!(ctx->hflags & MIPS_HFLAG_FPU)))
generate_exception_err(ctx, EXCP_CpU, 1);
}
/* Verify that the processor is running with COP1X instructions enabled.
This is associated with the nabla symbol in the MIPS32 and MIPS64
opcode tables. */
static inline void check_cop1x(DisasContext *ctx)
{
if (unlikely(!(ctx->hflags & MIPS_HFLAG_COP1X)))
generate_exception(ctx, EXCP_RI);
}
/* Verify that the processor is running with 64-bit floating-point
operations enabled. */
static inline void check_cp1_64bitmode(DisasContext *ctx)
{
if (unlikely(~ctx->hflags & (MIPS_HFLAG_F64 | MIPS_HFLAG_COP1X)))
generate_exception(ctx, EXCP_RI);
}
/*
 * Verify that a floating-point register is valid. An operation is not
 * defined if bit 0 of any register specification is set and the FR bit
 * in the Status register equals zero, since the register numbers then
 * specify an even-odd pair of adjacent coprocessor general registers.
 * When the FR bit in the Status register equals one, both even and odd
 * register numbers are valid. This limitation exists only for 64-bit
 * wide (d, l, ps) registers.
 *
 * Multiple 64-bit wide registers can be checked by calling
 * check_cp1_registers(ctx, freg1 | freg2 | ... | fregN);
 */
static inline void check_cp1_registers(DisasContext *ctx, int regs)
{
if (unlikely(!(ctx->hflags & MIPS_HFLAG_F64) && (regs & 1)))
generate_exception(ctx, EXCP_RI);
}
/* This code generates a "reserved instruction" exception if the
CPU does not support the instruction set corresponding to flags. */
static inline void check_insn(CPUMIPSState *env, DisasContext *ctx, int flags)
{
if (unlikely(!(env->insn_flags & flags)))
generate_exception(ctx, EXCP_RI);
}
/* This code generates a "reserved instruction" exception if 64-bit
instructions are not enabled. */
static inline void check_mips_64(DisasContext *ctx)
{
if (unlikely(!(ctx->hflags & MIPS_HFLAG_64)))
generate_exception(ctx, EXCP_RI);
}
/* load/store instructions. */
#define OP_LD(insn,fname) \
static inline void op_ldst_##insn(TCGv ret, TCGv arg1, DisasContext *ctx) \
{ \
tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx); \
}
OP_LD(lb,ld8s);
OP_LD(lbu,ld8u);
OP_LD(lh,ld16s);
OP_LD(lhu,ld16u);
OP_LD(lw,ld32s);
#if defined(TARGET_MIPS64)
OP_LD(lwu,ld32u);
OP_LD(ld,ld64);
#endif
#undef OP_LD
#define OP_ST(insn,fname) \
static inline void op_ldst_##insn(TCGv arg1, TCGv arg2, DisasContext *ctx) \
{ \
tcg_gen_qemu_##fname(arg1, arg2, ctx->mem_idx); \
}
OP_ST(sb,st8);
OP_ST(sh,st16);
OP_ST(sw,st32);
#if defined(TARGET_MIPS64)
OP_ST(sd,st64);
#endif
#undef OP_ST
#ifdef CONFIG_USER_ONLY
#define OP_LD_ATOMIC(insn,fname) \
static inline void op_ldst_##insn(TCGv ret, TCGv arg1, DisasContext *ctx) \
{ \
TCGv t0 = tcg_temp_new(); \
tcg_gen_mov_tl(t0, arg1); \
tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx); \
tcg_gen_st_tl(t0, cpu_env, offsetof(CPUMIPSState, lladdr)); \
tcg_gen_st_tl(ret, cpu_env, offsetof(CPUMIPSState, llval)); \
tcg_temp_free(t0); \
}
#else
#define OP_LD_ATOMIC(insn,fname) \
static inline void op_ldst_##insn(TCGv ret, TCGv arg1, DisasContext *ctx) \
{ \
gen_helper_3i(insn, ret, cpu_env, arg1, ctx->mem_idx); \
}
#endif
OP_LD_ATOMIC(ll,ld32s);
#if defined(TARGET_MIPS64)
OP_LD_ATOMIC(lld,ld64);
#endif
#undef OP_LD_ATOMIC
#ifdef CONFIG_USER_ONLY
#define OP_ST_ATOMIC(insn,fname,ldname,almask) \
static inline void op_ldst_##insn(TCGv arg1, TCGv arg2, int rt, DisasContext *ctx) \
{ \
TCGv t0 = tcg_temp_new(); \
int l1 = gen_new_label(); \
int l2 = gen_new_label(); \
\
tcg_gen_andi_tl(t0, arg2, almask); \
tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1); \
tcg_gen_st_tl(arg2, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr)); \
generate_exception(ctx, EXCP_AdES); \
gen_set_label(l1); \
tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUMIPSState, lladdr)); \
tcg_gen_brcond_tl(TCG_COND_NE, arg2, t0, l2); \
tcg_gen_movi_tl(t0, rt | ((almask << 3) & 0x20)); \
tcg_gen_st_tl(t0, cpu_env, offsetof(CPUMIPSState, llreg)); \
tcg_gen_st_tl(arg1, cpu_env, offsetof(CPUMIPSState, llnewval)); \
gen_helper_1i(raise_exception, cpu_env, EXCP_SC); \
gen_set_label(l2); \
tcg_gen_movi_tl(t0, 0); \
gen_store_gpr(t0, rt); \
tcg_temp_free(t0); \
}
#else
#define OP_ST_ATOMIC(insn,fname,ldname,almask) \
static inline void op_ldst_##insn(TCGv arg1, TCGv arg2, int rt, DisasContext *ctx) \
{ \
TCGv t0 = tcg_temp_new(); \
gen_helper_4i(insn, t0, cpu_env, arg1, arg2, ctx->mem_idx); \
gen_store_gpr(t0, rt); \
tcg_temp_free(t0); \
}
#endif
OP_ST_ATOMIC(sc,st32,ld32s,0x3);
#if defined(TARGET_MIPS64)
OP_ST_ATOMIC(scd,st64,ld64,0x7);
#endif
#undef OP_ST_ATOMIC
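/* LL/SC emulation: in user-only mode LL records the address and the loaded
   value in lladdr/llval; SC first checks alignment (raising AdES on a
   misaligned address), then compares the address against lladdr.  On a
   match it stashes the register number and new value and raises EXCP_SC so
   the store can be completed outside translated code; otherwise it writes
   0 to rt to report failure.  With softmmu the whole sequence is handled
   by helpers. */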
/* Load and store */
static void gen_ldst (DisasContext *ctx, uint32_t opc, int rt,
int base, int16_t offset)
{
const char * __attribute__((unused)) opn = "ldst";
TCGv t0, t1, t2;
t0 = tcg_temp_new();
t1 = tcg_temp_new();
if (base == 0) {
tcg_gen_movi_tl(t0, offset);
} else if (offset == 0) {
gen_load_gpr(t0, base);
} else {
tcg_gen_movi_tl(t0, offset);
gen_op_addr_add(ctx, t0, cpu_gpr[base], t0);
}
/* Don't do NOP if destination is zero: we must perform the actual
memory access. */
switch (opc) {
#if defined(TARGET_MIPS64)
case OPC_LWU:
save_cpu_state(ctx, 0);
op_ldst_lwu(t0, t0, ctx);
gen_store_gpr(t0, rt);
opn = "lwu";
break;
case OPC_LD:
save_cpu_state(ctx, 0);
op_ldst_ld(t0, t0, ctx);
gen_store_gpr(t0, rt);
opn = "ld";
break;
case OPC_LLD:
save_cpu_state(ctx, 0);
op_ldst_lld(t0, t0, ctx);
gen_store_gpr(t0, rt);
opn = "lld";
break;
case OPC_SD:
save_cpu_state(ctx, 0);
gen_load_gpr(t1, rt);
op_ldst_sd(t1, t0, ctx);
opn = "sd";
break;
case OPC_LDL:
tcg_gen_andi_tl(t1, t0, 7);
#ifndef TARGET_WORDS_BIGENDIAN
tcg_gen_xori_tl(t1, t1, 7);
#endif
tcg_gen_shli_tl(t1, t1, 3);
tcg_gen_andi_tl(t0, t0, ~7);
tcg_gen_qemu_ld64(t0, t0, ctx->mem_idx);
tcg_gen_shl_tl(t0, t0, t1);
tcg_gen_xori_tl(t1, t1, 63);
t2 = tcg_const_tl(0x7fffffffffffffffull);
tcg_gen_shr_tl(t2, t2, t1);
gen_load_gpr(t1, rt);
tcg_gen_and_tl(t1, t1, t2);
tcg_temp_free(t2);
tcg_gen_or_tl(t0, t0, t1);
gen_store_gpr(t0, rt);
opn = "ldl";
break;
case OPC_SDL:
save_cpu_state(ctx, 1);
gen_load_gpr(t1, rt);
gen_helper_3i(sdl, cpu_env, t1, t0, ctx->mem_idx);
opn = "sdl";
break;
case OPC_LDR:
tcg_gen_andi_tl(t1, t0, 7);
#ifdef TARGET_WORDS_BIGENDIAN
tcg_gen_xori_tl(t1, t1, 7);
#endif
tcg_gen_shli_tl(t1, t1, 3);
tcg_gen_andi_tl(t0, t0, ~7);
tcg_gen_qemu_ld64(t0, t0, ctx->mem_idx);
tcg_gen_shr_tl(t0, t0, t1);
tcg_gen_xori_tl(t1, t1, 63);
t2 = tcg_const_tl(0xfffffffffffffffeull);
tcg_gen_shl_tl(t2, t2, t1);
gen_load_gpr(t1, rt);
tcg_gen_and_tl(t1, t1, t2);
tcg_temp_free(t2);
tcg_gen_or_tl(t0, t0, t1);
gen_store_gpr(t0, rt);
opn = "ldr";
break;
case OPC_SDR:
save_cpu_state(ctx, 1);
gen_load_gpr(t1, rt);
gen_helper_3i(sdr, cpu_env, t1, t0, ctx->mem_idx);
opn = "sdr";
break;
#endif
case OPC_LW:
save_cpu_state(ctx, 0);
op_ldst_lw(t0, t0, ctx);
gen_store_gpr(t0, rt);
opn = "lw";
break;
case OPC_SW:
save_cpu_state(ctx, 0);
gen_load_gpr(t1, rt);
op_ldst_sw(t1, t0, ctx);
opn = "sw";
break;
case OPC_LH:
save_cpu_state(ctx, 0);
op_ldst_lh(t0, t0, ctx);
gen_store_gpr(t0, rt);
opn = "lh";
break;
case OPC_SH:
save_cpu_state(ctx, 0);
gen_load_gpr(t1, rt);
op_ldst_sh(t1, t0, ctx);
opn = "sh";
break;
case OPC_LHU:
save_cpu_state(ctx, 0);
op_ldst_lhu(t0, t0, ctx);
gen_store_gpr(t0, rt);
opn = "lhu";
break;
case OPC_LB:
save_cpu_state(ctx, 0);
op_ldst_lb(t0, t0, ctx);
gen_store_gpr(t0, rt);
opn = "lb";
break;
case OPC_SB:
save_cpu_state(ctx, 0);
gen_load_gpr(t1, rt);
op_ldst_sb(t1, t0, ctx);
opn = "sb";
break;
case OPC_LBU:
save_cpu_state(ctx, 0);
op_ldst_lbu(t0, t0, ctx);
gen_store_gpr(t0, rt);
opn = "lbu";
break;
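    /* LWL/LWR (and LDL/LDR above) are emulated inline: the address is
       aligned down, a full word is loaded, shifted so that the wanted
       bytes line up with rt, and merged with the bytes of rt that must be
       kept under a mask derived from the low address bits; the target
       endianness only changes which end of the word is selected.  The
       partial stores (SWL/SWR, SDL/SDR) go through helpers instead. */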
case OPC_LWL:
tcg_gen_andi_tl(t1, t0, 3);
#ifndef TARGET_WORDS_BIGENDIAN
tcg_gen_xori_tl(t1, t1, 3);
#endif
tcg_gen_shli_tl(t1, t1, 3);
tcg_gen_andi_tl(t0, t0, ~3);
tcg_gen_qemu_ld32u(t0, t0, ctx->mem_idx);
tcg_gen_shl_tl(t0, t0, t1);
tcg_gen_xori_tl(t1, t1, 31);
t2 = tcg_const_tl(0x7fffffffull);
tcg_gen_shr_tl(t2, t2, t1);
gen_load_gpr(t1, rt);
tcg_gen_and_tl(t1, t1, t2);
tcg_temp_free(t2);
tcg_gen_or_tl(t0, t0, t1);
tcg_gen_ext32s_tl(t0, t0);
gen_store_gpr(t0, rt);
opn = "lwl";
break;
case OPC_SWL:
save_cpu_state(ctx, 1);
gen_load_gpr(t1, rt);
gen_helper_3i(swl, cpu_env, t1, t0, ctx->mem_idx);
opn = "swl";
break;
case OPC_LWR:
tcg_gen_andi_tl(t1, t0, 3);
#ifdef TARGET_WORDS_BIGENDIAN
tcg_gen_xori_tl(t1, t1, 3);
#endif
tcg_gen_shli_tl(t1, t1, 3);
tcg_gen_andi_tl(t0, t0, ~3);
tcg_gen_qemu_ld32u(t0, t0, ctx->mem_idx);
tcg_gen_shr_tl(t0, t0, t1);
tcg_gen_xori_tl(t1, t1, 31);
t2 = tcg_const_tl(0xfffffffeull);
tcg_gen_shl_tl(t2, t2, t1);
gen_load_gpr(t1, rt);
tcg_gen_and_tl(t1, t1, t2);
tcg_temp_free(t2);
tcg_gen_or_tl(t0, t0, t1);
gen_store_gpr(t0, rt);
opn = "lwr";
break;
case OPC_SWR:
save_cpu_state(ctx, 1);
gen_load_gpr(t1, rt);
gen_helper_3i(swr, cpu_env, t1, t0, ctx->mem_idx);
opn = "swr";
break;
case OPC_LL:
save_cpu_state(ctx, 1);
op_ldst_ll(t0, t0, ctx);
gen_store_gpr(t0, rt);
opn = "ll";
break;
}
MIPS_DEBUG("%s %s, %d(%s)", opn, regnames[rt], offset, regnames[base]);
tcg_temp_free(t0);
tcg_temp_free(t1);
}
/* Store conditional */
static void gen_st_cond (DisasContext *ctx, uint32_t opc, int rt,
int base, int16_t offset)
{
const char * __attribute__((unused)) opn = "st_cond";
TCGv t0, t1;
t0 = tcg_temp_local_new();
if (base == 0) {
tcg_gen_movi_tl(t0, offset);
} else if (offset == 0) {
gen_load_gpr(t0, base);
} else {
tcg_gen_movi_tl(t0, offset);
gen_op_addr_add(ctx, t0, cpu_gpr[base], t0);
}
/* Don't do NOP if destination is zero: we must perform the actual
memory access. */
t1 = tcg_temp_local_new();
gen_load_gpr(t1, rt);
switch (opc) {
#if defined(TARGET_MIPS64)
case OPC_SCD:
save_cpu_state(ctx, 0);
op_ldst_scd(t1, t0, rt, ctx);
opn = "scd";
break;
#endif
case OPC_SC:
save_cpu_state(ctx, 1);
op_ldst_sc(t1, t0, rt, ctx);
opn = "sc";
break;
}
MIPS_DEBUG("%s %s, %d(%s)", opn, regnames[rt], offset, regnames[base]);
tcg_temp_free(t1);
tcg_temp_free(t0);
}
/* Load and store */
static void gen_flt_ldst (DisasContext *ctx, uint32_t opc, int ft,
int base, int16_t offset)
{
const char * __attribute__((unused)) opn = "flt_ldst";
TCGv t0 = tcg_temp_new();
if (base == 0) {
tcg_gen_movi_tl(t0, offset);
} else if (offset == 0) {
gen_load_gpr(t0, base);
} else {
tcg_gen_movi_tl(t0, offset);
gen_op_addr_add(ctx, t0, cpu_gpr[base], t0);
}
/* Don't do NOP if destination is zero: we must perform the actual
memory access. */
switch (opc) {
case OPC_LWC1:
{
TCGv_i32 fp0 = tcg_temp_new_i32();
tcg_gen_qemu_ld32s(t0, t0, ctx->mem_idx);
tcg_gen_trunc_tl_i32(fp0, t0);
gen_store_fpr32(fp0, ft);
tcg_temp_free_i32(fp0);
}
opn = "lwc1";
break;
case OPC_SWC1:
{
TCGv_i32 fp0 = tcg_temp_new_i32();
TCGv t1 = tcg_temp_new();
gen_load_fpr32(fp0, ft);
tcg_gen_extu_i32_tl(t1, fp0);
tcg_gen_qemu_st32(t1, t0, ctx->mem_idx);
tcg_temp_free(t1);
tcg_temp_free_i32(fp0);
}
opn = "swc1";
break;
case OPC_LDC1:
{
TCGv_i64 fp0 = tcg_temp_new_i64();
tcg_gen_qemu_ld64(fp0, t0, ctx->mem_idx);
gen_store_fpr64(ctx, fp0, ft);
tcg_temp_free_i64(fp0);
}
opn = "ldc1";
break;
case OPC_SDC1:
{
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, ft);
tcg_gen_qemu_st64(fp0, t0, ctx->mem_idx);
tcg_temp_free_i64(fp0);
}
opn = "sdc1";
break;
default:
MIPS_INVAL(opn);
generate_exception(ctx, EXCP_RI);
goto out;
}
MIPS_DEBUG("%s %s, %d(%s)", opn, fregnames[ft], offset, regnames[base]);
out:
tcg_temp_free(t0);
}
/* Arithmetic with immediate operand */
static void gen_arith_imm (CPUMIPSState *env, DisasContext *ctx, uint32_t opc,
int rt, int rs, int16_t imm)
{
target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */
const char * __attribute__((unused)) opn = "imm arith";
if (rt == 0 && opc != OPC_ADDI && opc != OPC_DADDI) {
/* If no destination, treat it as a NOP.
For addi, we must generate the overflow exception when needed. */
MIPS_DEBUG("NOP");
return;
}
switch (opc) {
case OPC_ADDI:
{
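        /* Signed overflow check for ADDI: overflow occurred iff rs and the
           immediate have the same sign while the result's sign differs,
           i.e. the sign bit of ~(rs ^ imm) & (result ^ imm) is set.  The
           same test is used by DADDI below and by ADD/DADD in gen_arith. */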
TCGv t0 = tcg_temp_local_new();
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
int l1 = gen_new_label();
gen_load_gpr(t1, rs);
tcg_gen_addi_tl(t0, t1, uimm);
tcg_gen_ext32s_tl(t0, t0);
tcg_gen_xori_tl(t1, t1, ~uimm);
tcg_gen_xori_tl(t2, t0, uimm);
tcg_gen_and_tl(t1, t1, t2);
tcg_temp_free(t2);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
tcg_temp_free(t1);
/* operands of same sign, result different sign */
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
tcg_gen_ext32s_tl(t0, t0);
gen_store_gpr(t0, rt);
tcg_temp_free(t0);
}
opn = "addi";
break;
case OPC_ADDIU:
if (rs != 0) {
tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
} else {
tcg_gen_movi_tl(cpu_gpr[rt], uimm);
}
opn = "addiu";
break;
#if defined(TARGET_MIPS64)
case OPC_DADDI:
{
TCGv t0 = tcg_temp_local_new();
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
int l1 = gen_new_label();
gen_load_gpr(t1, rs);
tcg_gen_addi_tl(t0, t1, uimm);
tcg_gen_xori_tl(t1, t1, ~uimm);
tcg_gen_xori_tl(t2, t0, uimm);
tcg_gen_and_tl(t1, t1, t2);
tcg_temp_free(t2);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
tcg_temp_free(t1);
/* operands of same sign, result different sign */
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
gen_store_gpr(t0, rt);
tcg_temp_free(t0);
}
opn = "daddi";
break;
case OPC_DADDIU:
if (rs != 0) {
tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
} else {
tcg_gen_movi_tl(cpu_gpr[rt], uimm);
}
opn = "daddiu";
break;
#endif
}
MIPS_DEBUG("%s %s, %s, " TARGET_FMT_lx, opn, regnames[rt], regnames[rs], uimm);
}
/* Logic with immediate operand */
static void gen_logic_imm (CPUMIPSState *env, uint32_t opc, int rt, int rs, int16_t imm)
{
target_ulong uimm;
const char * __attribute__((unused)) opn = "imm logic";
if (rt == 0) {
/* If no destination, treat it as a NOP. */
MIPS_DEBUG("NOP");
return;
}
uimm = (uint16_t)imm;
switch (opc) {
case OPC_ANDI:
if (likely(rs != 0))
tcg_gen_andi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
else
tcg_gen_movi_tl(cpu_gpr[rt], 0);
opn = "andi";
break;
case OPC_ORI:
if (rs != 0)
tcg_gen_ori_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
else
tcg_gen_movi_tl(cpu_gpr[rt], uimm);
opn = "ori";
break;
case OPC_XORI:
if (likely(rs != 0))
tcg_gen_xori_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
else
tcg_gen_movi_tl(cpu_gpr[rt], uimm);
opn = "xori";
break;
case OPC_LUI:
tcg_gen_movi_tl(cpu_gpr[rt], imm << 16);
opn = "lui";
break;
}
MIPS_DEBUG("%s %s, %s, " TARGET_FMT_lx, opn, regnames[rt], regnames[rs], uimm);
}
/* Set on less than with immediate operand */
static void gen_slt_imm (CPUMIPSState *env, uint32_t opc, int rt, int rs, int16_t imm)
{
target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */
const char * __attribute__((unused)) opn = "imm arith";
TCGv t0;
if (rt == 0) {
/* If no destination, treat it as a NOP. */
MIPS_DEBUG("NOP");
return;
}
t0 = tcg_temp_new();
gen_load_gpr(t0, rs);
switch (opc) {
case OPC_SLTI:
gen_op_lti(cpu_gpr[rt], t0, uimm);
opn = "slti";
break;
case OPC_SLTIU:
gen_op_ltiu(cpu_gpr[rt], t0, uimm);
opn = "sltiu";
break;
}
MIPS_DEBUG("%s %s, %s, " TARGET_FMT_lx, opn, regnames[rt], regnames[rs], uimm);
tcg_temp_free(t0);
}
/* Shifts with immediate operand */
static void gen_shift_imm(CPUMIPSState *env, DisasContext *ctx, uint32_t opc,
int rt, int rs, int16_t imm)
{
target_ulong uimm = ((uint16_t)imm) & 0x1f;
const char* __attribute__((unused)) opn = "imm shift";
TCGv t0;
if (rt == 0) {
/* If no destination, treat it as a NOP. */
MIPS_DEBUG("NOP");
return;
}
t0 = tcg_temp_new();
gen_load_gpr(t0, rs);
switch (opc) {
case OPC_SLL:
tcg_gen_shli_tl(t0, t0, uimm);
tcg_gen_ext32s_tl(cpu_gpr[rt], t0);
opn = "sll";
break;
case OPC_SRA:
tcg_gen_ext32s_tl(t0, t0);
tcg_gen_sari_tl(cpu_gpr[rt], t0, uimm);
opn = "sra";
break;
case OPC_SRL:
switch ((ctx->opcode >> 21) & 0x1f) {
case 0:
if (uimm != 0) {
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_shri_tl(cpu_gpr[rt], t0, uimm);
} else {
tcg_gen_ext32s_tl(cpu_gpr[rt], t0);
}
opn = "srl";
break;
case 1:
/* rotr is decoded as srl on non-R2 CPUs */
if (env->insn_flags & ISA_MIPS32R2) {
if (uimm != 0) {
TCGv_i32 t1 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(t1, t0);
tcg_gen_rotri_i32(t1, t1, uimm);
tcg_gen_ext_i32_tl(cpu_gpr[rt], t1);
tcg_temp_free_i32(t1);
} else {
tcg_gen_ext32s_tl(cpu_gpr[rt], t0);
}
opn = "rotr";
} else {
if (uimm != 0) {
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_shri_tl(cpu_gpr[rt], t0, uimm);
} else {
tcg_gen_ext32s_tl(cpu_gpr[rt], t0);
}
opn = "srl";
}
break;
default:
MIPS_INVAL("invalid srl flag");
generate_exception(ctx, EXCP_RI);
break;
}
break;
#if defined(TARGET_MIPS64)
case OPC_DSLL:
tcg_gen_shli_tl(cpu_gpr[rt], t0, uimm);
opn = "dsll";
break;
case OPC_DSRA:
tcg_gen_sari_tl(cpu_gpr[rt], t0, uimm);
opn = "dsra";
break;
case OPC_DSRL:
switch ((ctx->opcode >> 21) & 0x1f) {
case 0:
tcg_gen_shri_tl(cpu_gpr[rt], t0, uimm);
opn = "dsrl";
break;
case 1:
/* drotr is decoded as dsrl on non-R2 CPUs */
if (env->insn_flags & ISA_MIPS32R2) {
if (uimm != 0) {
tcg_gen_rotri_tl(cpu_gpr[rt], t0, uimm);
} else {
tcg_gen_mov_tl(cpu_gpr[rt], t0);
}
opn = "drotr";
} else {
tcg_gen_shri_tl(cpu_gpr[rt], t0, uimm);
opn = "dsrl";
}
break;
default:
MIPS_INVAL("invalid dsrl flag");
generate_exception(ctx, EXCP_RI);
break;
}
break;
case OPC_DSLL32:
tcg_gen_shli_tl(cpu_gpr[rt], t0, uimm + 32);
opn = "dsll32";
break;
case OPC_DSRA32:
tcg_gen_sari_tl(cpu_gpr[rt], t0, uimm + 32);
opn = "dsra32";
break;
case OPC_DSRL32:
switch ((ctx->opcode >> 21) & 0x1f) {
case 0:
tcg_gen_shri_tl(cpu_gpr[rt], t0, uimm + 32);
opn = "dsrl32";
break;
case 1:
/* drotr32 is decoded as dsrl32 on non-R2 CPUs */
if (env->insn_flags & ISA_MIPS32R2) {
tcg_gen_rotri_tl(cpu_gpr[rt], t0, uimm + 32);
opn = "drotr32";
} else {
tcg_gen_shri_tl(cpu_gpr[rt], t0, uimm + 32);
opn = "dsrl32";
}
break;
default:
MIPS_INVAL("invalid dsrl32 flag");
generate_exception(ctx, EXCP_RI);
break;
}
break;
#endif
}
MIPS_DEBUG("%s %s, %s, " TARGET_FMT_lx, opn, regnames[rt], regnames[rs], uimm);
tcg_temp_free(t0);
}
/* Arithmetic */
static void gen_arith (CPUMIPSState *env, DisasContext *ctx, uint32_t opc,
int rd, int rs, int rt)
{
const char* __attribute__((unused)) opn = "arith";
if (rd == 0 && opc != OPC_ADD && opc != OPC_SUB
&& opc != OPC_DADD && opc != OPC_DSUB) {
/* If no destination, treat it as a NOP.
For add & sub, we must generate the overflow exception when needed. */
MIPS_DEBUG("NOP");
return;
}
switch (opc) {
case OPC_ADD:
{
TCGv t0 = tcg_temp_local_new();
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
int l1 = gen_new_label();
gen_load_gpr(t1, rs);
gen_load_gpr(t2, rt);
tcg_gen_add_tl(t0, t1, t2);
tcg_gen_ext32s_tl(t0, t0);
tcg_gen_xor_tl(t1, t1, t2);
tcg_gen_not_tl(t1, t1);
tcg_gen_xor_tl(t2, t0, t2);
tcg_gen_and_tl(t1, t1, t2);
tcg_temp_free(t2);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
tcg_temp_free(t1);
/* operands of same sign, result different sign */
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
gen_store_gpr(t0, rd);
tcg_temp_free(t0);
}
opn = "add";
break;
case OPC_ADDU:
if (rs != 0 && rt != 0) {
tcg_gen_add_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
} else if (rs == 0 && rt != 0) {
tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
} else if (rs != 0 && rt == 0) {
tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
} else {
tcg_gen_movi_tl(cpu_gpr[rd], 0);
}
opn = "addu";
break;
case OPC_SUB:
{
TCGv t0 = tcg_temp_local_new();
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
int l1 = gen_new_label();
gen_load_gpr(t1, rs);
gen_load_gpr(t2, rt);
tcg_gen_sub_tl(t0, t1, t2);
tcg_gen_ext32s_tl(t0, t0);
tcg_gen_xor_tl(t2, t1, t2);
tcg_gen_xor_tl(t1, t0, t1);
tcg_gen_and_tl(t1, t1, t2);
tcg_temp_free(t2);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
tcg_temp_free(t1);
/* operands of different sign, first operand and result different sign */
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
gen_store_gpr(t0, rd);
tcg_temp_free(t0);
}
opn = "sub";
break;
case OPC_SUBU:
if (rs != 0 && rt != 0) {
tcg_gen_sub_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
} else if (rs == 0 && rt != 0) {
tcg_gen_neg_tl(cpu_gpr[rd], cpu_gpr[rt]);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
} else if (rs != 0 && rt == 0) {
tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
} else {
tcg_gen_movi_tl(cpu_gpr[rd], 0);
}
opn = "subu";
break;
#if defined(TARGET_MIPS64)
case OPC_DADD:
{
TCGv t0 = tcg_temp_local_new();
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
int l1 = gen_new_label();
gen_load_gpr(t1, rs);
gen_load_gpr(t2, rt);
tcg_gen_add_tl(t0, t1, t2);
tcg_gen_xor_tl(t1, t1, t2);
tcg_gen_not_tl(t1, t1);
tcg_gen_xor_tl(t2, t0, t2);
tcg_gen_and_tl(t1, t1, t2);
tcg_temp_free(t2);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
tcg_temp_free(t1);
/* operands of same sign, result different sign */
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
gen_store_gpr(t0, rd);
tcg_temp_free(t0);
}
opn = "dadd";
break;
case OPC_DADDU:
if (rs != 0 && rt != 0) {
tcg_gen_add_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
} else if (rs == 0 && rt != 0) {
tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
} else if (rs != 0 && rt == 0) {
tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
} else {
tcg_gen_movi_tl(cpu_gpr[rd], 0);
}
opn = "daddu";
break;
case OPC_DSUB:
{
TCGv t0 = tcg_temp_local_new();
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
int l1 = gen_new_label();
gen_load_gpr(t1, rs);
gen_load_gpr(t2, rt);
tcg_gen_sub_tl(t0, t1, t2);
tcg_gen_xor_tl(t2, t1, t2);
tcg_gen_xor_tl(t1, t0, t1);
tcg_gen_and_tl(t1, t1, t2);
tcg_temp_free(t2);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
tcg_temp_free(t1);
/* operands of different sign, first operand and result different sign */
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
gen_store_gpr(t0, rd);
tcg_temp_free(t0);
}
opn = "dsub";
break;
case OPC_DSUBU:
if (rs != 0 && rt != 0) {
tcg_gen_sub_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
} else if (rs == 0 && rt != 0) {
tcg_gen_neg_tl(cpu_gpr[rd], cpu_gpr[rt]);
} else if (rs != 0 && rt == 0) {
tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
} else {
tcg_gen_movi_tl(cpu_gpr[rd], 0);
}
opn = "dsubu";
break;
#endif
case OPC_MUL:
if (likely(rs != 0 && rt != 0)) {
tcg_gen_mul_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
} else {
tcg_gen_movi_tl(cpu_gpr[rd], 0);
}
opn = "mul";
break;
}
MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]);
}
/* Conditional move */
static void gen_cond_move (CPUMIPSState *env, uint32_t opc, int rd, int rs, int rt)
{
const char* __attribute__((unused)) opn = "cond move";
int l1;
if (rd == 0) {
        /* If no destination, treat it as a NOP. */
MIPS_DEBUG("NOP");
return;
}
l1 = gen_new_label();
switch (opc) {
case OPC_MOVN:
if (likely(rt != 0))
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rt], 0, l1);
else
tcg_gen_br(l1);
opn = "movn";
break;
case OPC_MOVZ:
if (likely(rt != 0))
tcg_gen_brcondi_tl(TCG_COND_NE, cpu_gpr[rt], 0, l1);
opn = "movz";
break;
}
if (rs != 0)
tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
else
tcg_gen_movi_tl(cpu_gpr[rd], 0);
gen_set_label(l1);
MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]);
}
/* Logic */
static void gen_logic (CPUMIPSState *env, uint32_t opc, int rd, int rs, int rt)
{
const char* __attribute__((unused)) opn = "logic";
if (rd == 0) {
/* If no destination, treat it as a NOP. */
MIPS_DEBUG("NOP");
return;
}
switch (opc) {
case OPC_AND:
if (likely(rs != 0 && rt != 0)) {
tcg_gen_and_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
} else {
tcg_gen_movi_tl(cpu_gpr[rd], 0);
}
opn = "and";
break;
case OPC_NOR:
if (rs != 0 && rt != 0) {
tcg_gen_nor_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
} else if (rs == 0 && rt != 0) {
tcg_gen_not_tl(cpu_gpr[rd], cpu_gpr[rt]);
} else if (rs != 0 && rt == 0) {
tcg_gen_not_tl(cpu_gpr[rd], cpu_gpr[rs]);
} else {
tcg_gen_movi_tl(cpu_gpr[rd], ~((target_ulong)0));
}
opn = "nor";
break;
case OPC_OR:
if (likely(rs != 0 && rt != 0)) {
tcg_gen_or_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
} else if (rs == 0 && rt != 0) {
tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
} else if (rs != 0 && rt == 0) {
tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
} else {
tcg_gen_movi_tl(cpu_gpr[rd], 0);
}
opn = "or";
break;
case OPC_XOR:
if (likely(rs != 0 && rt != 0)) {
tcg_gen_xor_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
} else if (rs == 0 && rt != 0) {
tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
} else if (rs != 0 && rt == 0) {
tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
} else {
tcg_gen_movi_tl(cpu_gpr[rd], 0);
}
opn = "xor";
break;
}
MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]);
}
/* Set on lower than */
static void gen_slt (CPUMIPSState *env, uint32_t opc, int rd, int rs, int rt)
{
const char* __attribute__((unused)) opn = "slt";
TCGv t0, t1;
if (rd == 0) {
/* If no destination, treat it as a NOP. */
MIPS_DEBUG("NOP");
return;
}
t0 = tcg_temp_new();
t1 = tcg_temp_new();
gen_load_gpr(t0, rs);
gen_load_gpr(t1, rt);
switch (opc) {
case OPC_SLT:
gen_op_lt(cpu_gpr[rd], t0, t1);
opn = "slt";
break;
case OPC_SLTU:
gen_op_ltu(cpu_gpr[rd], t0, t1);
opn = "sltu";
break;
}
MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]);
tcg_temp_free(t0);
tcg_temp_free(t1);
}
/* Shifts */
static void gen_shift (CPUMIPSState *env, DisasContext *ctx, uint32_t opc,
int rd, int rs, int rt)
{
const char* __attribute__((unused)) opn = "shifts";
TCGv t0, t1;
if (rd == 0) {
        /* If no destination, treat it as a NOP. */
MIPS_DEBUG("NOP");
return;
}
t0 = tcg_temp_new();
t1 = tcg_temp_new();
gen_load_gpr(t0, rs);
gen_load_gpr(t1, rt);
switch (opc) {
case OPC_SLLV:
tcg_gen_andi_tl(t0, t0, 0x1f);
tcg_gen_shl_tl(t0, t1, t0);
tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
opn = "sllv";
break;
case OPC_SRAV:
tcg_gen_ext32s_tl(t1, t1);
tcg_gen_andi_tl(t0, t0, 0x1f);
tcg_gen_sar_tl(cpu_gpr[rd], t1, t0);
opn = "srav";
break;
case OPC_SRLV:
switch ((ctx->opcode >> 6) & 0x1f) {
case 0:
tcg_gen_ext32u_tl(t1, t1);
tcg_gen_andi_tl(t0, t0, 0x1f);
tcg_gen_shr_tl(t0, t1, t0);
tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
opn = "srlv";
break;
case 1:
/* rotrv is decoded as srlv on non-R2 CPUs */
if (env->insn_flags & ISA_MIPS32R2) {
TCGv_i32 t2 = tcg_temp_new_i32();
TCGv_i32 t3 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(t2, t0);
tcg_gen_trunc_tl_i32(t3, t1);
tcg_gen_andi_i32(t2, t2, 0x1f);
tcg_gen_rotr_i32(t2, t3, t2);
tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
tcg_temp_free_i32(t2);
tcg_temp_free_i32(t3);
opn = "rotrv";
} else {
tcg_gen_ext32u_tl(t1, t1);
tcg_gen_andi_tl(t0, t0, 0x1f);
tcg_gen_shr_tl(t0, t1, t0);
tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
opn = "srlv";
}
break;
default:
MIPS_INVAL("invalid srlv flag");
generate_exception(ctx, EXCP_RI);
break;
}
break;
#if defined(TARGET_MIPS64)
case OPC_DSLLV:
tcg_gen_andi_tl(t0, t0, 0x3f);
tcg_gen_shl_tl(cpu_gpr[rd], t1, t0);
opn = "dsllv";
break;
case OPC_DSRAV:
tcg_gen_andi_tl(t0, t0, 0x3f);
tcg_gen_sar_tl(cpu_gpr[rd], t1, t0);
opn = "dsrav";
break;
case OPC_DSRLV:
switch ((ctx->opcode >> 6) & 0x1f) {
case 0:
tcg_gen_andi_tl(t0, t0, 0x3f);
tcg_gen_shr_tl(cpu_gpr[rd], t1, t0);
opn = "dsrlv";
break;
case 1:
/* drotrv is decoded as dsrlv on non-R2 CPUs */
if (env->insn_flags & ISA_MIPS32R2) {
tcg_gen_andi_tl(t0, t0, 0x3f);
tcg_gen_rotr_tl(cpu_gpr[rd], t1, t0);
opn = "drotrv";
} else {
tcg_gen_andi_tl(t0, t0, 0x3f);
                tcg_gen_shr_tl(cpu_gpr[rd], t1, t0);
opn = "dsrlv";
}
break;
default:
MIPS_INVAL("invalid dsrlv flag");
generate_exception(ctx, EXCP_RI);
break;
}
break;
#endif
}
MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]);
tcg_temp_free(t0);
tcg_temp_free(t1);
}
/* Arithmetic on HI/LO registers */
static void gen_HILO (DisasContext *ctx, uint32_t opc, int reg)
{
const char* __attribute__((unused)) opn = "hilo";
if (reg == 0 && (opc == OPC_MFHI || opc == OPC_MFLO)) {
/* Treat as NOP. */
MIPS_DEBUG("NOP");
return;
}
switch (opc) {
case OPC_MFHI:
tcg_gen_mov_tl(cpu_gpr[reg], cpu_HI[0]);
opn = "mfhi";
break;
case OPC_MFLO:
tcg_gen_mov_tl(cpu_gpr[reg], cpu_LO[0]);
opn = "mflo";
break;
case OPC_MTHI:
if (reg != 0)
tcg_gen_mov_tl(cpu_HI[0], cpu_gpr[reg]);
else
tcg_gen_movi_tl(cpu_HI[0], 0);
opn = "mthi";
break;
case OPC_MTLO:
if (reg != 0)
tcg_gen_mov_tl(cpu_LO[0], cpu_gpr[reg]);
else
tcg_gen_movi_tl(cpu_LO[0], 0);
opn = "mtlo";
break;
}
MIPS_DEBUG("%s %s", opn, regnames[reg]);
}
static void gen_muldiv (DisasContext *ctx, uint32_t opc,
int rs, int rt)
{
const char* __attribute__((unused)) opn = "mul/div";
TCGv t0, t1;
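    /* The divisions branch around the actual divide (divide-by-zero and,
       for signed division, the INT_MIN / -1 overflow case), so their
       operands must live in local temporaries, which keep their value
       across branches. */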
switch (opc) {
case OPC_DIV:
case OPC_DIVU:
#if defined(TARGET_MIPS64)
case OPC_DDIV:
case OPC_DDIVU:
#endif
t0 = tcg_temp_local_new();
t1 = tcg_temp_local_new();
break;
default:
t0 = tcg_temp_new();
t1 = tcg_temp_new();
break;
}
gen_load_gpr(t0, rs);
gen_load_gpr(t1, rt);
switch (opc) {
case OPC_DIV:
{
int l1 = gen_new_label();
int l2 = gen_new_label();
tcg_gen_ext32s_tl(t0, t0);
tcg_gen_ext32s_tl(t1, t1);
tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
tcg_gen_brcondi_tl(TCG_COND_NE, t0, INT_MIN, l2);
tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1, l2);
tcg_gen_mov_tl(cpu_LO[0], t0);
tcg_gen_movi_tl(cpu_HI[0], 0);
tcg_gen_br(l1);
gen_set_label(l2);
tcg_gen_div_tl(cpu_LO[0], t0, t1);
tcg_gen_rem_tl(cpu_HI[0], t0, t1);
tcg_gen_ext32s_tl(cpu_LO[0], cpu_LO[0]);
tcg_gen_ext32s_tl(cpu_HI[0], cpu_HI[0]);
gen_set_label(l1);
}
opn = "div";
break;
case OPC_DIVU:
{
int l1 = gen_new_label();
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_ext32u_tl(t1, t1);
tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
tcg_gen_divu_tl(cpu_LO[0], t0, t1);
tcg_gen_remu_tl(cpu_HI[0], t0, t1);
tcg_gen_ext32s_tl(cpu_LO[0], cpu_LO[0]);
tcg_gen_ext32s_tl(cpu_HI[0], cpu_HI[0]);
gen_set_label(l1);
}
opn = "divu";
break;
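    /* MULT/MULTU (and MADD/MSUB/MADDU/MSUBU below) are computed with a
       64-bit multiply; the low 32 bits of the product go to LO and the
       high 32 bits to HI, each sign-extended as the architecture
       requires. */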
case OPC_MULT:
{
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
tcg_gen_ext_tl_i64(t2, t0);
tcg_gen_ext_tl_i64(t3, t1);
tcg_gen_mul_i64(t2, t2, t3);
tcg_temp_free_i64(t3);
tcg_gen_trunc_i64_tl(t0, t2);
tcg_gen_shri_i64(t2, t2, 32);
tcg_gen_trunc_i64_tl(t1, t2);
tcg_temp_free_i64(t2);
tcg_gen_ext32s_tl(cpu_LO[0], t0);
tcg_gen_ext32s_tl(cpu_HI[0], t1);
}
opn = "mult";
break;
case OPC_MULTU:
{
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_ext32u_tl(t1, t1);
tcg_gen_extu_tl_i64(t2, t0);
tcg_gen_extu_tl_i64(t3, t1);
tcg_gen_mul_i64(t2, t2, t3);
tcg_temp_free_i64(t3);
tcg_gen_trunc_i64_tl(t0, t2);
tcg_gen_shri_i64(t2, t2, 32);
tcg_gen_trunc_i64_tl(t1, t2);
tcg_temp_free_i64(t2);
tcg_gen_ext32s_tl(cpu_LO[0], t0);
tcg_gen_ext32s_tl(cpu_HI[0], t1);
}
opn = "multu";
break;
#if defined(TARGET_MIPS64)
case OPC_DDIV:
{
int l1 = gen_new_label();
int l2 = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
tcg_gen_brcondi_tl(TCG_COND_NE, t0, -1LL << 63, l2);
tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2);
tcg_gen_mov_tl(cpu_LO[0], t0);
tcg_gen_movi_tl(cpu_HI[0], 0);
tcg_gen_br(l1);
gen_set_label(l2);
tcg_gen_div_i64(cpu_LO[0], t0, t1);
tcg_gen_rem_i64(cpu_HI[0], t0, t1);
gen_set_label(l1);
}
opn = "ddiv";
break;
case OPC_DDIVU:
{
int l1 = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
tcg_gen_divu_i64(cpu_LO[0], t0, t1);
tcg_gen_remu_i64(cpu_HI[0], t0, t1);
gen_set_label(l1);
}
opn = "ddivu";
break;
case OPC_DMULT:
gen_helper_dmult(cpu_env, t0, t1);
opn = "dmult";
break;
case OPC_DMULTU:
gen_helper_dmultu(cpu_env, t0, t1);
opn = "dmultu";
break;
#endif
case OPC_MADD:
{
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
tcg_gen_ext_tl_i64(t2, t0);
tcg_gen_ext_tl_i64(t3, t1);
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[0], cpu_HI[0]);
tcg_gen_add_i64(t2, t2, t3);
tcg_temp_free_i64(t3);
tcg_gen_trunc_i64_tl(t0, t2);
tcg_gen_shri_i64(t2, t2, 32);
tcg_gen_trunc_i64_tl(t1, t2);
tcg_temp_free_i64(t2);
tcg_gen_ext32s_tl(cpu_LO[0], t0);
tcg_gen_ext32s_tl(cpu_HI[0], t1);
}
opn = "madd";
break;
case OPC_MADDU:
{
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_ext32u_tl(t1, t1);
tcg_gen_extu_tl_i64(t2, t0);
tcg_gen_extu_tl_i64(t3, t1);
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[0], cpu_HI[0]);
tcg_gen_add_i64(t2, t2, t3);
tcg_temp_free_i64(t3);
tcg_gen_trunc_i64_tl(t0, t2);
tcg_gen_shri_i64(t2, t2, 32);
tcg_gen_trunc_i64_tl(t1, t2);
tcg_temp_free_i64(t2);
tcg_gen_ext32s_tl(cpu_LO[0], t0);
tcg_gen_ext32s_tl(cpu_HI[0], t1);
}
opn = "maddu";
break;
case OPC_MSUB:
{
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
tcg_gen_ext_tl_i64(t2, t0);
tcg_gen_ext_tl_i64(t3, t1);
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[0], cpu_HI[0]);
tcg_gen_sub_i64(t2, t3, t2);
tcg_temp_free_i64(t3);
tcg_gen_trunc_i64_tl(t0, t2);
tcg_gen_shri_i64(t2, t2, 32);
tcg_gen_trunc_i64_tl(t1, t2);
tcg_temp_free_i64(t2);
tcg_gen_ext32s_tl(cpu_LO[0], t0);
tcg_gen_ext32s_tl(cpu_HI[0], t1);
}
opn = "msub";
break;
case OPC_MSUBU:
{
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_ext32u_tl(t1, t1);
tcg_gen_extu_tl_i64(t2, t0);
tcg_gen_extu_tl_i64(t3, t1);
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[0], cpu_HI[0]);
tcg_gen_sub_i64(t2, t3, t2);
tcg_temp_free_i64(t3);
tcg_gen_trunc_i64_tl(t0, t2);
tcg_gen_shri_i64(t2, t2, 32);
tcg_gen_trunc_i64_tl(t1, t2);
tcg_temp_free_i64(t2);
tcg_gen_ext32s_tl(cpu_LO[0], t0);
tcg_gen_ext32s_tl(cpu_HI[0], t1);
}
opn = "msubu";
break;
default:
MIPS_INVAL(opn);
generate_exception(ctx, EXCP_RI);
goto out;
}
MIPS_DEBUG("%s %s %s", opn, regnames[rs], regnames[rt]);
out:
tcg_temp_free(t0);
tcg_temp_free(t1);
}
static void gen_mul_vr54xx (DisasContext *ctx, uint32_t opc,
int rd, int rs, int rt)
{
const char* __attribute__((unused)) opn = "mul vr54xx";
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
gen_load_gpr(t0, rs);
gen_load_gpr(t1, rt);
switch (opc) {
case OPC_VR54XX_MULS:
gen_helper_muls(t0, cpu_env, t0, t1);
opn = "muls";
break;
case OPC_VR54XX_MULSU:
gen_helper_mulsu(t0, cpu_env, t0, t1);
opn = "mulsu";
break;
case OPC_VR54XX_MACC:
gen_helper_macc(t0, cpu_env, t0, t1);
opn = "macc";
break;
case OPC_VR54XX_MACCU:
gen_helper_maccu(t0, cpu_env, t0, t1);
opn = "maccu";
break;
case OPC_VR54XX_MSAC:
gen_helper_msac(t0, cpu_env, t0, t1);
opn = "msac";
break;
case OPC_VR54XX_MSACU:
gen_helper_msacu(t0, cpu_env, t0, t1);
opn = "msacu";
break;
case OPC_VR54XX_MULHI:
gen_helper_mulhi(t0, cpu_env, t0, t1);
opn = "mulhi";
break;
case OPC_VR54XX_MULHIU:
gen_helper_mulhiu(t0, cpu_env, t0, t1);
opn = "mulhiu";
break;
case OPC_VR54XX_MULSHI:
gen_helper_mulshi(t0, cpu_env, t0, t1);
opn = "mulshi";
break;
case OPC_VR54XX_MULSHIU:
gen_helper_mulshiu(t0, cpu_env, t0, t1);
opn = "mulshiu";
break;
case OPC_VR54XX_MACCHI:
gen_helper_macchi(t0, cpu_env, t0, t1);
opn = "macchi";
break;
case OPC_VR54XX_MACCHIU:
gen_helper_macchiu(t0, cpu_env, t0, t1);
opn = "macchiu";
break;
case OPC_VR54XX_MSACHI:
gen_helper_msachi(t0, cpu_env, t0, t1);
opn = "msachi";
break;
case OPC_VR54XX_MSACHIU:
gen_helper_msachiu(t0, cpu_env, t0, t1);
opn = "msachiu";
break;
default:
MIPS_INVAL("mul vr54xx");
generate_exception(ctx, EXCP_RI);
goto out;
}
gen_store_gpr(t0, rd);
MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]);
out:
tcg_temp_free(t0);
tcg_temp_free(t1);
}
static void gen_cl (DisasContext *ctx, uint32_t opc,
int rd, int rs)
{
const char* __attribute__((unused)) opn = "CLx";
TCGv t0;
if (rd == 0) {
/* Treat as NOP. */
MIPS_DEBUG("NOP");
return;
}
t0 = tcg_temp_new();
gen_load_gpr(t0, rs);
switch (opc) {
case OPC_CLO:
gen_helper_clo(cpu_gpr[rd], t0);
opn = "clo";
break;
case OPC_CLZ:
gen_helper_clz(cpu_gpr[rd], t0);
opn = "clz";
break;
#if defined(TARGET_MIPS64)
case OPC_DCLO:
gen_helper_dclo(cpu_gpr[rd], t0);
opn = "dclo";
break;
case OPC_DCLZ:
gen_helper_dclz(cpu_gpr[rd], t0);
opn = "dclz";
break;
#endif
}
MIPS_DEBUG("%s %s, %s", opn, regnames[rd], regnames[rs]);
tcg_temp_free(t0);
}
/* Traps */
static void gen_trap (DisasContext *ctx, uint32_t opc,
int rs, int rt, int16_t imm)
{
int cond;
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
cond = 0;
/* Load needed operands */
switch (opc) {
case OPC_TEQ:
case OPC_TGE:
case OPC_TGEU:
case OPC_TLT:
case OPC_TLTU:
case OPC_TNE:
/* Compare two registers */
if (rs != rt) {
gen_load_gpr(t0, rs);
gen_load_gpr(t1, rt);
cond = 1;
}
break;
case OPC_TEQI:
case OPC_TGEI:
case OPC_TGEIU:
case OPC_TLTI:
case OPC_TLTIU:
case OPC_TNEI:
/* Compare register to immediate */
if (rs != 0 || imm != 0) {
gen_load_gpr(t0, rs);
tcg_gen_movi_tl(t1, (int32_t)imm);
cond = 1;
}
break;
}
if (cond == 0) {
switch (opc) {
case OPC_TEQ: /* rs == rs */
case OPC_TEQI: /* r0 == 0 */
case OPC_TGE: /* rs >= rs */
case OPC_TGEI: /* r0 >= 0 */
case OPC_TGEU: /* rs >= rs unsigned */
case OPC_TGEIU: /* r0 >= 0 unsigned */
/* Always trap */
generate_exception(ctx, EXCP_TRAP);
break;
case OPC_TLT: /* rs < rs */
case OPC_TLTI: /* r0 < 0 */
case OPC_TLTU: /* rs < rs unsigned */
case OPC_TLTIU: /* r0 < 0 unsigned */
case OPC_TNE: /* rs != rs */
case OPC_TNEI: /* r0 != 0 */
/* Never trap: treat as NOP. */
break;
}
} else {
int l1 = gen_new_label();
switch (opc) {
case OPC_TEQ:
case OPC_TEQI:
tcg_gen_brcond_tl(TCG_COND_NE, t0, t1, l1);
break;
case OPC_TGE:
case OPC_TGEI:
tcg_gen_brcond_tl(TCG_COND_LT, t0, t1, l1);
break;
case OPC_TGEU:
case OPC_TGEIU:
tcg_gen_brcond_tl(TCG_COND_LTU, t0, t1, l1);
break;
case OPC_TLT:
case OPC_TLTI:
tcg_gen_brcond_tl(TCG_COND_GE, t0, t1, l1);
break;
case OPC_TLTU:
case OPC_TLTIU:
tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
break;
case OPC_TNE:
case OPC_TNEI:
tcg_gen_brcond_tl(TCG_COND_EQ, t0, t1, l1);
break;
}
generate_exception(ctx, EXCP_TRAP);
gen_set_label(l1);
}
tcg_temp_free(t0);
tcg_temp_free(t1);
}
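/* Direct TB chaining: gen_goto_tb links to the next translation block in
   place only when the destination lies in the same guest page as the
   current TB and single-stepping is off; otherwise the new PC is saved
   and a normal TB exit is taken. */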
static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
TranslationBlock *tb;
tb = ctx->tb;
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
likely(!ctx->singlestep_enabled)) {
tcg_gen_goto_tb(n);