author    Robert Mustacchi <rm@joyent.com>    2011-06-24 13:49:54 -0700
committer Robert Mustacchi <rm@joyent.com>    2011-06-24 13:49:54 -0700
commit    68396ea9c0fe4f75ce30b1eba2c44c43c13344bb (patch)
tree      802587d411d9db461e6500c5b635043315f81c27 /target-sparc
download  illumos-kvm-cmd-68396ea9c0fe4f75ce30b1eba2c44c43c13344bb.tar.gz
Initial commit of d32e8d0b8d9e0ef7cf7ab2e74548982972789dfc from qemu-kvm
Diffstat (limited to 'target-sparc')
-rw-r--r--  target-sparc/TODO          88
-rw-r--r--  target-sparc/cpu.h        659
-rw-r--r--  target-sparc/exec.h        41
-rw-r--r--  target-sparc/helper.c    1546
-rw-r--r--  target-sparc/helper.h     166
-rw-r--r--  target-sparc/machine.c    199
-rw-r--r--  target-sparc/op_helper.c 4535
-rw-r--r--  target-sparc/translate.c 5106
8 files changed, 12340 insertions, 0 deletions
diff --git a/target-sparc/TODO b/target-sparc/TODO
new file mode 100644
index 0000000..c87459f
--- /dev/null
+++ b/target-sparc/TODO
@@ -0,0 +1,88 @@
+TODO-list:
+
+CPU common:
+- Unimplemented features/bugs:
+ - Delay slot handling may fail sometimes (branch end of page, delay
+ slot next page)
+ - Atomic instructions
+ - CPU features should match real CPUs (also ASI selection)
+- Optimizations/improvements:
+ - Condition code/branch handling like x86, also for FPU?
+ - Remove remaining explicit alignment checks
+ - Global register for regwptr, so that windowed registers can be
+ accessed directly
+ - Improve Sparc32plus addressing
+ - NPC/PC static optimisations (use JUMP_TB when possible)? (Is this
+ obsolete?)
+ - Synthetic instructions
+ - MMU model dependent on CPU model
+ - Select ASI helper at translation time (on V9 only if known)
+ - KQemu/KVM support for VM only
+ - Hardware breakpoint/watchpoint support
+ - Cache emulation mode
+ - Reverse-endian pages
+ - Faster FPU emulation
+ - Busy loop detection
+
+Sparc32 CPUs:
+- Unimplemented features/bugs:
+ - Sun4/Sun4c MMUs
+ - Some V8 ASIs
+
+Sparc64 CPUs:
+- Unimplemented features/bugs:
+ - Interrupt handling
+ - Secondary address space, other MMU functions
+ - Many V9/UA2005/UA2007 ASIs
+ - Rest of V9 instructions, missing VIS instructions
+ - IG/MG/AG vs. UA2007 globals
+ - Full hypervisor support
+ - SMP/CMT
+ - Sun4v CPUs
+
+Sun4:
+- To be added
+
+Sun4c:
+- A lot of unimplemented features
+- Maybe split from Sun4m
+
+Sun4m:
+- Unimplemented features/bugs:
+ - Hardware devices do not match real boards
+ - Floppy does not work
+ - CS4231: merge with cs4231a, add DMA
+ - Add cg6, bwtwo
+ - Arbitrary resolution support
+ - PCI for MicroSparc-IIe
+ - JavaStation machines
+ - SBus slot probing, FCode ROM support
+ - SMP probing support
+ - Interrupt routing does not match real HW
+ - SuSE 7.3 keyboard sometimes unresponsive
+ - Gentoo 2004.1 SMP does not work
+ - SS600MP ledma -> lebuffer
+ - Type 5 keyboard
+ - Less fixed hardware choices
+ - DBRI audio (Am7930)
+ - BPP parallel
+ - Diagnostic switch
+ - ESP PIO mode
+
+Sun4d:
+- A lot of unimplemented features:
+ - SBI
+ - IO-unit
+- Maybe split from Sun4m
+
+Sun4u:
+- Unimplemented features/bugs:
+ - Interrupt controller
+ - PCI/IOMMU support (Simba, JIO, Tomatillo, Psycho, Schizo, Safari...)
+ - SMP
+ - Happy Meal Ethernet, flash, I2C, GPIO
+ - A lot of real machine types
+
+Sun4v:
+- A lot of unimplemented features:
+ - A lot of real machine types
diff --git a/target-sparc/cpu.h b/target-sparc/cpu.h
new file mode 100644
index 0000000..320530e
--- /dev/null
+++ b/target-sparc/cpu.h
@@ -0,0 +1,659 @@
+#ifndef CPU_SPARC_H
+#define CPU_SPARC_H
+
+#include "config.h"
+#include "qemu-common.h"
+
+#if !defined(TARGET_SPARC64)
+#define TARGET_LONG_BITS 32
+#define TARGET_FPREGS 32
+#define TARGET_PAGE_BITS 12 /* 4k */
+#define TARGET_PHYS_ADDR_SPACE_BITS 36
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#else
+#define TARGET_LONG_BITS 64
+#define TARGET_FPREGS 64
+#define TARGET_PAGE_BITS 13 /* 8k */
+#define TARGET_PHYS_ADDR_SPACE_BITS 41
+# ifdef TARGET_ABI32
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
+# else
+# define TARGET_VIRT_ADDR_SPACE_BITS 44
+# endif
+#endif
+
+#define CPUState struct CPUSPARCState
+
+#include "cpu-defs.h"
+
+#include "softfloat.h"
+
+#define TARGET_HAS_ICE 1
+
+#if !defined(TARGET_SPARC64)
+#define ELF_MACHINE EM_SPARC
+#else
+#define ELF_MACHINE EM_SPARCV9
+#endif
+
+/*#define EXCP_INTERRUPT 0x100*/
+
+/* trap definitions */
+#ifndef TARGET_SPARC64
+#define TT_TFAULT 0x01
+#define TT_ILL_INSN 0x02
+#define TT_PRIV_INSN 0x03
+#define TT_NFPU_INSN 0x04
+#define TT_WIN_OVF 0x05
+#define TT_WIN_UNF 0x06
+#define TT_UNALIGNED 0x07
+#define TT_FP_EXCP 0x08
+#define TT_DFAULT 0x09
+#define TT_TOVF 0x0a
+#define TT_EXTINT 0x10
+#define TT_CODE_ACCESS 0x21
+#define TT_UNIMP_FLUSH 0x25
+#define TT_DATA_ACCESS 0x29
+#define TT_DIV_ZERO 0x2a
+#define TT_NCP_INSN 0x24
+#define TT_TRAP 0x80
+#else
+#define TT_POWER_ON_RESET 0x01
+#define TT_TFAULT 0x08
+#define TT_CODE_ACCESS 0x0a
+#define TT_ILL_INSN 0x10
+#define TT_UNIMP_FLUSH TT_ILL_INSN
+#define TT_PRIV_INSN 0x11
+#define TT_NFPU_INSN 0x20
+#define TT_FP_EXCP 0x21
+#define TT_TOVF 0x23
+#define TT_CLRWIN 0x24
+#define TT_DIV_ZERO 0x28
+#define TT_DFAULT 0x30
+#define TT_DATA_ACCESS 0x32
+#define TT_UNALIGNED 0x34
+#define TT_PRIV_ACT 0x37
+#define TT_EXTINT 0x40
+#define TT_IVEC 0x60
+#define TT_TMISS 0x64
+#define TT_DMISS 0x68
+#define TT_DPROT 0x6c
+#define TT_SPILL 0x80
+#define TT_FILL 0xc0
+#define TT_WOTHER (1 << 5)
+#define TT_TRAP 0x100
+#endif
+
+#define PSR_NEG_SHIFT 23
+#define PSR_NEG (1 << PSR_NEG_SHIFT)
+#define PSR_ZERO_SHIFT 22
+#define PSR_ZERO (1 << PSR_ZERO_SHIFT)
+#define PSR_OVF_SHIFT 21
+#define PSR_OVF (1 << PSR_OVF_SHIFT)
+#define PSR_CARRY_SHIFT 20
+#define PSR_CARRY (1 << PSR_CARRY_SHIFT)
+#define PSR_ICC (PSR_NEG|PSR_ZERO|PSR_OVF|PSR_CARRY)
+#if !defined(TARGET_SPARC64)
+#define PSR_EF (1<<12)
+#define PSR_PIL 0xf00
+#define PSR_S (1<<7)
+#define PSR_PS (1<<6)
+#define PSR_ET (1<<5)
+#define PSR_CWP 0x1f
+#endif
+
+#define CC_SRC (env->cc_src)
+#define CC_SRC2 (env->cc_src2)
+#define CC_DST (env->cc_dst)
+#define CC_OP (env->cc_op)
+
+enum {
+ CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
+ CC_OP_FLAGS, /* all cc are back in status register */
+ CC_OP_DIV, /* modify N, Z and V, C = 0 */
+ CC_OP_ADD, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_ADDX, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_TADD, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_TADDTV, /* modify all flags except V, CC_DST = res, CC_SRC = src1 */
+ CC_OP_SUB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_SUBX, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_TSUB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_TSUBTV, /* modify all flags except V, CC_DST = res, CC_SRC = src1 */
+ CC_OP_LOGIC, /* modify N and Z, C = V = 0, CC_DST = res */
+ CC_OP_NB,
+};
+
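A minimal sketch (not part of this commit; the actual folding lives in op_helper.c) of how the lazy scheme above turns CC_OP_ADD operands back into PSR icc bits. CC_DST holds the result and CC_SRC the first operand, so the second operand is recoverable:

    static uint32_t sketch_icc_for_add(uint32_t dst, uint32_t src1)
    {
        uint32_t src2 = dst - src1;  /* recover the second operand */
        uint32_t icc = 0;

        if (dst == 0)
            icc |= PSR_ZERO;
        if ((int32_t)dst < 0)
            icc |= PSR_NEG;
        /* signed overflow: operands share a sign that the result lacks */
        if ((~(src1 ^ src2) & (src1 ^ dst)) & (1u << 31))
            icc |= PSR_OVF;
        /* unsigned carry out of bit 31 */
        if (((src1 & src2) | ((src1 | src2) & ~dst)) & (1u << 31))
            icc |= PSR_CARRY;
        return icc;
    }
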
+/* Trap base register */
+#define TBR_BASE_MASK 0xfffff000
+
+#if defined(TARGET_SPARC64)
+#define PS_TCT (1<<12) /* UA2007, impl.dep. trap on control transfer */
+#define PS_IG (1<<11) /* v9, zero on UA2007 */
+#define PS_MG (1<<10) /* v9, zero on UA2007 */
+#define PS_CLE (1<<9) /* UA2007 */
+#define PS_TLE (1<<8) /* UA2007 */
+#define PS_RMO (1<<7)
+#define PS_RED (1<<5) /* v9, zero on UA2007 */
+#define PS_PEF (1<<4) /* enable fpu */
+#define PS_AM (1<<3) /* address mask */
+#define PS_PRIV (1<<2)
+#define PS_IE (1<<1)
+#define PS_AG (1<<0) /* v9, zero on UA2007 */
+
+#define FPRS_FEF (1<<2)
+
+#define HS_PRIV (1<<2)
+#endif
+
+/* Fcc */
+#define FSR_RD1 (1ULL << 31)
+#define FSR_RD0 (1ULL << 30)
+#define FSR_RD_MASK (FSR_RD1 | FSR_RD0)
+#define FSR_RD_NEAREST 0
+#define FSR_RD_ZERO FSR_RD0
+#define FSR_RD_POS FSR_RD1
+#define FSR_RD_NEG (FSR_RD1 | FSR_RD0)
+
+#define FSR_NVM (1ULL << 27)
+#define FSR_OFM (1ULL << 26)
+#define FSR_UFM (1ULL << 25)
+#define FSR_DZM (1ULL << 24)
+#define FSR_NXM (1ULL << 23)
+#define FSR_TEM_MASK (FSR_NVM | FSR_OFM | FSR_UFM | FSR_DZM | FSR_NXM)
+
+#define FSR_NVA (1ULL << 9)
+#define FSR_OFA (1ULL << 8)
+#define FSR_UFA (1ULL << 7)
+#define FSR_DZA (1ULL << 6)
+#define FSR_NXA (1ULL << 5)
+#define FSR_AEXC_MASK (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA)
+
+#define FSR_NVC (1ULL << 4)
+#define FSR_OFC (1ULL << 3)
+#define FSR_UFC (1ULL << 2)
+#define FSR_DZC (1ULL << 1)
+#define FSR_NXC (1ULL << 0)
+#define FSR_CEXC_MASK (FSR_NVC | FSR_OFC | FSR_UFC | FSR_DZC | FSR_NXC)
+
+#define FSR_FTT2 (1ULL << 16)
+#define FSR_FTT1 (1ULL << 15)
+#define FSR_FTT0 (1ULL << 14)
+//gcc warns about constant overflow for ~FSR_FTT_MASK
+//#define FSR_FTT_MASK (FSR_FTT2 | FSR_FTT1 | FSR_FTT0)
+#ifdef TARGET_SPARC64
+#define FSR_FTT_NMASK 0xfffffffffffe3fffULL
+#define FSR_FTT_CEXC_NMASK 0xfffffffffffe3fe0ULL
+#define FSR_LDFSR_OLDMASK 0x0000003f000fc000ULL
+#define FSR_LDXFSR_MASK 0x0000003fcfc00fffULL
+#define FSR_LDXFSR_OLDMASK 0x00000000000fc000ULL
+#else
+#define FSR_FTT_NMASK 0xfffe3fffULL
+#define FSR_FTT_CEXC_NMASK 0xfffe3fe0ULL
+#define FSR_LDFSR_OLDMASK 0x000fc000ULL
+#endif
+#define FSR_LDFSR_MASK 0xcfc00fffULL
+#define FSR_FTT_IEEE_EXCP (1ULL << 14)
+#define FSR_FTT_UNIMPFPOP (3ULL << 14)
+#define FSR_FTT_SEQ_ERROR (4ULL << 14)
+#define FSR_FTT_INVAL_FPR (6ULL << 14)
+
+#define FSR_FCC1_SHIFT 11
+#define FSR_FCC1 (1ULL << FSR_FCC1_SHIFT)
+#define FSR_FCC0_SHIFT 10
+#define FSR_FCC0 (1ULL << FSR_FCC0_SHIFT)
+
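The FSR.RD field above selects the IEEE rounding direction. A sketch of mapping it onto QEMU's softfloat rounding modes (this commit does the equivalent in op_helper.c):

    switch (env->fsr & FSR_RD_MASK) {
    case FSR_RD_NEAREST:
        set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
        break;
    case FSR_RD_ZERO:
        set_float_rounding_mode(float_round_to_zero, &env->fp_status);
        break;
    case FSR_RD_POS:
        set_float_rounding_mode(float_round_up, &env->fp_status);
        break;
    case FSR_RD_NEG:
        set_float_rounding_mode(float_round_down, &env->fp_status);
        break;
    }
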
+/* MMU */
+#define MMU_E (1<<0)
+#define MMU_NF (1<<1)
+
+#define PTE_ENTRYTYPE_MASK 3
+#define PTE_ACCESS_MASK 0x1c
+#define PTE_ACCESS_SHIFT 2
+#define PTE_PPN_SHIFT 7
+#define PTE_ADDR_MASK 0xffffff00
+
+#define PG_ACCESSED_BIT 5
+#define PG_MODIFIED_BIT 6
+#define PG_CACHE_BIT 7
+
+#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
+#define PG_MODIFIED_MASK (1 << PG_MODIFIED_BIT)
+#define PG_CACHE_MASK (1 << PG_CACHE_BIT)
+
+/* 3 <= NWINDOWS <= 32. */
+#define MIN_NWINDOWS 3
+#define MAX_NWINDOWS 32
+
+#if !defined(TARGET_SPARC64)
+#define NB_MMU_MODES 2
+#else
+#define NB_MMU_MODES 6
+typedef struct trap_state {
+ uint64_t tpc;
+ uint64_t tnpc;
+ uint64_t tstate;
+ uint32_t tt;
+} trap_state;
+#endif
+
+typedef struct sparc_def_t {
+ const char *name;
+ target_ulong iu_version;
+ uint32_t fpu_version;
+ uint32_t mmu_version;
+ uint32_t mmu_bm;
+ uint32_t mmu_ctpr_mask;
+ uint32_t mmu_cxr_mask;
+ uint32_t mmu_sfsr_mask;
+ uint32_t mmu_trcr_mask;
+ uint32_t mxcc_version;
+ uint32_t features;
+ uint32_t nwindows;
+ uint32_t maxtl;
+} sparc_def_t;
+
+#define CPU_FEATURE_FLOAT (1 << 0)
+#define CPU_FEATURE_FLOAT128 (1 << 1)
+#define CPU_FEATURE_SWAP (1 << 2)
+#define CPU_FEATURE_MUL (1 << 3)
+#define CPU_FEATURE_DIV (1 << 4)
+#define CPU_FEATURE_FLUSH (1 << 5)
+#define CPU_FEATURE_FSQRT (1 << 6)
+#define CPU_FEATURE_FMUL (1 << 7)
+#define CPU_FEATURE_VIS1 (1 << 8)
+#define CPU_FEATURE_VIS2 (1 << 9)
+#define CPU_FEATURE_FSMULD (1 << 10)
+#define CPU_FEATURE_HYPV (1 << 11)
+#define CPU_FEATURE_CMT (1 << 12)
+#define CPU_FEATURE_GL (1 << 13)
+#define CPU_FEATURE_TA0_SHUTDOWN (1 << 14) /* Shutdown on "ta 0x0" */
+#define CPU_FEATURE_ASR17 (1 << 15)
+#define CPU_FEATURE_CACHE_CTRL (1 << 16)
+
+#ifndef TARGET_SPARC64
+#define CPU_DEFAULT_FEATURES (CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | \
+ CPU_FEATURE_MUL | CPU_FEATURE_DIV | \
+ CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | \
+ CPU_FEATURE_FMUL | CPU_FEATURE_FSMULD)
+#else
+#define CPU_DEFAULT_FEATURES (CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | \
+ CPU_FEATURE_MUL | CPU_FEATURE_DIV | \
+ CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | \
+ CPU_FEATURE_FMUL | CPU_FEATURE_VIS1 | \
+ CPU_FEATURE_VIS2 | CPU_FEATURE_FSMULD)
+enum {
+ mmu_us_12, // Ultrasparc < III (64 entry TLB)
+ mmu_us_3, // Ultrasparc III (512 entry TLB)
+ mmu_us_4, // Ultrasparc IV (several TLBs, 32 and 256MB pages)
+ mmu_sun4v, // T1, T2
+};
+#endif
+
+#define TTE_VALID_BIT (1ULL << 63)
+#define TTE_USED_BIT (1ULL << 41)
+#define TTE_LOCKED_BIT (1ULL << 6)
+#define TTE_GLOBAL_BIT (1ULL << 0)
+
+#define TTE_IS_VALID(tte) ((tte) & TTE_VALID_BIT)
+#define TTE_IS_USED(tte) ((tte) & TTE_USED_BIT)
+#define TTE_IS_LOCKED(tte) ((tte) & TTE_LOCKED_BIT)
+#define TTE_IS_GLOBAL(tte) ((tte) & TTE_GLOBAL_BIT)
+
+#define TTE_SET_USED(tte) ((tte) |= TTE_USED_BIT)
+#define TTE_SET_UNUSED(tte) ((tte) &= ~TTE_USED_BIT)
+
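Illustrative use of the TTE accessors above on a hypothetical entry (values invented for the example):

    uint64_t tte = TTE_VALID_BIT | TTE_GLOBAL_BIT;  /* hypothetical entry */
    if (TTE_IS_VALID(tte) && !TTE_IS_LOCKED(tte)) {
        TTE_SET_USED(tte);     /* mark referenced: bit 41 is now set */
    }
    TTE_SET_UNUSED(tte);       /* e.g. when aging entries for replacement */
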
+typedef struct SparcTLBEntry {
+ uint64_t tag;
+ uint64_t tte;
+} SparcTLBEntry;
+
+struct CPUTimer
+{
+ const char *name;
+ uint32_t frequency;
+ uint32_t disabled;
+ uint64_t disabled_mask;
+ int64_t clock_offset;
+ struct QEMUTimer *qtimer;
+};
+
+typedef struct CPUTimer CPUTimer;
+
+struct QEMUFile;
+void cpu_put_timer(struct QEMUFile *f, CPUTimer *s);
+void cpu_get_timer(struct QEMUFile *f, CPUTimer *s);
+
+typedef struct CPUSPARCState {
+ target_ulong gregs[8]; /* general registers */
+ target_ulong *regwptr; /* pointer to current register window */
+ target_ulong pc; /* program counter */
+ target_ulong npc; /* next program counter */
+ target_ulong y; /* multiply/divide register */
+
+ /* emulator internal flags handling */
+ target_ulong cc_src, cc_src2;
+ target_ulong cc_dst;
+ uint32_t cc_op;
+
+ target_ulong t0, t1; /* temporaries live across basic blocks */
+ target_ulong cond; /* conditional branch result (XXX: save it in a
+ temporary register when possible) */
+
+ uint32_t psr; /* processor state register */
+ target_ulong fsr; /* FPU state register */
+ float32 fpr[TARGET_FPREGS]; /* floating point registers */
+ uint32_t cwp; /* index of current register window (extracted
+ from PSR) */
+#if !defined(TARGET_SPARC64) || defined(TARGET_ABI32)
+ uint32_t wim; /* window invalid mask */
+#endif
+ target_ulong tbr; /* trap base register */
+#if !defined(TARGET_SPARC64)
+ int psrs; /* supervisor mode (extracted from PSR) */
+ int psrps; /* previous supervisor mode */
+ int psret; /* enable traps */
+#endif
+ uint32_t psrpil; /* interrupt blocking level */
+ uint32_t pil_in; /* incoming interrupt level bitmap */
+#if !defined(TARGET_SPARC64)
+ int psref; /* enable fpu */
+#endif
+ target_ulong version;
+ int interrupt_index;
+ uint32_t nwindows;
+ /* NOTE: we allow 8 more registers to handle wrapping */
+ target_ulong regbase[MAX_NWINDOWS * 16 + 8];
+
+ CPU_COMMON
+
+ /* MMU regs */
+#if defined(TARGET_SPARC64)
+ uint64_t lsu;
+#define DMMU_E 0x8
+#define IMMU_E 0x4
+ //typedef struct SparcMMU
+ union {
+ uint64_t immuregs[16];
+ struct {
+ uint64_t tsb_tag_target;
+ uint64_t unused_mmu_primary_context; // use DMMU
+ uint64_t unused_mmu_secondary_context; // use DMMU
+ uint64_t sfsr;
+ uint64_t sfar;
+ uint64_t tsb;
+ uint64_t tag_access;
+ } immu;
+ };
+ union {
+ uint64_t dmmuregs[16];
+ struct {
+ uint64_t tsb_tag_target;
+ uint64_t mmu_primary_context;
+ uint64_t mmu_secondary_context;
+ uint64_t sfsr;
+ uint64_t sfar;
+ uint64_t tsb;
+ uint64_t tag_access;
+ } dmmu;
+ };
+ SparcTLBEntry itlb[64];
+ SparcTLBEntry dtlb[64];
+ uint32_t mmu_version;
+#else
+ uint32_t mmuregs[32];
+ uint64_t mxccdata[4];
+ uint64_t mxccregs[8];
+ uint64_t mmubpregs[4];
+ uint64_t prom_addr;
+#endif
+ /* temporary float registers */
+ float64 dt0, dt1;
+ float128 qt0, qt1;
+ float_status fp_status;
+#if defined(TARGET_SPARC64)
+#define MAXTL_MAX 8
+#define MAXTL_MASK (MAXTL_MAX - 1)
+ trap_state ts[MAXTL_MAX];
+ uint32_t xcc; /* Extended integer condition codes */
+ uint32_t asi;
+ uint32_t pstate;
+ uint32_t tl;
+ uint32_t maxtl;
+ uint32_t cansave, canrestore, otherwin, wstate, cleanwin;
+ uint64_t agregs[8]; /* alternate general registers */
+ uint64_t bgregs[8]; /* backup for normal global registers */
+ uint64_t igregs[8]; /* interrupt general registers */
+ uint64_t mgregs[8]; /* mmu general registers */
+ uint64_t fprs;
+ uint64_t tick_cmpr, stick_cmpr;
+ CPUTimer *tick, *stick;
+#define TICK_NPT_MASK 0x8000000000000000ULL
+#define TICK_INT_DIS 0x8000000000000000ULL
+ uint64_t gsr;
+ uint32_t gl; // UA2005
+ /* UA 2005 hyperprivileged registers */
+ uint64_t hpstate, htstate[MAXTL_MAX], hintp, htba, hver, hstick_cmpr, ssr;
+ CPUTimer *hstick; // UA 2005
+ uint32_t softint;
+#define SOFTINT_TIMER 1
+#define SOFTINT_STIMER (1 << 16)
+#define SOFTINT_INTRMASK (0xFFFE)
+#define SOFTINT_REG_MASK (SOFTINT_STIMER|SOFTINT_INTRMASK|SOFTINT_TIMER)
+#endif
+ sparc_def_t *def;
+
+ void *irq_manager;
+ void (*qemu_irq_ack) (void *irq_manager, int intno);
+
+ /* Leon3 cache control */
+ uint32_t cache_control;
+} CPUSPARCState;
+
+#ifndef NO_CPU_IO_DEFS
+/* helper.c */
+CPUSPARCState *cpu_sparc_init(const char *cpu_model);
+void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu);
+void sparc_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+int cpu_sparc_handle_mmu_fault(CPUSPARCState *env1, target_ulong address, int rw,
+ int mmu_idx, int is_softmmu);
+#define cpu_handle_mmu_fault cpu_sparc_handle_mmu_fault
+target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev);
+void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUState *env);
+
+/* translate.c */
+void gen_intermediate_code_init(CPUSPARCState *env);
+
+/* cpu-exec.c */
+int cpu_sparc_exec(CPUSPARCState *s);
+
+/* op_helper.c */
+target_ulong cpu_get_psr(CPUState *env1);
+void cpu_put_psr(CPUState *env1, target_ulong val);
+#ifdef TARGET_SPARC64
+target_ulong cpu_get_ccr(CPUState *env1);
+void cpu_put_ccr(CPUState *env1, target_ulong val);
+target_ulong cpu_get_cwp64(CPUState *env1);
+void cpu_put_cwp64(CPUState *env1, int cwp);
+#endif
+int cpu_cwp_inc(CPUState *env1, int cwp);
+int cpu_cwp_dec(CPUState *env1, int cwp);
+void cpu_set_cwp(CPUState *env1, int new_cwp);
+void leon3_irq_manager(void *irq_manager, int intno);
+
+/* sun4m.c, sun4u.c */
+void cpu_check_irqs(CPUSPARCState *env);
+
+/* leon3.c */
+void leon3_irq_ack(void *irq_manager, int intno);
+
+#if defined (TARGET_SPARC64)
+
+static inline int compare_masked(uint64_t x, uint64_t y, uint64_t mask)
+{
+ return (x & mask) == (y & mask);
+}
+
+#define MMU_CONTEXT_BITS 13
+#define MMU_CONTEXT_MASK ((1 << MMU_CONTEXT_BITS) - 1)
+
+static inline int tlb_compare_context(const SparcTLBEntry *tlb,
+ uint64_t context)
+{
+ return compare_masked(context, tlb->tag, MMU_CONTEXT_MASK);
+}
+
+#endif
+#endif
+
+/* cpu-exec.c */
+#if !defined(CONFIG_USER_ONLY)
+void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
+ int is_asi, int size);
+target_phys_addr_t cpu_get_phys_page_nofault(CPUState *env, target_ulong addr,
+ int mmu_idx);
+
+#endif
+int cpu_sparc_signal_handler(int host_signum, void *pinfo, void *puc);
+
+#define cpu_init cpu_sparc_init
+#define cpu_exec cpu_sparc_exec
+#define cpu_gen_code cpu_sparc_gen_code
+#define cpu_signal_handler cpu_sparc_signal_handler
+#define cpu_list sparc_cpu_list
+
+#define CPU_SAVE_VERSION 6
+
+/* MMU modes definitions */
+#if defined (TARGET_SPARC64)
+#define MMU_USER_IDX 0
+#define MMU_MODE0_SUFFIX _user
+#define MMU_USER_SECONDARY_IDX 1
+#define MMU_MODE1_SUFFIX _user_secondary
+#define MMU_KERNEL_IDX 2
+#define MMU_MODE2_SUFFIX _kernel
+#define MMU_KERNEL_SECONDARY_IDX 3
+#define MMU_MODE3_SUFFIX _kernel_secondary
+#define MMU_NUCLEUS_IDX 4
+#define MMU_MODE4_SUFFIX _nucleus
+#define MMU_HYPV_IDX 5
+#define MMU_MODE5_SUFFIX _hypv
+#else
+#define MMU_USER_IDX 0
+#define MMU_MODE0_SUFFIX _user
+#define MMU_KERNEL_IDX 1
+#define MMU_MODE1_SUFFIX _kernel
+#endif
+
+#if defined (TARGET_SPARC64)
+static inline int cpu_has_hypervisor(CPUState *env1)
+{
+ return env1->def->features & CPU_FEATURE_HYPV;
+}
+
+static inline int cpu_hypervisor_mode(CPUState *env1)
+{
+ return cpu_has_hypervisor(env1) && (env1->hpstate & HS_PRIV);
+}
+
+static inline int cpu_supervisor_mode(CPUState *env1)
+{
+ return env1->pstate & PS_PRIV;
+}
+#endif
+
+static inline int cpu_mmu_index(CPUState *env1)
+{
+#if defined(CONFIG_USER_ONLY)
+ return MMU_USER_IDX;
+#elif !defined(TARGET_SPARC64)
+ return env1->psrs;
+#else
+ if (env1->tl > 0) {
+ return MMU_NUCLEUS_IDX;
+ } else if (cpu_hypervisor_mode(env1)) {
+ return MMU_HYPV_IDX;
+ } else if (cpu_supervisor_mode(env1)) {
+ return MMU_KERNEL_IDX;
+ } else {
+ return MMU_USER_IDX;
+ }
+#endif
+}
+
+static inline int cpu_interrupts_enabled(CPUState *env1)
+{
+#if !defined (TARGET_SPARC64)
+ if (env1->psret != 0)
+ return 1;
+#else
+ if (env1->pstate & PS_IE)
+ return 1;
+#endif
+
+ return 0;
+}
+
+static inline int cpu_pil_allowed(CPUState *env1, int pil)
+{
+#if !defined(TARGET_SPARC64)
+ /* level 15 is non-maskable on sparc v8 */
+ return pil == 15 || pil > env1->psrpil;
+#else
+ return pil > env1->psrpil;
+#endif
+}
+
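For example (an illustrative check, assuming <assert.h> and an initialized sparc32 env1): with PSR.PIL at 13 only levels 14 and 15 are deliverable, and level 15 remains deliverable even at PIL 15 because it is non-maskable on V8:

    env1->psrpil = 13;
    assert(cpu_pil_allowed(env1, 14));    /* 14 > 13 */
    env1->psrpil = 15;
    assert(cpu_pil_allowed(env1, 15));    /* non-maskable */
    assert(!cpu_pil_allowed(env1, 14));   /* masked */
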
+static inline int cpu_fpu_enabled(CPUState *env1)
+{
+#if defined(CONFIG_USER_ONLY)
+ return 1;
+#elif !defined(TARGET_SPARC64)
+ return env1->psref;
+#else
+ return ((env1->pstate & PS_PEF) != 0) && ((env1->fprs & FPRS_FEF) != 0);
+#endif
+}
+
+#if defined(CONFIG_USER_ONLY)
+static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
+{
+ if (newsp)
+ env->regwptr[22] = newsp;
+ env->regwptr[0] = 0;
+ /* FIXME: Do we also need to clear CF? */
+ /* XXXXX */
+ printf ("HELPME: %s:%d\n", __FILE__, __LINE__);
+}
+#endif
+
+#include "cpu-all.h"
+
+#ifdef TARGET_SPARC64
+/* sun4u.c */
+void cpu_tick_set_count(CPUTimer *timer, uint64_t count);
+uint64_t cpu_tick_get_count(CPUTimer *timer);
+void cpu_tick_set_limit(CPUTimer *timer, uint64_t limit);
+trap_state* cpu_tsptr(CPUState* env);
+#endif
+
+static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
+ target_ulong *cs_base, int *flags)
+{
+ *pc = env->pc;
+ *cs_base = env->npc;
+#ifdef TARGET_SPARC64
+ // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
+ *flags = ((env->pstate & PS_AM) << 2) /* 5 */
+ | (((env->pstate & PS_PEF) >> 1) /* 3 */
+ | ((env->fprs & FPRS_FEF) << 2)) /* 4 */
+ | (env->pstate & PS_PRIV) /* 2 */
+ | ((env->lsu & (DMMU_E | IMMU_E)) >> 2) /* 1, 0 */
+ | ((env->tl & 0xff) << 8)
+ | (env->dmmu.mmu_primary_context << 16); /* 16... */
+#else
+ // FPU enable . Supervisor
+ *flags = (env->psref << 4) | env->psrs;
+#endif
+}
+
+#endif
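On the sparc32 side, cpu_get_tb_cpu_state() above packs PSR.EF into bit 4 of the TB flags and PSR.S into bit 0. Hypothetical decode helpers (illustrative only, not part of this commit):

    static inline int tb_flags_fpu_enabled(int flags)
    {
        return (flags >> 4) & 1;   /* bit 4: PSR.EF (env->psref) */
    }

    static inline int tb_flags_supervisor(int flags)
    {
        return flags & 1;          /* bit 0: PSR.S (env->psrs) */
    }
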
diff --git a/target-sparc/exec.h b/target-sparc/exec.h
new file mode 100644
index 0000000..f811571
--- /dev/null
+++ b/target-sparc/exec.h
@@ -0,0 +1,41 @@
+#ifndef EXEC_SPARC_H
+#define EXEC_SPARC_H 1
+#include "config.h"
+#include "dyngen-exec.h"
+
+register struct CPUSPARCState *env asm(AREG0);
+
+#include "cpu.h"
+#include "exec-all.h"
+
+#if !defined(CONFIG_USER_ONLY)
+#include "softmmu_exec.h"
+#endif /* !defined(CONFIG_USER_ONLY) */
+
+/* op_helper.c */
+void do_interrupt(CPUState *env);
+
+static inline int cpu_has_work(CPUState *env1)
+{
+ return (env1->interrupt_request & CPU_INTERRUPT_HARD) &&
+ cpu_interrupts_enabled(env1);
+}
+
+static inline int cpu_halted(CPUState *env1)
+{
+ if (!env1->halted)
+ return 0;
+ if (cpu_has_work(env1)) {
+ env1->halted = 0;
+ return 0;
+ }
+ return EXCP_HALTED;
+}
+
+static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb)
+{
+ env->pc = tb->pc;
+ env->npc = tb->cs_base;
+}
+
+#endif
diff --git a/target-sparc/helper.c b/target-sparc/helper.c
new file mode 100644
index 0000000..b2d4d70
--- /dev/null
+++ b/target-sparc/helper.c
@@ -0,0 +1,1546 @@
+/*
+ * sparc helpers
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <signal.h>
+
+#include "cpu.h"
+#include "exec-all.h"
+#include "qemu-common.h"
+
+//#define DEBUG_MMU
+//#define DEBUG_FEATURES
+
+#ifdef DEBUG_MMU
+#define DPRINTF_MMU(fmt, ...) \
+ do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF_MMU(fmt, ...) do {} while (0)
+#endif
+
+static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *cpu_model);
+
+/* Sparc MMU emulation */
+
+#if defined(CONFIG_USER_ONLY)
+
+int cpu_sparc_handle_mmu_fault(CPUState *env1, target_ulong address, int rw,
+ int mmu_idx, int is_softmmu)
+{
+ if (rw & 2)
+ env1->exception_index = TT_TFAULT;
+ else
+ env1->exception_index = TT_DFAULT;
+ return 1;
+}
+
+#else
+
+#ifndef TARGET_SPARC64
+/*
+ * Sparc V8 Reference MMU (SRMMU)
+ */
+static const int access_table[8][8] = {
+ { 0, 0, 0, 0, 8, 0, 12, 12 },
+ { 0, 0, 0, 0, 8, 0, 0, 0 },
+ { 8, 8, 0, 0, 0, 8, 12, 12 },
+ { 8, 8, 0, 0, 0, 8, 0, 0 },
+ { 8, 0, 8, 0, 8, 8, 12, 12 },
+ { 8, 0, 8, 0, 8, 0, 8, 0 },
+ { 8, 8, 8, 0, 8, 8, 12, 12 },
+ { 8, 8, 8, 0, 8, 8, 8, 0 }
+};
+
+static const int perm_table[2][8] = {
+ {
+ PAGE_READ,
+ PAGE_READ | PAGE_WRITE,
+ PAGE_READ | PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE,
+ PAGE_READ | PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE | PAGE_EXEC
+ },
+ {
+ PAGE_READ,
+ PAGE_READ | PAGE_WRITE,
+ PAGE_READ | PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ PAGE_EXEC,
+ PAGE_READ,
+ 0,
+ 0,
+ }
+};
+
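The access index built below is ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1), so a user-mode data write (rw = 1) lands on row 4 of access_table. A quick illustrative check (not part of this commit, assuming <assert.h>):

    int idx = ((1 & 1) << 2) | (1 & 2) | 0;   /* user data write: idx == 4 */
    assert(access_table[idx][0] == 8);        /* ACC=0 page: access error */
    assert(perm_table[1][0] == PAGE_READ);    /* ACC=0 user pages: read-only */
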
+static int get_physical_address(CPUState *env, target_phys_addr_t *physical,
+ int *prot, int *access_index,
+ target_ulong address, int rw, int mmu_idx,
+ target_ulong *page_size)
+{
+ int access_perms = 0;
+ target_phys_addr_t pde_ptr;
+ uint32_t pde;
+ int error_code = 0, is_dirty, is_user;
+ unsigned long page_offset;
+
+ is_user = mmu_idx == MMU_USER_IDX;
+
+ if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */
+ *page_size = TARGET_PAGE_SIZE;
+ // Boot mode: instruction fetches are taken from PROM
+ if (rw == 2 && (env->mmuregs[0] & env->def->mmu_bm)) {
+ *physical = env->prom_addr | (address & 0x7ffffULL);
+ *prot = PAGE_READ | PAGE_EXEC;
+ return 0;
+ }
+ *physical = address;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return 0;
+ }
+
+ *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user? 0 : 1);
+ *physical = 0xffffffffffff0000ULL;
+
+ /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
+ /* Context base + context number */
+ pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
+ pde = ldl_phys(pde_ptr);
+
+ /* Ctx pde */
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ return 1 << 2;
+ case 2: /* L0 PTE, maybe should not happen? */
+ case 3: /* Reserved */
+ return 4 << 2;
+ case 1: /* L0 PDE */
+ pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
+ pde = ldl_phys(pde_ptr);
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ return (1 << 8) | (1 << 2);
+ case 3: /* Reserved */
+ return (1 << 8) | (4 << 2);
+ case 1: /* L1 PDE */
+ pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
+ pde = ldl_phys(pde_ptr);
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ return (2 << 8) | (1 << 2);
+ case 3: /* Reserved */
+ return (2 << 8) | (4 << 2);
+ case 1: /* L2 PDE */
+ pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
+ pde = ldl_phys(pde_ptr);
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ return (3 << 8) | (1 << 2);
+ case 1: /* PDE, should not happen */
+ case 3: /* Reserved */
+ return (3 << 8) | (4 << 2);
+ case 2: /* L3 PTE */
+ page_offset = (address & TARGET_PAGE_MASK) &
+ (TARGET_PAGE_SIZE - 1);
+ }
+ *page_size = TARGET_PAGE_SIZE;
+ break;
+ case 2: /* L2 PTE */
+ page_offset = address & 0x3ffff;
+ *page_size = 0x40000;
+ }
+ break;
+ case 2: /* L1 PTE */
+ page_offset = address & 0xffffff;
+ *page_size = 0x1000000;
+ }
+ }
+
+ /* check access */
+ access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
+ error_code = access_table[*access_index][access_perms];
+ if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user))
+ return error_code;
+
+ /* update page modified and dirty bits */
+ is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
+ if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
+ pde |= PG_ACCESSED_MASK;
+ if (is_dirty)
+ pde |= PG_MODIFIED_MASK;
+ stl_phys_notdirty(pde_ptr, pde);
+ }
+
+ /* the page can be put in the TLB */
+ *prot = perm_table[is_user][access_perms];
+ if (!(pde & PG_MODIFIED_MASK)) {
+ /* only set write access if already dirty... otherwise wait
+ for dirty access */
+ *prot &= ~PAGE_WRITE;
+ }
+
+ /* Even if large ptes, we map only one 4KB page in the cache to
+ avoid filling it too fast */
+ *physical = ((target_phys_addr_t)(pde & PTE_ADDR_MASK) << 4) + page_offset;
+ return error_code;
+}
+
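A worked context-table fetch for the walk above (illustrative register values): with mmuregs[1] = 0x3f00 and context mmuregs[2] = 1, the first PDE comes from (0x3f00 << 4) + (1 << 2):

    target_phys_addr_t pde_ptr = ((target_phys_addr_t)0x3f00 << 4) + (1 << 2);
    /* pde_ptr == 0x3f004; the next tables are indexed by VA bits 31..24,
     * 23..18 and 17..12, so a PTE found at level 1, 2 or 3 maps
     * 16MB, 256KB or 4KB respectively. */
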
+/* Perform address translation */
+int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
+ int mmu_idx, int is_softmmu)
+{
+ target_phys_addr_t paddr;
+ target_ulong vaddr;
+ target_ulong page_size;
+ int error_code = 0, prot, access_index;
+
+ error_code = get_physical_address(env, &paddr, &prot, &access_index,
+ address, rw, mmu_idx, &page_size);
+ if (error_code == 0) {
+ vaddr = address & TARGET_PAGE_MASK;
+ paddr &= TARGET_PAGE_MASK;
+#ifdef DEBUG_MMU
+ printf("Translate at " TARGET_FMT_lx " -> " TARGET_FMT_plx ", vaddr "
+ TARGET_FMT_lx "\n", address, paddr, vaddr);
+#endif
+ tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
+ return 0;
+ }
+
+ if (env->mmuregs[3]) /* Fault status register */
+ env->mmuregs[3] = 1; /* overflow (not read before another fault) */
+ env->mmuregs[3] |= (access_index << 5) | error_code | 2;
+ env->mmuregs[4] = address; /* Fault address register */
+
+ if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) {
+ // No fault mode: if a mapping is available, just override
+ // permissions. If no mapping is available, redirect accesses to
+ // neverland. Fake/overridden mappings will be flushed when
+ // switching to normal mode.
+ vaddr = address & TARGET_PAGE_MASK;
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ tlb_set_page(env, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
+ return 0;
+ } else {
+ if (rw & 2)
+ env->exception_index = TT_TFAULT;
+ else
+ env->exception_index = TT_DFAULT;
+ return 1;
+ }
+}
+
+target_ulong mmu_probe(CPUState *env, target_ulong address, int mmulev)
+{
+ target_phys_addr_t pde_ptr;
+ uint32_t pde;
+
+ /* Context base + context number */
+ pde_ptr = (target_phys_addr_t)(env->mmuregs[1] << 4) +
+ (env->mmuregs[2] << 2);
+ pde = ldl_phys(pde_ptr);
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ case 2: /* PTE, maybe should not happen? */
+ case 3: /* Reserved */
+ return 0;
+ case 1: /* L1 PDE */
+ if (mmulev == 3)
+ return pde;
+ pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
+ pde = ldl_phys(pde_ptr);
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ case 3: /* Reserved */
+ return 0;
+ case 2: /* L1 PTE */
+ return pde;
+ case 1: /* L2 PDE */
+ if (mmulev == 2)
+ return pde;
+ pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
+ pde = ldl_phys(pde_ptr);
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ case 3: /* Reserved */
+ return 0;
+ case 2: /* L2 PTE */
+ return pde;
+ case 1: /* L3 PDE */
+ if (mmulev == 1)
+ return pde;
+ pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
+ pde = ldl_phys(pde_ptr);
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ case 1: /* PDE, should not happen */
+ case 3: /* Reserved */
+ return 0;
+ case 2: /* L3 PTE */
+ return pde;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUState *env)
+{
+ target_ulong va, va1, va2;
+ unsigned int n, m, o;
+ target_phys_addr_t pde_ptr, pa;
+ uint32_t pde;
+
+ pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
+ pde = ldl_phys(pde_ptr);
+ (*cpu_fprintf)(f, "Root ptr: " TARGET_FMT_plx ", ctx: %d\n",
+ (target_phys_addr_t)env->mmuregs[1] << 4, env->mmuregs[2]);
+ for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
+ pde = mmu_probe(env, va, 2);
+ if (pde) {
+ pa = cpu_get_phys_page_debug(env, va);
+ (*cpu_fprintf)(f, "VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx
+ " PDE: " TARGET_FMT_lx "\n", va, pa, pde);
+ for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
+ pde = mmu_probe(env, va1, 1);
+ if (pde) {
+ pa = cpu_get_phys_page_debug(env, va1);
+ (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: "
+ TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n",
+ va1, pa, pde);
+ for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
+ pde = mmu_probe(env, va2, 0);
+ if (pde) {
+ pa = cpu_get_phys_page_debug(env, va2);
+ (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: "
+ TARGET_FMT_plx " PTE: "
+ TARGET_FMT_lx "\n",
+ va2, pa, pde);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+#else /* !TARGET_SPARC64 */
+
+// 41 bit physical address space
+static inline target_phys_addr_t ultrasparc_truncate_physical(uint64_t x)
+{
+ return x & 0x1ffffffffffULL;
+}
+
+/*
+ * UltraSparc IIi I/DMMUs
+ */
+
+// Returns true if the TTE tag is valid and matches the virtual address value
+// in the given context; requires the virtual address mask calculated from the
+// TTE entry size.
+static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
+ uint64_t address, uint64_t context,
+ target_phys_addr_t *physical)
+{
+ uint64_t mask;
+
+ switch ((tlb->tte >> 61) & 3) {
+ default:
+ case 0x0: // 8k
+ mask = 0xffffffffffffe000ULL;
+ break;
+ case 0x1: // 64k
+ mask = 0xffffffffffff0000ULL;
+ break;
+ case 0x2: // 512k
+ mask = 0xfffffffffff80000ULL;
+ break;
+ case 0x3: // 4M
+ mask = 0xffffffffffc00000ULL;
+ break;
+ }
+
+ // valid, context match, virtual address match?
+ if (TTE_IS_VALID(tlb->tte) &&
+ (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context))
+ && compare_masked(address, tlb->tag, mask))
+ {
+ // decode physical address
+ *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL;
+ return 1;
+ }
+
+ return 0;
+}
+
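Concretely (illustrative values): a 4M entry (size field 3) uses mask 0xffffffffffc00000, so virtual-address bits 21..13 pass through into the physical address, while the final mask truncates to 41 bits and clears the sub-8k offset (the softmmu TLB supplies that later):

    uint64_t mask = 0xffffffffffc00000ULL;   /* 4M page */
    uint64_t tte_pa = 0x1000000ULL;          /* hypothetical TTE PA bits */
    uint64_t addr = 0x0012e123ULL;
    uint64_t phys = ((tte_pa & mask) | (addr & ~mask)) & 0x1ffffffe000ULL;
    /* phys == 0x112e000: page bits from the TTE, bits 21..13 from addr */
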
+static int get_physical_address_data(CPUState *env,
+ target_phys_addr_t *physical, int *prot,
+ target_ulong address, int rw, int mmu_idx)
+{
+ unsigned int i;
+ uint64_t context;
+
+ int is_user = (mmu_idx == MMU_USER_IDX ||
+ mmu_idx == MMU_USER_SECONDARY_IDX);
+
+ if ((env->lsu & DMMU_E) == 0) { /* DMMU disabled */
+ *physical = ultrasparc_truncate_physical(address);
+ *prot = PAGE_READ | PAGE_WRITE;
+ return 0;
+ }
+
+ switch(mmu_idx) {
+ case MMU_USER_IDX:
+ case MMU_KERNEL_IDX:
+ context = env->dmmu.mmu_primary_context & 0x1fff;
+ break;
+ case MMU_USER_SECONDARY_IDX:
+ case MMU_KERNEL_SECONDARY_IDX:
+ context = env->dmmu.mmu_secondary_context & 0x1fff;
+ break;
+ case MMU_NUCLEUS_IDX:
+ default:
+ context = 0;
+ break;
+ }
+
+ for (i = 0; i < 64; i++) {
+ // ctx match, vaddr match, valid?
+ if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) {
+
+ uint8_t fault_type = 0;
+
+ // access ok?
+ if ((env->dtlb[i].tte & 0x4) && is_user) {
+ fault_type |= 1; /* privilege violation */
+ env->exception_index = TT_DFAULT;
+
+ DPRINTF_MMU("DFAULT at %" PRIx64 " context %" PRIx64
+ " mmu_idx=%d tl=%d\n",
+ address, context, mmu_idx, env->tl);
+ } else if (!(env->dtlb[i].tte & 0x2) && (rw == 1)) {
+ env->exception_index = TT_DPROT;
+
+ DPRINTF_MMU("DPROT at %" PRIx64 " context %" PRIx64
+ " mmu_idx=%d tl=%d\n",
+ address, context, mmu_idx, env->tl);
+ } else {
+ *prot = PAGE_READ;
+ if (env->dtlb[i].tte & 0x2)
+ *prot |= PAGE_WRITE;
+
+ TTE_SET_USED(env->dtlb[i].tte);
+
+ return 0;
+ }
+
+ if (env->dmmu.sfsr & 1) /* Fault status register */
+ env->dmmu.sfsr = 2; /* overflow (not read before
+ another fault) */
+
+ env->dmmu.sfsr |= (is_user << 3) | ((rw == 1) << 2) | 1;
+
+ env->dmmu.sfsr |= (fault_type << 7);
+
+ env->dmmu.sfar = address; /* Fault address register */
+
+ env->dmmu.tag_access = (address & ~0x1fffULL) | context;
+
+ return 1;
+ }
+ }
+
+ DPRINTF_MMU("DMISS at %" PRIx64 " context %" PRIx64 "\n",
+ address, context);
+
+ env->dmmu.tag_access = (address & ~0x1fffULL) | context;
+ env->exception_index = TT_DMISS;
+ return 1;
+}
+
+static int get_physical_address_code(CPUState *env,
+ target_phys_addr_t *physical, int *prot,
+ target_ulong address, int mmu_idx)
+{
+ unsigned int i;
+ uint64_t context;
+
+ int is_user = (mmu_idx == MMU_USER_IDX ||
+ mmu_idx == MMU_USER_SECONDARY_IDX);
+
+ if ((env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0) {
+ /* IMMU disabled */
+ *physical = ultrasparc_truncate_physical(address);
+ *prot = PAGE_EXEC;
+ return 0;
+ }
+
+ if (env->tl == 0) {
+ /* PRIMARY context */
+ context = env->dmmu.mmu_primary_context & 0x1fff;
+ } else {
+ /* NUCLEUS context */
+ context = 0;
+ }
+
+ for (i = 0; i < 64; i++) {
+ // ctx match, vaddr match, valid?
+ if (ultrasparc_tag_match(&env->itlb[i],
+ address, context, physical)) {
+ // access ok?
+ if ((env->itlb[i].tte & 0x4) && is_user) {
+ if (env->immu.sfsr) /* Fault status register */
+ env->immu.sfsr = 2; /* overflow (not read before
+ another fault) */
+ env->immu.sfsr |= (is_user << 3) | 1;
+ env->exception_index = TT_TFAULT;
+
+ env->immu.tag_access = (address & ~0x1fffULL) | context;
+
+ DPRINTF_MMU("TFAULT at %" PRIx64 " context %" PRIx64 "\n",
+ address, context);
+
+ return 1;
+ }
+ *prot = PAGE_EXEC;
+ TTE_SET_USED(env->itlb[i].tte);
+ return 0;
+ }
+ }
+
+ DPRINTF_MMU("TMISS at %" PRIx64 " context %" PRIx64 "\n",
+ address, context);
+
+ /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */
+ env->immu.tag_access = (address & ~0x1fffULL) | context;
+ env->exception_index = TT_TMISS;
+ return 1;
+}
+
+static int get_physical_address(CPUState *env, target_phys_addr_t *physical,
+ int *prot, int *access_index,
+ target_ulong address, int rw, int mmu_idx,
+ target_ulong *page_size)
+{
+ /* ??? We treat everything as a small page, then explicitly flush
+ everything when an entry is evicted. */
+ *page_size = TARGET_PAGE_SIZE;
+
+#if defined (DEBUG_MMU)
+ /* safety net to catch wrong softmmu index use from dynamic code */
+ if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) {
+ DPRINTF_MMU("get_physical_address %s tl=%d mmu_idx=%d"
+ " primary context=%" PRIx64
+ " secondary context=%" PRIx64
+ " address=%" PRIx64
+ "\n",
+ (rw == 2 ? "CODE" : "DATA"),
+ env->tl, mmu_idx,
+ env->dmmu.mmu_primary_context,
+ env->dmmu.mmu_secondary_context,
+ address);
+ }
+#endif
+
+ if (rw == 2)
+ return get_physical_address_code(env, physical, prot, address,
+ mmu_idx);
+ else
+ return get_physical_address_data(env, physical, prot, address, rw,
+ mmu_idx);
+}
+
+/* Perform address translation */
+int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
+ int mmu_idx, int is_softmmu)
+{
+ target_ulong virt_addr, vaddr;
+ target_phys_addr_t paddr;
+ target_ulong page_size;
+ int error_code = 0, prot, access_index;
+
+ error_code = get_physical_address(env, &paddr, &prot, &access_index,
+ address, rw, mmu_idx, &page_size);
+ if (error_code == 0) {
+ virt_addr = address & TARGET_PAGE_MASK;
+ vaddr = virt_addr + ((address & TARGET_PAGE_MASK) &
+ (TARGET_PAGE_SIZE - 1));
+
+ DPRINTF_MMU("Translate at %" PRIx64 " -> %" PRIx64 ","
+ " vaddr %" PRIx64
+ " mmu_idx=%d"
+ " tl=%d"
+ " primary context=%" PRIx64
+ " secondary context=%" PRIx64
+ "\n",
+ address, paddr, vaddr, mmu_idx, env->tl,
+ env->dmmu.mmu_primary_context,
+ env->dmmu.mmu_secondary_context);
+
+ tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
+ return 0;
+ }
+ // XXX
+ return 1;
+}
+
+void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUState *env)
+{
+ unsigned int i;
+ const char *mask;
+
+ (*cpu_fprintf)(f, "MMU contexts: Primary: %" PRId64 ", Secondary: %"
+ PRId64 "\n",
+ env->dmmu.mmu_primary_context,
+ env->dmmu.mmu_secondary_context);
+ if ((env->lsu & DMMU_E) == 0) {
+ (*cpu_fprintf)(f, "DMMU disabled\n");
+ } else {
+ (*cpu_fprintf)(f, "DMMU dump\n");
+ for (i = 0; i < 64; i++) {
+ switch ((env->dtlb[i].tte >> 61) & 3) {
+ default:
+ case 0x0:
+ mask = " 8k";
+ break;
+ case 0x1:
+ mask = " 64k";
+ break;
+ case 0x2:
+ mask = "512k";
+ break;
+ case 0x3:
+ mask = " 4M";
+ break;
+ }
+ if ((env->dtlb[i].tte & 0x8000000000000000ULL) != 0) {
+ (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %" PRIx64
+ ", %s, %s, %s, %s, ctx %" PRId64 " %s\n",
+ i,
+ env->dtlb[i].tag & (uint64_t)~0x1fffULL,
+ env->dtlb[i].tte & (uint64_t)0x1ffffffe000ULL,
+ mask,
+ env->dtlb[i].tte & 0x4? "priv": "user",
+ env->dtlb[i].tte & 0x2? "RW": "RO",
+ env->dtlb[i].tte & 0x40? "locked": "unlocked",
+ env->dtlb[i].tag & (uint64_t)0x1fffULL,
+ TTE_IS_GLOBAL(env->dtlb[i].tte)?
+ "global" : "local");
+ }
+ }
+ }
+ if ((env->lsu & IMMU_E) == 0) {
+ (*cpu_fprintf)(f, "IMMU disabled\n");
+ } else {
+ (*cpu_fprintf)(f, "IMMU dump\n");
+ for (i = 0; i < 64; i++) {
+ switch ((env->itlb[i].tte >> 61) & 3) {
+ default:
+ case 0x0:
+ mask = " 8k";
+ break;
+ case 0x1:
+ mask = " 64k";
+ break;
+ case 0x2:
+ mask = "512k";
+ break;
+ case 0x3:
+ mask = " 4M";
+ break;
+ }
+ if ((env->itlb[i].tte & 0x8000000000000000ULL) != 0) {
+ (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %" PRIx64
+ ", %s, %s, %s, ctx %" PRId64 " %s\n",
+ i,
+ env->itlb[i].tag & (uint64_t)~0x1fffULL,
+ env->itlb[i].tte & (uint64_t)0x1ffffffe000ULL,
+ mask,
+ env->itlb[i].tte & 0x4? "priv": "user",
+ env->itlb[i].tte & 0x40? "locked": "unlocked",
+ env->itlb[i].tag & (uint64_t)0x1fffULL,
+ TTE_IS_GLOBAL(env->itlb[i].tte)?
+ "global" : "local");
+ }
+ }
+ }
+}
+
+#endif /* TARGET_SPARC64 */
+#endif /* !CONFIG_USER_ONLY */
+
+
+#if !defined(CONFIG_USER_ONLY)
+target_phys_addr_t cpu_get_phys_page_nofault(CPUState *env, target_ulong addr,
+ int mmu_idx)
+{
+ target_phys_addr_t phys_addr;
+ target_ulong page_size;
+ int prot, access_index;
+
+ if (get_physical_address(env, &phys_addr, &prot, &access_index, addr, 2,
+ mmu_idx, &page_size) != 0)
+ if (get_physical_address(env, &phys_addr, &prot, &access_index, addr,
+ 0, mmu_idx, &page_size) != 0)
+ return -1;
+ if (cpu_get_physical_page_desc(phys_addr) == IO_MEM_UNASSIGNED)
+ return -1;
+ return phys_addr;
+}
+
+target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
+{
+ return cpu_get_phys_page_nofault(env, addr, cpu_mmu_index(env));
+}
+#endif
+
+void cpu_reset(CPUSPARCState *env)
+{
+ if (qemu_loglevel_mask(CPU_LOG_RESET)) {
+ qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
+ log_cpu_state(env, 0);
+ }
+
+ tlb_flush(env, 1);
+ env->cwp = 0;
+#ifndef TARGET_SPARC64
+ env->wim = 1;
+#endif
+ env->regwptr = env->regbase + (env->cwp * 16);
+ CC_OP = CC_OP_FLAGS;
+#if defined(CONFIG_USER_ONLY)
+#ifdef TARGET_SPARC64
+ env->cleanwin = env->nwindows - 2;
+ env->cansave = env->nwindows - 2;
+ env->pstate = PS_RMO | PS_PEF | PS_IE;
+ env->asi = 0x82; // Primary no-fault
+#endif
+#else
+#if !defined(TARGET_SPARC64)
+ env->psret = 0;
+ env->psrs = 1;
+ env->psrps = 1;
+#endif
+#ifdef TARGET_SPARC64
+ env->pstate = PS_PRIV|PS_RED|PS_PEF|PS_AG;
+ env->hpstate = cpu_has_hypervisor(env) ? HS_PRIV : 0;
+ env->tl = env->maxtl;
+ cpu_tsptr(env)->tt = TT_POWER_ON_RESET;
+ env->lsu = 0;
+#else
+ env->mmuregs[0] &= ~(MMU_E | MMU_NF);
+ env->mmuregs[0] |= env->def->mmu_bm;
+#endif
+ env->pc = 0;
+ env->npc = env->pc + 4;
+#endif
+ env->cache_control = 0;
+}
+
+static int cpu_sparc_register(CPUSPARCState *env, const char *cpu_model)
+{
+ sparc_def_t def1, *def = &def1;
+
+ if (cpu_sparc_find_by_name(def, cpu_model) < 0)
+ return -1;
+
+ env->def = qemu_mallocz(sizeof(*def));
+ memcpy(env->def, def, sizeof(*def));
+#if defined(CONFIG_USER_ONLY)
+ if ((env->def->features & CPU_FEATURE_FLOAT))
+ env->def->features |= CPU_FEATURE_FLOAT128;
+#endif
+ env->cpu_model_str = cpu_model;
+ env->version = def->iu_version;
+ env->fsr = def->fpu_version;
+ env->nwindows = def->nwindows;
+#if !defined(TARGET_SPARC64)
+ env->mmuregs[0] |= def->mmu_version;
+ cpu_sparc_set_id(env, 0);
+ env->mxccregs[7] |= def->mxcc_version;
+#else
+ env->mmu_version = def->mmu_version;
+ env->maxtl = def->maxtl;
+ env->version |= def->maxtl << 8;
+ env->version |= def->nwindows - 1;
+#endif
+ return 0;
+}
+
+static void cpu_sparc_close(CPUSPARCState *env)
+{
+ free(env->def);
+ free(env);
+}
+
+CPUSPARCState *cpu_sparc_init(const char *cpu_model)
+{
+ CPUSPARCState *env;
+
+ env = qemu_mallocz(sizeof(CPUSPARCState));
+ cpu_exec_init(env);
+
+ gen_intermediate_code_init(env);
+
+ if (cpu_sparc_register(env, cpu_model) < 0) {
+ cpu_sparc_close(env);
+ return NULL;
+ }
+ qemu_init_vcpu(env);
+
+ return env;
+}
+
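A typical call site (illustrative; model names come from the sparc_defs table below, and "TI UltraSparc II" assumes a sparc64 build):

    CPUSPARCState *env = cpu_sparc_init("TI UltraSparc II");
    if (env == NULL) {
        fprintf(stderr, "Unable to find Sparc CPU definition\n");
        exit(1);
    }
    cpu_reset(env);
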
+void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu)
+{
+#if !defined(TARGET_SPARC64)
+ env->mxccregs[7] = ((cpu + 8) & 0xf) << 24;
+#endif
+}
+
+static const sparc_def_t sparc_defs[] = {
+#ifdef TARGET_SPARC64
+ {
+ .name = "Fujitsu Sparc64",
+ .iu_version = ((0x04ULL << 48) | (0x02ULL << 32) | (0ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 4,
+ .maxtl = 4,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Fujitsu Sparc64 III",
+ .iu_version = ((0x04ULL << 48) | (0x03ULL << 32) | (0ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 5,
+ .maxtl = 4,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Fujitsu Sparc64 IV",
+ .iu_version = ((0x04ULL << 48) | (0x04ULL << 32) | (0ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Fujitsu Sparc64 V",
+ .iu_version = ((0x04ULL << 48) | (0x05ULL << 32) | (0x51ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI UltraSparc I",
+ .iu_version = ((0x17ULL << 48) | (0x10ULL << 32) | (0x40ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI UltraSparc II",
+ .iu_version = ((0x17ULL << 48) | (0x11ULL << 32) | (0x20ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI UltraSparc IIi",
+ .iu_version = ((0x17ULL << 48) | (0x12ULL << 32) | (0x91ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI UltraSparc IIe",
+ .iu_version = ((0x17ULL << 48) | (0x13ULL << 32) | (0x14ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Sun UltraSparc III",
+ .iu_version = ((0x3eULL << 48) | (0x14ULL << 32) | (0x34ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Sun UltraSparc III Cu",
+ .iu_version = ((0x3eULL << 48) | (0x15ULL << 32) | (0x41ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_3,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Sun UltraSparc IIIi",
+ .iu_version = ((0x3eULL << 48) | (0x16ULL << 32) | (0x34ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Sun UltraSparc IV",
+ .iu_version = ((0x3eULL << 48) | (0x18ULL << 32) | (0x31ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_4,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Sun UltraSparc IV+",
+ .iu_version = ((0x3eULL << 48) | (0x19ULL << 32) | (0x22ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_CMT,
+ },
+ {
+ .name = "Sun UltraSparc IIIi+",
+ .iu_version = ((0x3eULL << 48) | (0x22ULL << 32) | (0ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_3,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Sun UltraSparc T1",
+ // defined in sparc_ifu_fdp.v and ctu.h
+ .iu_version = ((0x3eULL << 48) | (0x23ULL << 32) | (0x02ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_sun4v,
+ .nwindows = 8,
+ .maxtl = 6,
+ .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_HYPV | CPU_FEATURE_CMT
+ | CPU_FEATURE_GL,
+ },
+ {
+ .name = "Sun UltraSparc T2",
+ // defined in tlu_asi_ctl.v and n2_revid_cust.v
+ .iu_version = ((0x3eULL << 48) | (0x24ULL << 32) | (0x02ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_sun4v,
+ .nwindows = 8,
+ .maxtl = 6,
+ .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_HYPV | CPU_FEATURE_CMT
+ | CPU_FEATURE_GL,
+ },
+ {
+ .name = "NEC UltraSparc I",
+ .iu_version = ((0x22ULL << 48) | (0x10ULL << 32) | (0x40ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+#else
+ {
+ .name = "Fujitsu MB86900",
+ .iu_version = 0x00 << 24, /* Impl 0, ver 0 */
+ .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */
+ .mmu_version = 0x00 << 24, /* Impl 0, ver 0 */
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x007ffff0,
+ .mmu_cxr_mask = 0x0000003f,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 7,
+ .features = CPU_FEATURE_FLOAT | CPU_FEATURE_FSMULD,
+ },
+ {
+ .name = "Fujitsu MB86904",
+ .iu_version = 0x04 << 24, /* Impl 0, ver 4 */
+ .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */
+ .mmu_version = 0x04 << 24, /* Impl 0, ver 4 */
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x00ffffc0,
+ .mmu_cxr_mask = 0x000000ff,
+ .mmu_sfsr_mask = 0x00016fff,
+ .mmu_trcr_mask = 0x00ffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Fujitsu MB86907",
+ .iu_version = 0x05 << 24, /* Impl 0, ver 5 */
+ .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */
+ .mmu_version = 0x05 << 24, /* Impl 0, ver 5 */
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x000000ff,
+ .mmu_sfsr_mask = 0x00016fff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "LSI L64811",
+ .iu_version = 0x10 << 24, /* Impl 1, ver 0 */
+ .fpu_version = 1 << 17, /* FPU version 1 (LSI L64814) */
+ .mmu_version = 0x10 << 24,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x007ffff0,
+ .mmu_cxr_mask = 0x0000003f,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | CPU_FEATURE_FSQRT |
+ CPU_FEATURE_FSMULD,
+ },
+ {
+ .name = "Cypress CY7C601",
+ .iu_version = 0x11 << 24, /* Impl 1, ver 1 */
+ .fpu_version = 3 << 17, /* FPU version 3 (Cypress CY7C602) */
+ .mmu_version = 0x10 << 24,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x007ffff0,
+ .mmu_cxr_mask = 0x0000003f,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | CPU_FEATURE_FSQRT |
+ CPU_FEATURE_FSMULD,
+ },
+ {
+ .name = "Cypress CY7C611",
+ .iu_version = 0x13 << 24, /* Impl 1, ver 3 */
+ .fpu_version = 3 << 17, /* FPU version 3 (Cypress CY7C602) */
+ .mmu_version = 0x10 << 24,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x007ffff0,
+ .mmu_cxr_mask = 0x0000003f,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | CPU_FEATURE_FSQRT |
+ CPU_FEATURE_FSMULD,
+ },
+ {
+ .name = "TI MicroSparc I",
+ .iu_version = 0x41000000,
+ .fpu_version = 4 << 17,
+ .mmu_version = 0x41000000,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x007ffff0,
+ .mmu_cxr_mask = 0x0000003f,
+ .mmu_sfsr_mask = 0x00016fff,
+ .mmu_trcr_mask = 0x0000003f,
+ .nwindows = 7,
+ .features = CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | CPU_FEATURE_MUL |
+ CPU_FEATURE_DIV | CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT |
+ CPU_FEATURE_FMUL,
+ },
+ {
+ .name = "TI MicroSparc II",
+ .iu_version = 0x42000000,
+ .fpu_version = 4 << 17,
+ .mmu_version = 0x02000000,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x00ffffc0,
+ .mmu_cxr_mask = 0x000000ff,
+ .mmu_sfsr_mask = 0x00016fff,
+ .mmu_trcr_mask = 0x00ffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI MicroSparc IIep",
+ .iu_version = 0x42000000,
+ .fpu_version = 4 << 17,
+ .mmu_version = 0x04000000,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x00ffffc0,
+ .mmu_cxr_mask = 0x000000ff,
+ .mmu_sfsr_mask = 0x00016bff,
+ .mmu_trcr_mask = 0x00ffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI SuperSparc 40", // STP1020NPGA
+ .iu_version = 0x41000000, // SuperSPARC 2.x
+ .fpu_version = 0 << 17,
+ .mmu_version = 0x00000800, // SuperSPARC 2.x, no MXCC
+ .mmu_bm = 0x00002000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x0000ffff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI SuperSparc 50", // STP1020PGA
+ .iu_version = 0x40000000, // SuperSPARC 3.x
+ .fpu_version = 0 << 17,
+ .mmu_version = 0x01000800, // SuperSPARC 3.x, no MXCC
+ .mmu_bm = 0x00002000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x0000ffff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI SuperSparc 51",
+ .iu_version = 0x40000000, // SuperSPARC 3.x
+ .fpu_version = 0 << 17,
+ .mmu_version = 0x01000000, // SuperSPARC 3.x, MXCC
+ .mmu_bm = 0x00002000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x0000ffff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .mxcc_version = 0x00000104,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI SuperSparc 60", // STP1020APGA
+ .iu_version = 0x40000000, // SuperSPARC 3.x
+ .fpu_version = 0 << 17,
+ .mmu_version = 0x01000800, // SuperSPARC 3.x, no MXCC
+ .mmu_bm = 0x00002000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x0000ffff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI SuperSparc 61",
+ .iu_version = 0x44000000, // SuperSPARC 3.x
+ .fpu_version = 0 << 17,
+ .mmu_version = 0x01000000, // SuperSPARC 3.x, MXCC
+ .mmu_bm = 0x00002000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x0000ffff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .mxcc_version = 0x00000104,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI SuperSparc II",
+ .iu_version = 0x40000000, // SuperSPARC II 1.x
+ .fpu_version = 0 << 17,
+ .mmu_version = 0x08000000, // SuperSPARC II 1.x, MXCC
+ .mmu_bm = 0x00002000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x0000ffff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .mxcc_version = 0x00000104,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Ross RT625",
+ .iu_version = 0x1e000000,
+ .fpu_version = 1 << 17,
+ .mmu_version = 0x1e000000,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x007ffff0,
+ .mmu_cxr_mask = 0x0000003f,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Ross RT620",
+ .iu_version = 0x1f000000,
+ .fpu_version = 1 << 17,
+ .mmu_version = 0x1f000000,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x007ffff0,
+ .mmu_cxr_mask = 0x0000003f,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "BIT B5010",
+ .iu_version = 0x20000000,
+ .fpu_version = 0 << 17, /* B5010/B5110/B5120/B5210 */
+ .mmu_version = 0x20000000,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x007ffff0,
+ .mmu_cxr_mask = 0x0000003f,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | CPU_FEATURE_FSQRT |
+ CPU_FEATURE_FSMULD,
+ },
+ {
+ .name = "Matsushita MN10501",
+ .iu_version = 0x50000000,
+ .fpu_version = 0 << 17,
+ .mmu_version = 0x50000000,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x007ffff0,
+ .mmu_cxr_mask = 0x0000003f,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_FEATURE_FLOAT | CPU_FEATURE_MUL | CPU_FEATURE_FSQRT |
+ CPU_FEATURE_FSMULD,
+ },
+ {
+ .name = "Weitek W8601",
+ .iu_version = 0x90 << 24, /* Impl 9, ver 0 */
+ .fpu_version = 3 << 17, /* FPU version 3 (Weitek WTL3170/2) */
+ .mmu_version = 0x10 << 24,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x007ffff0,
+ .mmu_cxr_mask = 0x0000003f,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "LEON2",
+ .iu_version = 0xf2000000,
+ .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */
+ .mmu_version = 0xf2000000,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x007ffff0,
+ .mmu_cxr_mask = 0x0000003f,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_TA0_SHUTDOWN,
+ },
+ {
+ .name = "LEON3",
+ .iu_version = 0xf3000000,
+ .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */
+ .mmu_version = 0xf3000000,
+ .mmu_bm = 0x00000000,
+ .mmu_ctpr_mask = 0x007ffff0,
+ .mmu_cxr_mask = 0x0000003f,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_TA0_SHUTDOWN |
+ CPU_FEATURE_ASR17 | CPU_FEATURE_CACHE_CTRL,
+ },
+#endif
+};
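+
+/* Note on the encodings above: iu_version follows the PSR layout (impl
+   in bits 31:28, version in bits 27:24, e.g. "Impl 9, ver 0" for
+   0x90 << 24), and fpu_version is stored pre-shifted into the FSR.ver
+   position, bits 19:17 - hence the "N << 17" values. */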
+
+static const char * const feature_name[] = {
+ "float",
+ "float128",
+ "swap",
+ "mul",
+ "div",
+ "flush",
+ "fsqrt",
+ "fmul",
+ "vis1",
+ "vis2",
+ "fsmuld",
+ "hypv",
+ "cmt",
+ "gl",
+};
+
+static void print_features(FILE *f, fprintf_function cpu_fprintf,
+ uint32_t features, const char *prefix)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(feature_name); i++)
+ if (feature_name[i] && (features & (1 << i))) {
+ if (prefix)
+ (*cpu_fprintf)(f, "%s", prefix);
+ (*cpu_fprintf)(f, "%s ", feature_name[i]);
+ }
+}
+
+static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(feature_name); i++)
+ if (feature_name[i] && !strcmp(flagname, feature_name[i])) {
+ *features |= 1 << i;
+ return;
+ }
+ fprintf(stderr, "CPU feature %s not found\n", flagname);
+}
+
+static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *cpu_model)
+{
+ unsigned int i;
+ const sparc_def_t *def = NULL;
+ char *s = strdup(cpu_model);
+ char *featurestr, *name = strtok(s, ",");
+ uint32_t plus_features = 0;
+ uint32_t minus_features = 0;
+ uint64_t iu_version;
+ uint32_t fpu_version, mmu_version, nwindows;
+
+ for (i = 0; i < ARRAY_SIZE(sparc_defs); i++) {
+ if (strcasecmp(name, sparc_defs[i].name) == 0) {
+ def = &sparc_defs[i];
+ }
+ }
+ if (!def)
+ goto error;
+ memcpy(cpu_def, def, sizeof(*def));
+
+ featurestr = strtok(NULL, ",");
+ while (featurestr) {
+ char *val;
+
+ if (featurestr[0] == '+') {
+ add_flagname_to_bitmaps(featurestr + 1, &plus_features);
+ } else if (featurestr[0] == '-') {
+ add_flagname_to_bitmaps(featurestr + 1, &minus_features);
+ } else if ((val = strchr(featurestr, '='))) {
+ *val = 0; val++;
+ if (!strcmp(featurestr, "iu_version")) {
+ char *err;
+
+ iu_version = strtoll(val, &err, 0);
+ if (!*val || *err) {
+ fprintf(stderr, "bad numerical value %s\n", val);
+ goto error;
+ }
+ cpu_def->iu_version = iu_version;
+#ifdef DEBUG_FEATURES
+ fprintf(stderr, "iu_version %" PRIx64 "\n", iu_version);
+#endif
+ } else if (!strcmp(featurestr, "fpu_version")) {
+ char *err;
+
+ fpu_version = strtol(val, &err, 0);
+ if (!*val || *err) {
+ fprintf(stderr, "bad numerical value %s\n", val);
+ goto error;
+ }
+ cpu_def->fpu_version = fpu_version;
+#ifdef DEBUG_FEATURES
+ fprintf(stderr, "fpu_version %x\n", fpu_version);
+#endif
+ } else if (!strcmp(featurestr, "mmu_version")) {
+ char *err;
+
+ mmu_version = strtol(val, &err, 0);
+ if (!*val || *err) {
+ fprintf(stderr, "bad numerical value %s\n", val);
+ goto error;
+ }
+ cpu_def->mmu_version = mmu_version;
+#ifdef DEBUG_FEATURES
+ fprintf(stderr, "mmu_version %x\n", mmu_version);
+#endif
+ } else if (!strcmp(featurestr, "nwindows")) {
+ char *err;
+
+ nwindows = strtol(val, &err, 0);
+ if (!*val || *err || nwindows > MAX_NWINDOWS ||
+ nwindows < MIN_NWINDOWS) {
+ fprintf(stderr, "bad numerical value %s\n", val);
+ goto error;
+ }
+ cpu_def->nwindows = nwindows;
+#ifdef DEBUG_FEATURES
+ fprintf(stderr, "nwindows %d\n", nwindows);
+#endif
+ } else {
+ fprintf(stderr, "unrecognized feature %s\n", featurestr);
+ goto error;
+ }
+ } else {
+ fprintf(stderr, "feature string `%s' not in format "
+ "(+feature|-feature|feature=xyz)\n", featurestr);
+ goto error;
+ }
+ featurestr = strtok(NULL, ",");
+ }
+ cpu_def->features |= plus_features;
+ cpu_def->features &= ~minus_features;
+#ifdef DEBUG_FEATURES
+ print_features(stderr, fprintf, cpu_def->features, NULL);
+#endif
+ free(s);
+ return 0;
+
+ error:
+ free(s);
+ return -1;
+}
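+
+/* For illustration, a model string such as
+   "TI SuperSparc II,+vis1,-fsqrt,nwindows=6" (arbitrary example values)
+   first selects the named definition, then applies the '+'/'-' feature
+   flags and the '=' numerical overrides parsed above. */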
+
+void sparc_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(sparc_defs); i++) {
+ (*cpu_fprintf)(f, "Sparc %16s IU " TARGET_FMT_lx " FPU %08x MMU %08x NWINS %d ",
+ sparc_defs[i].name,
+ sparc_defs[i].iu_version,
+ sparc_defs[i].fpu_version,
+ sparc_defs[i].mmu_version,
+ sparc_defs[i].nwindows);
+ print_features(f, cpu_fprintf, CPU_DEFAULT_FEATURES &
+ ~sparc_defs[i].features, "-");
+ print_features(f, cpu_fprintf, ~CPU_DEFAULT_FEATURES &
+ sparc_defs[i].features, "+");
+ (*cpu_fprintf)(f, "\n");
+ }
+ (*cpu_fprintf)(f, "Default CPU feature flags (use '-' to remove): ");
+ print_features(f, cpu_fprintf, CPU_DEFAULT_FEATURES, NULL);
+ (*cpu_fprintf)(f, "\n");
+ (*cpu_fprintf)(f, "Available CPU feature flags (use '+' to add): ");
+ print_features(f, cpu_fprintf, ~CPU_DEFAULT_FEATURES, NULL);
+ (*cpu_fprintf)(f, "\n");
+ (*cpu_fprintf)(f, "Numerical features (use '=' to set): iu_version "
+ "fpu_version mmu_version nwindows\n");
+}
+
+static void cpu_print_cc(FILE *f, fprintf_function cpu_fprintf,
+ uint32_t cc)
+{
+ cpu_fprintf(f, "%c%c%c%c", cc & PSR_NEG? 'N' : '-',
+ cc & PSR_ZERO? 'Z' : '-', cc & PSR_OVF? 'V' : '-',
+ cc & PSR_CARRY? 'C' : '-');
+}
+
+#ifdef TARGET_SPARC64
+#define REGS_PER_LINE 4
+#else
+#define REGS_PER_LINE 8
+#endif
+
+void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
+{
+ int i, x;
+
+ cpu_fprintf(f, "pc: " TARGET_FMT_lx " npc: " TARGET_FMT_lx "\n", env->pc,
+ env->npc);
+ cpu_fprintf(f, "General Registers:\n");
+
+ for (i = 0; i < 8; i++) {
+ if (i % REGS_PER_LINE == 0) {
+ cpu_fprintf(f, "%%g%d-%d:", i, i + REGS_PER_LINE - 1);
+ }
+ cpu_fprintf(f, " " TARGET_FMT_lx, env->gregs[i]);
+ if (i % REGS_PER_LINE == REGS_PER_LINE - 1) {
+ cpu_fprintf(f, "\n");
+ }
+ }
+ cpu_fprintf(f, "\nCurrent Register Window:\n");
+ for (x = 0; x < 3; x++) {
+ for (i = 0; i < 8; i++) {
+ if (i % REGS_PER_LINE == 0) {
+ cpu_fprintf(f, "%%%c%d-%d: ",
+ x == 0 ? 'o' : (x == 1 ? 'l' : 'i'),
+ i, i + REGS_PER_LINE - 1);
+ }
+ cpu_fprintf(f, TARGET_FMT_lx " ", env->regwptr[i + x * 8]);
+ if (i % REGS_PER_LINE == REGS_PER_LINE - 1) {
+ cpu_fprintf(f, "\n");
+ }
+ }
+ }
+ cpu_fprintf(f, "\nFloating Point Registers:\n");
+ for (i = 0; i < TARGET_FPREGS; i++) {
+ if ((i & 3) == 0)
+ cpu_fprintf(f, "%%f%02d:", i);
+ cpu_fprintf(f, " %016f", *(float *)&env->fpr[i]);
+ if ((i & 3) == 3)
+ cpu_fprintf(f, "\n");
+ }
+#ifdef TARGET_SPARC64
+ cpu_fprintf(f, "pstate: %08x ccr: %02x (icc: ", env->pstate,
+ (unsigned)cpu_get_ccr(env));
+ cpu_print_cc(f, cpu_fprintf, cpu_get_ccr(env) << PSR_CARRY_SHIFT);
+ cpu_fprintf(f, " xcc: ");
+ cpu_print_cc(f, cpu_fprintf, cpu_get_ccr(env) << (PSR_CARRY_SHIFT - 4));
+ cpu_fprintf(f, ") asi: %02x tl: %d pil: %x\n", env->asi, env->tl,
+ env->psrpil);
+ cpu_fprintf(f, "cansave: %d canrestore: %d otherwin: %d wstate: %d "
+ "cleanwin: %d cwp: %d\n",
+ env->cansave, env->canrestore, env->otherwin, env->wstate,
+ env->cleanwin, env->nwindows - 1 - env->cwp);
+ cpu_fprintf(f, "fsr: " TARGET_FMT_lx " y: " TARGET_FMT_lx " fprs: "
+ TARGET_FMT_lx "\n", env->fsr, env->y, env->fprs);
+#else
+ cpu_fprintf(f, "psr: %08x (icc: ", cpu_get_psr(env));
+ cpu_print_cc(f, cpu_fprintf, cpu_get_psr(env));
+ cpu_fprintf(f, " SPE: %c%c%c) wim: %08x\n", env->psrs? 'S' : '-',
+ env->psrps? 'P' : '-', env->psret? 'E' : '-',
+ env->wim);
+ cpu_fprintf(f, "fsr: " TARGET_FMT_lx " y: " TARGET_FMT_lx "\n",
+ env->fsr, env->y);
+#endif
+}
diff --git a/target-sparc/helper.h b/target-sparc/helper.h
new file mode 100644
index 0000000..12e8557
--- /dev/null
+++ b/target-sparc/helper.h
@@ -0,0 +1,166 @@
+#include "def-helper.h"
+
+#ifndef TARGET_SPARC64
+DEF_HELPER_0(rett, void)
+DEF_HELPER_1(wrpsr, void, tl)
+DEF_HELPER_0(rdpsr, tl)
+#else
+DEF_HELPER_1(wrpil, void, tl)
+DEF_HELPER_1(wrpstate, void, tl)
+DEF_HELPER_0(done, void)
+DEF_HELPER_0(retry, void)
+DEF_HELPER_0(flushw, void)
+DEF_HELPER_0(saved, void)
+DEF_HELPER_0(restored, void)
+DEF_HELPER_0(rdccr, tl)
+DEF_HELPER_1(wrccr, void, tl)
+DEF_HELPER_0(rdcwp, tl)
+DEF_HELPER_1(wrcwp, void, tl)
+DEF_HELPER_2(array8, tl, tl, tl)
+DEF_HELPER_2(alignaddr, tl, tl, tl)
+DEF_HELPER_1(popc, tl, tl)
+DEF_HELPER_3(ldda_asi, void, tl, int, int)
+DEF_HELPER_4(ldf_asi, void, tl, int, int, int)
+DEF_HELPER_4(stf_asi, void, tl, int, int, int)
+DEF_HELPER_4(cas_asi, tl, tl, tl, tl, i32)
+DEF_HELPER_4(casx_asi, tl, tl, tl, tl, i32)
+DEF_HELPER_1(set_softint, void, i64)
+DEF_HELPER_1(clear_softint, void, i64)
+DEF_HELPER_1(write_softint, void, i64)
+DEF_HELPER_2(tick_set_count, void, ptr, i64)
+DEF_HELPER_1(tick_get_count, i64, ptr)
+DEF_HELPER_2(tick_set_limit, void, ptr, i64)
+#endif
+DEF_HELPER_2(check_align, void, tl, i32)
+DEF_HELPER_0(debug, void)
+DEF_HELPER_0(save, void)
+DEF_HELPER_0(restore, void)
+DEF_HELPER_1(flush, void, tl)
+DEF_HELPER_2(udiv, tl, tl, tl)
+DEF_HELPER_2(udiv_cc, tl, tl, tl)
+DEF_HELPER_2(sdiv, tl, tl, tl)
+DEF_HELPER_2(sdiv_cc, tl, tl, tl)
+DEF_HELPER_2(stdf, void, tl, int)
+DEF_HELPER_2(lddf, void, tl, int)
+DEF_HELPER_2(ldqf, void, tl, int)
+DEF_HELPER_2(stqf, void, tl, int)
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+DEF_HELPER_4(ld_asi, i64, tl, int, int, int)
+DEF_HELPER_4(st_asi, void, tl, i64, int, int)
+#endif
+DEF_HELPER_1(ldfsr, void, i32)
+DEF_HELPER_0(check_ieee_exceptions, void)
+DEF_HELPER_0(clear_float_exceptions, void)
+DEF_HELPER_1(fabss, f32, f32)
+DEF_HELPER_1(fsqrts, f32, f32)
+DEF_HELPER_0(fsqrtd, void)
+DEF_HELPER_2(fcmps, void, f32, f32)
+DEF_HELPER_0(fcmpd, void)
+DEF_HELPER_2(fcmpes, void, f32, f32)
+DEF_HELPER_0(fcmped, void)
+DEF_HELPER_0(fsqrtq, void)
+DEF_HELPER_0(fcmpq, void)
+DEF_HELPER_0(fcmpeq, void)
+#ifdef TARGET_SPARC64
+DEF_HELPER_1(ldxfsr, void, i64)
+DEF_HELPER_0(fabsd, void)
+DEF_HELPER_2(fcmps_fcc1, void, f32, f32)
+DEF_HELPER_2(fcmps_fcc2, void, f32, f32)
+DEF_HELPER_2(fcmps_fcc3, void, f32, f32)
+DEF_HELPER_0(fcmpd_fcc1, void)
+DEF_HELPER_0(fcmpd_fcc2, void)
+DEF_HELPER_0(fcmpd_fcc3, void)
+DEF_HELPER_2(fcmpes_fcc1, void, f32, f32)
+DEF_HELPER_2(fcmpes_fcc2, void, f32, f32)
+DEF_HELPER_2(fcmpes_fcc3, void, f32, f32)
+DEF_HELPER_0(fcmped_fcc1, void)
+DEF_HELPER_0(fcmped_fcc2, void)
+DEF_HELPER_0(fcmped_fcc3, void)
+DEF_HELPER_0(fabsq, void)
+DEF_HELPER_0(fcmpq_fcc1, void)
+DEF_HELPER_0(fcmpq_fcc2, void)
+DEF_HELPER_0(fcmpq_fcc3, void)
+DEF_HELPER_0(fcmpeq_fcc1, void)
+DEF_HELPER_0(fcmpeq_fcc2, void)
+DEF_HELPER_0(fcmpeq_fcc3, void)
+#endif
+DEF_HELPER_1(raise_exception, void, int)
+DEF_HELPER_0(shutdown, void)
+#define F_HELPER_0_0(name) DEF_HELPER_0(f ## name, void)
+#define F_HELPER_DQ_0_0(name) \
+ F_HELPER_0_0(name ## d); \
+ F_HELPER_0_0(name ## q)
+
+F_HELPER_DQ_0_0(add);
+F_HELPER_DQ_0_0(sub);
+F_HELPER_DQ_0_0(mul);
+F_HELPER_DQ_0_0(div);
+
+DEF_HELPER_2(fadds, f32, f32, f32)
+DEF_HELPER_2(fsubs, f32, f32, f32)
+DEF_HELPER_2(fmuls, f32, f32, f32)
+DEF_HELPER_2(fdivs, f32, f32, f32)
+
+DEF_HELPER_2(fsmuld, void, f32, f32)
+F_HELPER_0_0(dmulq);
+
+DEF_HELPER_1(fnegs, f32, f32)
+DEF_HELPER_1(fitod, void, s32)
+DEF_HELPER_1(fitoq, void, s32)
+
+DEF_HELPER_1(fitos, f32, s32)
+
+#ifdef TARGET_SPARC64
+DEF_HELPER_0(fnegd, void)
+DEF_HELPER_0(fnegq, void)
+DEF_HELPER_0(fxtos, f32)
+F_HELPER_DQ_0_0(xto);
+#endif
+DEF_HELPER_0(fdtos, f32)
+DEF_HELPER_1(fstod, void, f32)
+DEF_HELPER_0(fqtos, f32)
+DEF_HELPER_1(fstoq, void, f32)
+F_HELPER_0_0(qtod);
+F_HELPER_0_0(dtoq);
+DEF_HELPER_1(fstoi, s32, f32)
+DEF_HELPER_0(fdtoi, s32)
+DEF_HELPER_0(fqtoi, s32)
+#ifdef TARGET_SPARC64
+DEF_HELPER_1(fstox, void, i32)
+F_HELPER_0_0(dtox);
+F_HELPER_0_0(qtox);
+F_HELPER_0_0(aligndata);
+
+F_HELPER_0_0(pmerge);
+F_HELPER_0_0(mul8x16);
+F_HELPER_0_0(mul8x16al);
+F_HELPER_0_0(mul8x16au);
+F_HELPER_0_0(mul8sux16);
+F_HELPER_0_0(mul8ulx16);
+F_HELPER_0_0(muld8sux16);
+F_HELPER_0_0(muld8ulx16);
+F_HELPER_0_0(expand);
+#define VIS_HELPER(name) \
+ F_HELPER_0_0(name##16); \
+ DEF_HELPER_2(f ## name ## 16s, i32, i32, i32) \
+ F_HELPER_0_0(name##32); \
+ DEF_HELPER_2(f ## name ## 32s, i32, i32, i32)
+
+VIS_HELPER(padd);
+VIS_HELPER(psub);
+#define VIS_CMPHELPER(name) \
+ F_HELPER_0_0(name##16); \
+ F_HELPER_0_0(name##32)
+VIS_CMPHELPER(cmpgt);
+VIS_CMPHELPER(cmpeq);
+VIS_CMPHELPER(cmple);
+VIS_CMPHELPER(cmpne);
+#endif
+#undef F_HELPER_0_0
+#undef F_HELPER_DQ_0_0
+#undef VIS_HELPER
+#undef VIS_CMPHELPER
+DEF_HELPER_0(compute_psr, void);
+DEF_HELPER_0(compute_C_icc, i32);
+
+#include "def-helper.h"
diff --git a/target-sparc/machine.c b/target-sparc/machine.c
new file mode 100644
index 0000000..752e431
--- /dev/null
+++ b/target-sparc/machine.c
@@ -0,0 +1,199 @@
+#include "hw/hw.h"
+#include "hw/boards.h"
+#include "qemu-timer.h"
+
+#include "exec-all.h"
+
+void cpu_save(QEMUFile *f, void *opaque)
+{
+ CPUState *env = opaque;
+ int i;
+ uint32_t tmp;
+
+ // if env->cwp == env->nwindows - 1, this will set the ins of the last
+ // window as the outs of the first window
+ cpu_set_cwp(env, env->cwp);
+
+ for(i = 0; i < 8; i++)
+ qemu_put_betls(f, &env->gregs[i]);
+ qemu_put_be32s(f, &env->nwindows);
+ for(i = 0; i < env->nwindows * 16; i++)
+ qemu_put_betls(f, &env->regbase[i]);
+
+ /* FPU */
+ for(i = 0; i < TARGET_FPREGS; i++) {
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+ u.f = env->fpr[i];
+ qemu_put_be32(f, u.i);
+ }
+
+ qemu_put_betls(f, &env->pc);
+ qemu_put_betls(f, &env->npc);
+ qemu_put_betls(f, &env->y);
+ tmp = cpu_get_psr(env);
+ qemu_put_be32(f, tmp);
+ qemu_put_betls(f, &env->fsr);
+ qemu_put_betls(f, &env->tbr);
+ tmp = env->interrupt_index;
+ qemu_put_be32(f, tmp);
+ qemu_put_be32s(f, &env->pil_in);
+#ifndef TARGET_SPARC64
+ qemu_put_be32s(f, &env->wim);
+ /* MMU */
+ for (i = 0; i < 32; i++)
+ qemu_put_be32s(f, &env->mmuregs[i]);
+#else
+ qemu_put_be64s(f, &env->lsu);
+ for (i = 0; i < 16; i++) {
+ qemu_put_be64s(f, &env->immuregs[i]);
+ qemu_put_be64s(f, &env->dmmuregs[i]);
+ }
+ for (i = 0; i < 64; i++) {
+ qemu_put_be64s(f, &env->itlb[i].tag);
+ qemu_put_be64s(f, &env->itlb[i].tte);
+ qemu_put_be64s(f, &env->dtlb[i].tag);
+ qemu_put_be64s(f, &env->dtlb[i].tte);
+ }
+ qemu_put_be32s(f, &env->mmu_version);
+ for (i = 0; i < MAXTL_MAX; i++) {
+ qemu_put_be64s(f, &env->ts[i].tpc);
+ qemu_put_be64s(f, &env->ts[i].tnpc);
+ qemu_put_be64s(f, &env->ts[i].tstate);
+ qemu_put_be32s(f, &env->ts[i].tt);
+ }
+ qemu_put_be32s(f, &env->xcc);
+ qemu_put_be32s(f, &env->asi);
+ qemu_put_be32s(f, &env->pstate);
+ qemu_put_be32s(f, &env->tl);
+ qemu_put_be32s(f, &env->cansave);
+ qemu_put_be32s(f, &env->canrestore);
+ qemu_put_be32s(f, &env->otherwin);
+ qemu_put_be32s(f, &env->wstate);
+ qemu_put_be32s(f, &env->cleanwin);
+ for (i = 0; i < 8; i++)
+ qemu_put_be64s(f, &env->agregs[i]);
+ for (i = 0; i < 8; i++)
+ qemu_put_be64s(f, &env->bgregs[i]);
+ for (i = 0; i < 8; i++)
+ qemu_put_be64s(f, &env->igregs[i]);
+ for (i = 0; i < 8; i++)
+ qemu_put_be64s(f, &env->mgregs[i]);
+ qemu_put_be64s(f, &env->fprs);
+ qemu_put_be64s(f, &env->tick_cmpr);
+ qemu_put_be64s(f, &env->stick_cmpr);
+ cpu_put_timer(f, env->tick);
+ cpu_put_timer(f, env->stick);
+ qemu_put_be64s(f, &env->gsr);
+ qemu_put_be32s(f, &env->gl);
+ qemu_put_be64s(f, &env->hpstate);
+ for (i = 0; i < MAXTL_MAX; i++)
+ qemu_put_be64s(f, &env->htstate[i]);
+ qemu_put_be64s(f, &env->hintp);
+ qemu_put_be64s(f, &env->htba);
+ qemu_put_be64s(f, &env->hver);
+ qemu_put_be64s(f, &env->hstick_cmpr);
+ qemu_put_be64s(f, &env->ssr);
+ cpu_put_timer(f, env->hstick);
+#endif
+}
+
+int cpu_load(QEMUFile *f, void *opaque, int version_id)
+{
+ CPUState *env = opaque;
+ int i;
+ uint32_t tmp;
+
+ if (version_id < 6)
+ return -EINVAL;
+ for(i = 0; i < 8; i++)
+ qemu_get_betls(f, &env->gregs[i]);
+ qemu_get_be32s(f, &env->nwindows);
+ for(i = 0; i < env->nwindows * 16; i++)
+ qemu_get_betls(f, &env->regbase[i]);
+
+ /* FPU */
+ for(i = 0; i < TARGET_FPREGS; i++) {
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+ u.i = qemu_get_be32(f);
+ env->fpr[i] = u.f;
+ }
+
+ qemu_get_betls(f, &env->pc);
+ qemu_get_betls(f, &env->npc);
+ qemu_get_betls(f, &env->y);
+ tmp = qemu_get_be32(f);
+ env->cwp = 0; /* needed to ensure that the wrapping registers are
+ correctly updated */
+ cpu_put_psr(env, tmp);
+ qemu_get_betls(f, &env->fsr);
+ qemu_get_betls(f, &env->tbr);
+ tmp = qemu_get_be32(f);
+ env->interrupt_index = tmp;
+ qemu_get_be32s(f, &env->pil_in);
+#ifndef TARGET_SPARC64
+ qemu_get_be32s(f, &env->wim);
+ /* MMU */
+ for (i = 0; i < 32; i++)
+ qemu_get_be32s(f, &env->mmuregs[i]);
+#else
+ qemu_get_be64s(f, &env->lsu);
+ for (i = 0; i < 16; i++) {
+ qemu_get_be64s(f, &env->immuregs[i]);
+ qemu_get_be64s(f, &env->dmmuregs[i]);
+ }
+ for (i = 0; i < 64; i++) {
+ qemu_get_be64s(f, &env->itlb[i].tag);
+ qemu_get_be64s(f, &env->itlb[i].tte);
+ qemu_get_be64s(f, &env->dtlb[i].tag);
+ qemu_get_be64s(f, &env->dtlb[i].tte);
+ }
+ qemu_get_be32s(f, &env->mmu_version);
+ for (i = 0; i < MAXTL_MAX; i++) {
+ qemu_get_be64s(f, &env->ts[i].tpc);
+ qemu_get_be64s(f, &env->ts[i].tnpc);
+ qemu_get_be64s(f, &env->ts[i].tstate);
+ qemu_get_be32s(f, &env->ts[i].tt);
+ }
+ qemu_get_be32s(f, &env->xcc);
+ qemu_get_be32s(f, &env->asi);
+ qemu_get_be32s(f, &env->pstate);
+ qemu_get_be32s(f, &env->tl);
+ qemu_get_be32s(f, &env->cansave);
+ qemu_get_be32s(f, &env->canrestore);
+ qemu_get_be32s(f, &env->otherwin);
+ qemu_get_be32s(f, &env->wstate);
+ qemu_get_be32s(f, &env->cleanwin);
+ for (i = 0; i < 8; i++)
+ qemu_get_be64s(f, &env->agregs[i]);
+ for (i = 0; i < 8; i++)
+ qemu_get_be64s(f, &env->bgregs[i]);
+ for (i = 0; i < 8; i++)
+ qemu_get_be64s(f, &env->igregs[i]);
+ for (i = 0; i < 8; i++)
+ qemu_get_be64s(f, &env->mgregs[i]);
+ qemu_get_be64s(f, &env->fprs);
+ qemu_get_be64s(f, &env->tick_cmpr);
+ qemu_get_be64s(f, &env->stick_cmpr);
+ cpu_get_timer(f, env->tick);
+ cpu_get_timer(f, env->stick);
+ qemu_get_be64s(f, &env->gsr);
+ qemu_get_be32s(f, &env->gl);
+ qemu_get_be64s(f, &env->hpstate);
+ for (i = 0; i < MAXTL_MAX; i++)
+ qemu_get_be64s(f, &env->htstate[i]);
+ qemu_get_be64s(f, &env->hintp);
+ qemu_get_be64s(f, &env->htba);
+ qemu_get_be64s(f, &env->hver);
+ qemu_get_be64s(f, &env->hstick_cmpr);
+ qemu_get_be64s(f, &env->ssr);
+ cpu_get_timer(f, env->hstick);
+#endif
+ tlb_flush(env, 1);
+ return 0;
+}
diff --git a/target-sparc/op_helper.c b/target-sparc/op_helper.c
new file mode 100644
index 0000000..854f168
--- /dev/null
+++ b/target-sparc/op_helper.c
@@ -0,0 +1,4535 @@
+#include "exec.h"
+#include "host-utils.h"
+#include "helper.h"
+#include "sysemu.h"
+
+//#define DEBUG_MMU
+//#define DEBUG_MXCC
+//#define DEBUG_UNALIGNED
+//#define DEBUG_UNASSIGNED
+//#define DEBUG_ASI
+//#define DEBUG_PCALL
+//#define DEBUG_PSTATE
+//#define DEBUG_CACHE_CONTROL
+
+#ifdef DEBUG_MMU
+#define DPRINTF_MMU(fmt, ...) \
+ do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF_MMU(fmt, ...) do {} while (0)
+#endif
+
+#ifdef DEBUG_MXCC
+#define DPRINTF_MXCC(fmt, ...) \
+ do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF_MXCC(fmt, ...) do {} while (0)
+#endif
+
+#ifdef DEBUG_ASI
+#define DPRINTF_ASI(fmt, ...) \
+ do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
+#endif
+
+#ifdef DEBUG_PSTATE
+#define DPRINTF_PSTATE(fmt, ...) \
+ do { printf("PSTATE: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF_PSTATE(fmt, ...) do {} while (0)
+#endif
+
+#ifdef DEBUG_CACHE_CONTROL
+#define DPRINTF_CACHE_CONTROL(fmt, ...) \
+ do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
+#endif
+
+#ifdef TARGET_SPARC64
+#ifndef TARGET_ABI32
+#define AM_CHECK(env1) ((env1)->pstate & PS_AM)
+#else
+#define AM_CHECK(env1) (1)
+#endif
+#endif
+
+#define DT0 (env->dt0)
+#define DT1 (env->dt1)
+#define QT0 (env->qt0)
+#define QT1 (env->qt1)
+
+/* Leon3 cache control */
+
+/* Cache control: emulate the behavior of the cache control registers
+   but without any effect on the emulated caches themselves */
+
+#define CACHE_STATE_MASK 0x3
+#define CACHE_DISABLED 0x0
+#define CACHE_FROZEN 0x1
+#define CACHE_ENABLED 0x3
+
+/* Cache Control register fields */
+
+#define CACHE_CTRL_IF (1 << 4) /* Instruction Cache Freeze on Interrupt */
+#define CACHE_CTRL_DF (1 << 5) /* Data Cache Freeze on Interrupt */
+#define CACHE_CTRL_DP (1 << 14) /* Data cache flush pending */
+#define CACHE_CTRL_IP (1 << 15) /* Instruction cache flush pending */
+#define CACHE_CTRL_IB (1 << 16) /* Instruction burst fetch */
+#define CACHE_CTRL_FI (1 << 21) /* Flush Instruction cache (Write only) */
+#define CACHE_CTRL_FD (1 << 22) /* Flush Data cache (Write only) */
+#define CACHE_CTRL_DS (1 << 23) /* Data cache snoop enable */
+
+#if defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
+static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
+ int is_asi, int size);
+#endif
+
+#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+// Calculates TSB pointer value for fault page size 8k or 64k
+static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
+ uint64_t tag_access_register,
+ int page_size)
+{
+ uint64_t tsb_base = tsb_register & ~0x1fffULL;
+ int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
+ int tsb_size = tsb_register & 0xf;
+
+ // discard lower 13 bits which hold tag access context
+ uint64_t tag_access_va = tag_access_register & ~0x1fffULL;
+
+ // now reorder bits
+ uint64_t tsb_base_mask = ~0x1fffULL;
+ uint64_t va = tag_access_va;
+
+ // move va bits to correct position
+ if (page_size == 8*1024) {
+ va >>= 9;
+ } else if (page_size == 64*1024) {
+ va >>= 12;
+ }
+
+ if (tsb_size) {
+ tsb_base_mask <<= tsb_size;
+ }
+
+ // calculate tsb_base mask and adjust va if split is in use
+ if (tsb_split) {
+ if (page_size == 8*1024) {
+ va &= ~(1ULL << (13 + tsb_size));
+ } else if (page_size == 64*1024) {
+ va |= (1ULL << (13 + tsb_size));
+ }
+ tsb_base_mask <<= 1;
+ }
+
+ return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
+}
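+
+/* Worked example with illustrative values: for an 8k page with
+   tsb_size = 0 and no split, tag access VA 0x2000 gives
+   va = 0x2000 >> 9 = 0x10, so the result is tsb_base + 0x10, i.e. the
+   second 16-byte TSB entry; the final "& ~0xf" keeps the pointer
+   entry-aligned. */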
+
+// Calculates tag target register value by reordering bits
+// in tag access register
+static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
+{
+ return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22);
+}
+
+static void replace_tlb_entry(SparcTLBEntry *tlb,
+ uint64_t tlb_tag, uint64_t tlb_tte,
+ CPUState *env1)
+{
+ target_ulong mask, size, va, offset;
+
+ // flush page range if translation is valid
+ if (TTE_IS_VALID(tlb->tte)) {
+
+ mask = 0xffffffffffffe000ULL;
+ mask <<= 3 * ((tlb->tte >> 61) & 3);
+ size = ~mask + 1;
+
+ va = tlb->tag & mask;
+
+ for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
+ tlb_flush_page(env1, va + offset);
+ }
+ }
+
+ tlb->tag = tlb_tag;
+ tlb->tte = tlb_tte;
+}
+
+static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
+ const char* strmmu, CPUState *env1)
+{
+ unsigned int i;
+ target_ulong mask;
+ uint64_t context;
+
+ int is_demap_context = (demap_addr >> 6) & 1;
+
+ // demap context
+ switch ((demap_addr >> 4) & 3) {
+ case 0: // primary
+ context = env1->dmmu.mmu_primary_context;
+ break;
+ case 1: // secondary
+ context = env1->dmmu.mmu_secondary_context;
+ break;
+ case 2: // nucleus
+ context = 0;
+ break;
+ case 3: // reserved
+ default:
+ return;
+ }
+
+ for (i = 0; i < 64; i++) {
+ if (TTE_IS_VALID(tlb[i].tte)) {
+
+ if (is_demap_context) {
+ // will remove non-global entries matching context value
+ if (TTE_IS_GLOBAL(tlb[i].tte) ||
+ !tlb_compare_context(&tlb[i], context)) {
+ continue;
+ }
+ } else {
+ // demap page
+ // will remove any entry matching VA
+ mask = 0xffffffffffffe000ULL;
+ mask <<= 3 * ((tlb[i].tte >> 61) & 3);
+
+ if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
+ continue;
+ }
+
+ // entry should be global or matching context value
+ if (!TTE_IS_GLOBAL(tlb[i].tte) &&
+ !tlb_compare_context(&tlb[i], context)) {
+ continue;
+ }
+ }
+
+ replace_tlb_entry(&tlb[i], 0, 0, env1);
+#ifdef DEBUG_MMU
+ DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
+ dump_mmu(stdout, fprintf, env1);
+#endif
+ }
+ }
+}
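+
+/* The demap address encodes the operation: bit 6 selects demap-context
+   (1) versus demap-page (0), and bits 5:4 select the context register
+   to compare against (0 primary, 1 secondary, 2 nucleus, 3 reserved). */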
+
+static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
+ uint64_t tlb_tag, uint64_t tlb_tte,
+ const char* strmmu, CPUState *env1)
+{
+ unsigned int i, replace_used;
+
+ // Try replacing invalid entry
+ for (i = 0; i < 64; i++) {
+ if (!TTE_IS_VALID(tlb[i].tte)) {
+ replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
+#ifdef DEBUG_MMU
+ DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
+ dump_mmu(stdout, fprintf, env1);
+#endif
+ return;
+ }
+ }
+
+ // All entries are valid, try replacing unlocked entry
+
+ for (replace_used = 0; replace_used < 2; ++replace_used) {
+
+ // Used entries are not replaced on first pass
+
+ for (i = 0; i < 64; i++) {
+ if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {
+
+ replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
+#ifdef DEBUG_MMU
+ DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
+ strmmu, (replace_used?"used":"unused"), i);
+ dump_mmu(stdout, fprintf, env1);
+#endif
+ return;
+ }
+ }
+
+ // Now reset used bit and search for unused entries again
+
+ for (i = 0; i < 64; i++) {
+ TTE_SET_UNUSED(tlb[i].tte);
+ }
+ }
+
+#ifdef DEBUG_MMU
+ DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu);
+#endif
+ // error state?
+}
+
+#endif
+
+static inline target_ulong address_mask(CPUState *env1, target_ulong addr)
+{
+#ifdef TARGET_SPARC64
+ if (AM_CHECK(env1))
+ addr &= 0xffffffffULL;
+#endif
+ return addr;
+}
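+
+/* E.g. with PSTATE.AM set, a 64-bit address such as 0xfffff80012345678
+   is truncated to its low 32 bits, 0x12345678. */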
+
+/* Returns true when an access through this ASI has its address
+   translated by the MMU; otherwise the access uses the raw physical
+   address. */
+static inline int is_translating_asi(int asi)
+{
+#ifdef TARGET_SPARC64
+    /* UltraSPARC IIi translating ASIs
+       - note this list is defined by the CPU implementation
+     */
+ switch (asi) {
+ case 0x04 ... 0x11:
+ case 0x18 ... 0x19:
+ case 0x24 ... 0x2C:
+ case 0x70 ... 0x73:
+ case 0x78 ... 0x79:
+ case 0x80 ... 0xFF:
+ return 1;
+
+ default:
+ return 0;
+ }
+#else
+ /* TODO: check sparc32 bits */
+ return 0;
+#endif
+}
+
+static inline target_ulong asi_address_mask(CPUState *env1,
+ int asi, target_ulong addr)
+{
+ if (is_translating_asi(asi)) {
+ return address_mask(env, addr);
+ } else {
+ return addr;
+ }
+}
+
+static void raise_exception(int tt)
+{
+ env->exception_index = tt;
+ cpu_loop_exit();
+}
+
+void HELPER(raise_exception)(int tt)
+{
+ raise_exception(tt);
+}
+
+void helper_shutdown(void)
+{
+#if !defined(CONFIG_USER_ONLY)
+ qemu_system_shutdown_request();
+#endif
+}
+
+void helper_check_align(target_ulong addr, uint32_t align)
+{
+ if (addr & align) {
+#ifdef DEBUG_UNALIGNED
+ printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
+ "\n", addr, env->pc);
+#endif
+ raise_exception(TT_UNALIGNED);
+ }
+}
+
+#define F_HELPER(name, p) void helper_f##name##p(void)
+
+#define F_BINOP(name) \
+ float32 helper_f ## name ## s (float32 src1, float32 src2) \
+ { \
+ return float32_ ## name (src1, src2, &env->fp_status); \
+ } \
+ F_HELPER(name, d) \
+ { \
+ DT0 = float64_ ## name (DT0, DT1, &env->fp_status); \
+ } \
+ F_HELPER(name, q) \
+ { \
+ QT0 = float128_ ## name (QT0, QT1, &env->fp_status); \
+ }
+
+F_BINOP(add);
+F_BINOP(sub);
+F_BINOP(mul);
+F_BINOP(div);
+#undef F_BINOP
+
+void helper_fsmuld(float32 src1, float32 src2)
+{
+ DT0 = float64_mul(float32_to_float64(src1, &env->fp_status),
+ float32_to_float64(src2, &env->fp_status),
+ &env->fp_status);
+}
+
+void helper_fdmulq(void)
+{
+ QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
+ float64_to_float128(DT1, &env->fp_status),
+ &env->fp_status);
+}
+
+float32 helper_fnegs(float32 src)
+{
+ return float32_chs(src);
+}
+
+#ifdef TARGET_SPARC64
+F_HELPER(neg, d)
+{
+ DT0 = float64_chs(DT1);
+}
+
+F_HELPER(neg, q)
+{
+ QT0 = float128_chs(QT1);
+}
+#endif
+
+/* Integer to float conversion. */
+float32 helper_fitos(int32_t src)
+{
+ return int32_to_float32(src, &env->fp_status);
+}
+
+void helper_fitod(int32_t src)
+{
+ DT0 = int32_to_float64(src, &env->fp_status);
+}
+
+void helper_fitoq(int32_t src)
+{
+ QT0 = int32_to_float128(src, &env->fp_status);
+}
+
+#ifdef TARGET_SPARC64
+float32 helper_fxtos(void)
+{
+ return int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
+}
+
+F_HELPER(xto, d)
+{
+ DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
+}
+
+F_HELPER(xto, q)
+{
+ QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
+}
+#endif
+#undef F_HELPER
+
+/* floating point conversion */
+float32 helper_fdtos(void)
+{
+ return float64_to_float32(DT1, &env->fp_status);
+}
+
+void helper_fstod(float32 src)
+{
+ DT0 = float32_to_float64(src, &env->fp_status);
+}
+
+float32 helper_fqtos(void)
+{
+ return float128_to_float32(QT1, &env->fp_status);
+}
+
+void helper_fstoq(float32 src)
+{
+ QT0 = float32_to_float128(src, &env->fp_status);
+}
+
+void helper_fqtod(void)
+{
+ DT0 = float128_to_float64(QT1, &env->fp_status);
+}
+
+void helper_fdtoq(void)
+{
+ QT0 = float64_to_float128(DT1, &env->fp_status);
+}
+
+/* Float to integer conversion. */
+int32_t helper_fstoi(float32 src)
+{
+ return float32_to_int32_round_to_zero(src, &env->fp_status);
+}
+
+int32_t helper_fdtoi(void)
+{
+ return float64_to_int32_round_to_zero(DT1, &env->fp_status);
+}
+
+int32_t helper_fqtoi(void)
+{
+ return float128_to_int32_round_to_zero(QT1, &env->fp_status);
+}
+
+#ifdef TARGET_SPARC64
+void helper_fstox(float32 src)
+{
+ *((int64_t *)&DT0) = float32_to_int64_round_to_zero(src, &env->fp_status);
+}
+
+void helper_fdtox(void)
+{
+ *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
+}
+
+void helper_fqtox(void)
+{
+ *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
+}
+
+void helper_faligndata(void)
+{
+ uint64_t tmp;
+
+ tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
+ /* on many architectures a shift of 64 does nothing */
+ if ((env->gsr & 7) != 0) {
+ tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
+ }
+ *((uint64_t *)&DT0) = tmp;
+}
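+
+/* E.g. with GSR.align = 3 the result is (DT0 << 24) | (DT1 >> 40): in
+   byte terms, bytes 3..7 of DT0 followed by bytes 0..2 of DT1. */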
+
+#ifdef HOST_WORDS_BIGENDIAN
+#define VIS_B64(n) b[7 - (n)]
+#define VIS_W64(n) w[3 - (n)]
+#define VIS_SW64(n) sw[3 - (n)]
+#define VIS_L64(n) l[1 - (n)]
+#define VIS_B32(n) b[3 - (n)]
+#define VIS_W32(n) w[1 - (n)]
+#else
+#define VIS_B64(n) b[n]
+#define VIS_W64(n) w[n]
+#define VIS_SW64(n) sw[n]
+#define VIS_L64(n) l[n]
+#define VIS_B32(n) b[n]
+#define VIS_W32(n) w[n]
+#endif
+
+typedef union {
+ uint8_t b[8];
+ uint16_t w[4];
+ int16_t sw[4];
+ uint32_t l[2];
+ float64 d;
+} vis64;
+
+typedef union {
+ uint8_t b[4];
+ uint16_t w[2];
+ uint32_t l;
+ float32 f;
+} vis32;
+
+void helper_fpmerge(void)
+{
+ vis64 s, d;
+
+ s.d = DT0;
+ d.d = DT1;
+
+ // Reverse calculation order to handle overlap
+ d.VIS_B64(7) = s.VIS_B64(3);
+ d.VIS_B64(6) = d.VIS_B64(3);
+ d.VIS_B64(5) = s.VIS_B64(2);
+ d.VIS_B64(4) = d.VIS_B64(2);
+ d.VIS_B64(3) = s.VIS_B64(1);
+ d.VIS_B64(2) = d.VIS_B64(1);
+ d.VIS_B64(1) = s.VIS_B64(0);
+ //d.VIS_B64(0) = d.VIS_B64(0);
+
+ DT0 = d.d;
+}
+
+void helper_fmul8x16(void)
+{
+ vis64 s, d;
+ uint32_t tmp;
+
+ s.d = DT0;
+ d.d = DT1;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r); \
+ if ((tmp & 0xff) > 0x7f) \
+ tmp += 0x100; \
+ d.VIS_W64(r) = tmp >> 8;
+
+ PMUL(0);
+ PMUL(1);
+ PMUL(2);
+ PMUL(3);
+#undef PMUL
+
+ DT0 = d.d;
+}
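+
+/* The "> 0x7f" adjustment rounds the 24-bit product to the nearest
+   16-bit result before the >> 8: e.g. tmp = 0x1ff (511) gains 0x100 and
+   yields 2, whereas tmp = 0x17f (383) yields 1. */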
+
+void helper_fmul8x16al(void)
+{
+ vis64 s, d;
+ uint32_t tmp;
+
+ s.d = DT0;
+ d.d = DT1;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r); \
+ if ((tmp & 0xff) > 0x7f) \
+ tmp += 0x100; \
+ d.VIS_W64(r) = tmp >> 8;
+
+ PMUL(0);
+ PMUL(1);
+ PMUL(2);
+ PMUL(3);
+#undef PMUL
+
+ DT0 = d.d;
+}
+
+void helper_fmul8x16au(void)
+{
+ vis64 s, d;
+ uint32_t tmp;
+
+ s.d = DT0;
+ d.d = DT1;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r); \
+ if ((tmp & 0xff) > 0x7f) \
+ tmp += 0x100; \
+ d.VIS_W64(r) = tmp >> 8;
+
+ PMUL(0);
+ PMUL(1);
+ PMUL(2);
+ PMUL(3);
+#undef PMUL
+
+ DT0 = d.d;
+}
+
+void helper_fmul8sux16(void)
+{
+ vis64 s, d;
+ uint32_t tmp;
+
+ s.d = DT0;
+ d.d = DT1;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
+ if ((tmp & 0xff) > 0x7f) \
+ tmp += 0x100; \
+ d.VIS_W64(r) = tmp >> 8;
+
+ PMUL(0);
+ PMUL(1);
+ PMUL(2);
+ PMUL(3);
+#undef PMUL
+
+ DT0 = d.d;
+}
+
+void helper_fmul8ulx16(void)
+{
+ vis64 s, d;
+ uint32_t tmp;
+
+ s.d = DT0;
+ d.d = DT1;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
+ if ((tmp & 0xff) > 0x7f) \
+ tmp += 0x100; \
+ d.VIS_W64(r) = tmp >> 8;
+
+ PMUL(0);
+ PMUL(1);
+ PMUL(2);
+ PMUL(3);
+#undef PMUL
+
+ DT0 = d.d;
+}
+
+void helper_fmuld8sux16(void)
+{
+ vis64 s, d;
+ uint32_t tmp;
+
+ s.d = DT0;
+ d.d = DT1;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
+ if ((tmp & 0xff) > 0x7f) \
+ tmp += 0x100; \
+ d.VIS_L64(r) = tmp;
+
+ // Reverse calculation order to handle overlap
+ PMUL(1);
+ PMUL(0);
+#undef PMUL
+
+ DT0 = d.d;
+}
+
+void helper_fmuld8ulx16(void)
+{
+ vis64 s, d;
+ uint32_t tmp;
+
+ s.d = DT0;
+ d.d = DT1;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
+ if ((tmp & 0xff) > 0x7f) \
+ tmp += 0x100; \
+ d.VIS_L64(r) = tmp;
+
+ // Reverse calculation order to handle overlap
+ PMUL(1);
+ PMUL(0);
+#undef PMUL
+
+ DT0 = d.d;
+}
+
+void helper_fexpand(void)
+{
+ vis32 s;
+ vis64 d;
+
+ s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
+ d.d = DT1;
+ d.VIS_W64(0) = s.VIS_B32(0) << 4;
+ d.VIS_W64(1) = s.VIS_B32(1) << 4;
+ d.VIS_W64(2) = s.VIS_B32(2) << 4;
+ d.VIS_W64(3) = s.VIS_B32(3) << 4;
+
+ DT0 = d.d;
+}
+
+#define VIS_HELPER(name, F) \
+ void name##16(void) \
+ { \
+ vis64 s, d; \
+ \
+ s.d = DT0; \
+ d.d = DT1; \
+ \
+ d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \
+ d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \
+ d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \
+ d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \
+ \
+ DT0 = d.d; \
+ } \
+ \
+ uint32_t name##16s(uint32_t src1, uint32_t src2) \
+ { \
+ vis32 s, d; \
+ \
+ s.l = src1; \
+ d.l = src2; \
+ \
+ d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0)); \
+ d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1)); \
+ \
+ return d.l; \
+ } \
+ \
+ void name##32(void) \
+ { \
+ vis64 s, d; \
+ \
+ s.d = DT0; \
+ d.d = DT1; \
+ \
+ d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \
+ d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \
+ \
+ DT0 = d.d; \
+ } \
+ \
+ uint32_t name##32s(uint32_t src1, uint32_t src2) \
+ { \
+ vis32 s, d; \
+ \
+ s.l = src1; \
+ d.l = src2; \
+ \
+ d.l = F(d.l, s.l); \
+ \
+ return d.l; \
+ }
+
+#define FADD(a, b) ((a) + (b))
+#define FSUB(a, b) ((a) - (b))
+VIS_HELPER(helper_fpadd, FADD)
+VIS_HELPER(helper_fpsub, FSUB)
+
+#define VIS_CMPHELPER(name, F) \
+ void name##16(void) \
+ { \
+ vis64 s, d; \
+ \
+ s.d = DT0; \
+ d.d = DT1; \
+ \
+ d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0; \
+ d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0; \
+ d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0; \
+ d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0; \
+ \
+ DT0 = d.d; \
+ } \
+ \
+ void name##32(void) \
+ { \
+ vis64 s, d; \
+ \
+ s.d = DT0; \
+ d.d = DT1; \
+ \
+ d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0; \
+ d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0; \
+ \
+ DT0 = d.d; \
+ }
+
+#define FCMPGT(a, b) ((a) > (b))
+#define FCMPEQ(a, b) ((a) == (b))
+#define FCMPLE(a, b) ((a) <= (b))
+#define FCMPNE(a, b) ((a) != (b))
+
+VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
+VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
+VIS_CMPHELPER(helper_fcmple, FCMPLE)
+VIS_CMPHELPER(helper_fcmpne, FCMPNE)
+#endif
+
+void helper_check_ieee_exceptions(void)
+{
+ target_ulong status;
+
+ status = get_float_exception_flags(&env->fp_status);
+ if (status) {
+ /* Copy IEEE 754 flags into FSR */
+ if (status & float_flag_invalid)
+ env->fsr |= FSR_NVC;
+ if (status & float_flag_overflow)
+ env->fsr |= FSR_OFC;
+ if (status & float_flag_underflow)
+ env->fsr |= FSR_UFC;
+ if (status & float_flag_divbyzero)
+ env->fsr |= FSR_DZC;
+ if (status & float_flag_inexact)
+ env->fsr |= FSR_NXC;
+
+ if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
+ /* Unmasked exception, generate a trap */
+ env->fsr |= FSR_FTT_IEEE_EXCP;
+ raise_exception(TT_FP_EXCP);
+ } else {
+ /* Accumulate exceptions */
+ env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
+ }
+ }
+}
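+
+/* FSR layout used here: the current-exception field (CEXC) sits in bits
+   4:0 and the trap-enable mask (TEM) in bits 27:23, hence the >> 23 to
+   line the two up; the accrued field (AEXC) occupies bits 9:5, which is
+   why unreported exceptions are folded in with "<< 5". */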
+
+void helper_clear_float_exceptions(void)
+{
+ set_float_exception_flags(0, &env->fp_status);
+}
+
+float32 helper_fabss(float32 src)
+{
+ return float32_abs(src);
+}
+
+#ifdef TARGET_SPARC64
+void helper_fabsd(void)
+{
+ DT0 = float64_abs(DT1);
+}
+
+void helper_fabsq(void)
+{
+ QT0 = float128_abs(QT1);
+}
+#endif
+
+float32 helper_fsqrts(float32 src)
+{
+ return float32_sqrt(src, &env->fp_status);
+}
+
+void helper_fsqrtd(void)
+{
+ DT0 = float64_sqrt(DT1, &env->fp_status);
+}
+
+void helper_fsqrtq(void)
+{
+ QT0 = float128_sqrt(QT1, &env->fp_status);
+}
+
+#define GEN_FCMP(name, size, reg1, reg2, FS, E) \
+ void glue(helper_, name) (void) \
+ { \
+ env->fsr &= FSR_FTT_NMASK; \
+ if (E && (glue(size, _is_any_nan)(reg1) || \
+ glue(size, _is_any_nan)(reg2)) && \
+ (env->fsr & FSR_NVM)) { \
+ env->fsr |= FSR_NVC; \
+ env->fsr |= FSR_FTT_IEEE_EXCP; \
+ raise_exception(TT_FP_EXCP); \
+ } \
+ switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) { \
+ case float_relation_unordered: \
+ if ((env->fsr & FSR_NVM)) { \
+ env->fsr |= FSR_NVC; \
+ env->fsr |= FSR_FTT_IEEE_EXCP; \
+ raise_exception(TT_FP_EXCP); \
+ } else { \
+ env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
+ env->fsr |= (FSR_FCC1 | FSR_FCC0) << FS; \
+ env->fsr |= FSR_NVA; \
+ } \
+ break; \
+ case float_relation_less: \
+ env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
+ env->fsr |= FSR_FCC0 << FS; \
+ break; \
+ case float_relation_greater: \
+ env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
+ env->fsr |= FSR_FCC1 << FS; \
+ break; \
+ default: \
+ env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
+ break; \
+ } \
+ }
+#define GEN_FCMPS(name, size, FS, E) \
+ void glue(helper_, name)(float32 src1, float32 src2) \
+ { \
+ env->fsr &= FSR_FTT_NMASK; \
+ if (E && (glue(size, _is_any_nan)(src1) || \
+ glue(size, _is_any_nan)(src2)) && \
+ (env->fsr & FSR_NVM)) { \
+ env->fsr |= FSR_NVC; \
+ env->fsr |= FSR_FTT_IEEE_EXCP; \
+ raise_exception(TT_FP_EXCP); \
+ } \
+ switch (glue(size, _compare) (src1, src2, &env->fp_status)) { \
+ case float_relation_unordered: \
+ if ((env->fsr & FSR_NVM)) { \
+ env->fsr |= FSR_NVC; \
+ env->fsr |= FSR_FTT_IEEE_EXCP; \
+ raise_exception(TT_FP_EXCP); \
+ } else { \
+ env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
+ env->fsr |= (FSR_FCC1 | FSR_FCC0) << FS; \
+ env->fsr |= FSR_NVA; \
+ } \
+ break; \
+ case float_relation_less: \
+ env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
+ env->fsr |= FSR_FCC0 << FS; \
+ break; \
+ case float_relation_greater: \
+ env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
+ env->fsr |= FSR_FCC1 << FS; \
+ break; \
+ default: \
+ env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
+ break; \
+ } \
+ }
+
+GEN_FCMPS(fcmps, float32, 0, 0);
+GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);
+
+GEN_FCMPS(fcmpes, float32, 0, 1);
+GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);
+
+GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
+GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
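+
+/* FS is the extra left shift applied to the FSR_FCC0/FCC1 pair: 0 hits
+   fcc0 (FSR bits 11:10), while 22, 24 and 26 (used further down for V9)
+   land on fcc1, fcc2 and fcc3 at FSR bits 33:32, 35:34 and 37:36. */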
+
+static uint32_t compute_all_flags(void)
+{
+ return env->psr & PSR_ICC;
+}
+
+static uint32_t compute_C_flags(void)
+{
+ return env->psr & PSR_CARRY;
+}
+
+static inline uint32_t get_NZ_icc(int32_t dst)
+{
+ uint32_t ret = 0;
+
+ if (dst == 0) {
+ ret = PSR_ZERO;
+ } else if (dst < 0) {
+ ret = PSR_NEG;
+ }
+ return ret;
+}
+
+#ifdef TARGET_SPARC64
+static uint32_t compute_all_flags_xcc(void)
+{
+ return env->xcc & PSR_ICC;
+}
+
+static uint32_t compute_C_flags_xcc(void)
+{
+ return env->xcc & PSR_CARRY;
+}
+
+static inline uint32_t get_NZ_xcc(target_long dst)
+{
+ uint32_t ret = 0;
+
+ if (!dst) {
+ ret = PSR_ZERO;
+ } else if (dst < 0) {
+ ret = PSR_NEG;
+ }
+ return ret;
+}
+#endif
+
+static inline uint32_t get_V_div_icc(target_ulong src2)
+{
+ uint32_t ret = 0;
+
+ if (src2 != 0) {
+ ret = PSR_OVF;
+ }
+ return ret;
+}
+
+static uint32_t compute_all_div(void)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_V_div_icc(CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_div(void)
+{
+ return 0;
+}
+
+static inline uint32_t get_C_add_icc(uint32_t dst, uint32_t src1)
+{
+ uint32_t ret = 0;
+
+ if (dst < src1) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_C_addx_icc(uint32_t dst, uint32_t src1,
+ uint32_t src2)
+{
+ uint32_t ret = 0;
+
+ if (((src1 & src2) | (~dst & (src1 | src2))) & (1U << 31)) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_V_add_icc(uint32_t dst, uint32_t src1,
+ uint32_t src2)
+{
+ uint32_t ret = 0;
+
+ if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1U << 31)) {
+ ret = PSR_OVF;
+ }
+ return ret;
+}
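+
+/* These are the usual bitwise derivations: the carry is the carry-out
+   of bit 31, (src1 & src2) | (~dst & (src1 | src2)), and signed
+   overflow occurs when both operands share a sign that differs from the
+   result's, e.g. 0x7fffffff + 1 = 0x80000000 sets V but not C. */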
+
+#ifdef TARGET_SPARC64
+static inline uint32_t get_C_add_xcc(target_ulong dst, target_ulong src1)
+{
+ uint32_t ret = 0;
+
+ if (dst < src1) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_C_addx_xcc(target_ulong dst, target_ulong src1,
+ target_ulong src2)
+{
+ uint32_t ret = 0;
+
+ if (((src1 & src2) | (~dst & (src1 | src2))) & (1ULL << 63)) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_V_add_xcc(target_ulong dst, target_ulong src1,
+ target_ulong src2)
+{
+ uint32_t ret = 0;
+
+ if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1ULL << 63)) {
+ ret = PSR_OVF;
+ }
+ return ret;
+}
+
+static uint32_t compute_all_add_xcc(void)
+{
+ uint32_t ret;
+
+ ret = get_NZ_xcc(CC_DST);
+ ret |= get_C_add_xcc(CC_DST, CC_SRC);
+ ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_add_xcc(void)
+{
+ return get_C_add_xcc(CC_DST, CC_SRC);
+}
+#endif
+
+static uint32_t compute_all_add(void)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_add_icc(CC_DST, CC_SRC);
+ ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_add(void)
+{
+ return get_C_add_icc(CC_DST, CC_SRC);
+}
+
+#ifdef TARGET_SPARC64
+static uint32_t compute_all_addx_xcc(void)
+{
+ uint32_t ret;
+
+ ret = get_NZ_xcc(CC_DST);
+ ret |= get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
+ ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_addx_xcc(void)
+{
+ uint32_t ret;
+
+ ret = get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+#endif
+
+static uint32_t compute_all_addx(void)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
+ ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_addx(void)
+{
+ uint32_t ret;
+
+ ret = get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static inline uint32_t get_V_tag_icc(target_ulong src1, target_ulong src2)
+{
+ uint32_t ret = 0;
+
+ if ((src1 | src2) & 0x3) {
+ ret = PSR_OVF;
+ }
+ return ret;
+}
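+
+/* Tagged arithmetic (TADDcc and friends) treats the low two bits of
+   each operand as a type tag; a nonzero tag in either operand forces
+   the overflow bit, which is exactly what this check implements. */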
+
+static uint32_t compute_all_tadd(void)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_add_icc(CC_DST, CC_SRC);
+ ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
+ ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_all_taddtv(void)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_add_icc(CC_DST, CC_SRC);
+ return ret;
+}
+
+static inline uint32_t get_C_sub_icc(uint32_t src1, uint32_t src2)
+{
+ uint32_t ret = 0;
+
+ if (src1 < src2) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_C_subx_icc(uint32_t dst, uint32_t src1,
+ uint32_t src2)
+{
+ uint32_t ret = 0;
+
+ if (((~src1 & src2) | (dst & (~src1 | src2))) & (1U << 31)) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_V_sub_icc(uint32_t dst, uint32_t src1,
+ uint32_t src2)
+{
+ uint32_t ret = 0;
+
+ if (((src1 ^ src2) & (src1 ^ dst)) & (1U << 31)) {
+ ret = PSR_OVF;
+ }
+ return ret;
+}
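+
+/* Subtraction mirrors the addition case: the carry expression is the
+   borrow out of bit 31, and overflow requires operands of opposite sign
+   with a result whose sign differs from src1, e.g. 0x80000000 - 1 =
+   0x7fffffff sets V. */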
+
+
+#ifdef TARGET_SPARC64
+static inline uint32_t get_C_sub_xcc(target_ulong src1, target_ulong src2)
+{
+ uint32_t ret = 0;
+
+ if (src1 < src2) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_C_subx_xcc(target_ulong dst, target_ulong src1,
+ target_ulong src2)
+{
+ uint32_t ret = 0;
+
+ if (((~src1 & src2) | (dst & (~src1 | src2))) & (1ULL << 63)) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_V_sub_xcc(target_ulong dst, target_ulong src1,
+ target_ulong src2)
+{
+ uint32_t ret = 0;
+
+ if (((src1 ^ src2) & (src1 ^ dst)) & (1ULL << 63)) {
+ ret = PSR_OVF;
+ }
+ return ret;
+}
+
+static uint32_t compute_all_sub_xcc(void)
+{
+ uint32_t ret;
+
+ ret = get_NZ_xcc(CC_DST);
+ ret |= get_C_sub_xcc(CC_SRC, CC_SRC2);
+ ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_sub_xcc(void)
+{
+ return get_C_sub_xcc(CC_SRC, CC_SRC2);
+}
+#endif
+
+static uint32_t compute_all_sub(void)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
+ ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_sub(void)
+{
+ return get_C_sub_icc(CC_SRC, CC_SRC2);
+}
+
+#ifdef TARGET_SPARC64
+static uint32_t compute_all_subx_xcc(void)
+{
+ uint32_t ret;
+
+ ret = get_NZ_xcc(CC_DST);
+ ret |= get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
+ ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_subx_xcc(void)
+{
+ uint32_t ret;
+
+ ret = get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+#endif
+
+static uint32_t compute_all_subx(void)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
+ ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_subx(void)
+{
+ uint32_t ret;
+
+ ret = get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_all_tsub(void)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
+ ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
+ ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_all_tsubtv(void)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_all_logic(void)
+{
+ return get_NZ_icc(CC_DST);
+}
+
+static uint32_t compute_C_logic(void)
+{
+ return 0;
+}
+
+#ifdef TARGET_SPARC64
+static uint32_t compute_all_logic_xcc(void)
+{
+ return get_NZ_xcc(CC_DST);
+}
+#endif
+
+typedef struct CCTable {
+ uint32_t (*compute_all)(void); /* return all the flags */
+ uint32_t (*compute_c)(void); /* return the C flag */
+} CCTable;
+
+static const CCTable icc_table[CC_OP_NB] = {
+ /* CC_OP_DYNAMIC should never happen */
+ [CC_OP_FLAGS] = { compute_all_flags, compute_C_flags },
+ [CC_OP_DIV] = { compute_all_div, compute_C_div },
+ [CC_OP_ADD] = { compute_all_add, compute_C_add },
+ [CC_OP_ADDX] = { compute_all_addx, compute_C_addx },
+ [CC_OP_TADD] = { compute_all_tadd, compute_C_add },
+ [CC_OP_TADDTV] = { compute_all_taddtv, compute_C_add },
+ [CC_OP_SUB] = { compute_all_sub, compute_C_sub },
+ [CC_OP_SUBX] = { compute_all_subx, compute_C_subx },
+ [CC_OP_TSUB] = { compute_all_tsub, compute_C_sub },
+ [CC_OP_TSUBTV] = { compute_all_tsubtv, compute_C_sub },
+ [CC_OP_LOGIC] = { compute_all_logic, compute_C_logic },
+};
+
+#ifdef TARGET_SPARC64
+static const CCTable xcc_table[CC_OP_NB] = {
+ /* CC_OP_DYNAMIC should never happen */
+ [CC_OP_FLAGS] = { compute_all_flags_xcc, compute_C_flags_xcc },
+ [CC_OP_DIV] = { compute_all_logic_xcc, compute_C_logic },
+ [CC_OP_ADD] = { compute_all_add_xcc, compute_C_add_xcc },
+ [CC_OP_ADDX] = { compute_all_addx_xcc, compute_C_addx_xcc },
+ [CC_OP_TADD] = { compute_all_add_xcc, compute_C_add_xcc },
+ [CC_OP_TADDTV] = { compute_all_add_xcc, compute_C_add_xcc },
+ [CC_OP_SUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
+ [CC_OP_SUBX] = { compute_all_subx_xcc, compute_C_subx_xcc },
+ [CC_OP_TSUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
+ [CC_OP_TSUBTV] = { compute_all_sub_xcc, compute_C_sub_xcc },
+ [CC_OP_LOGIC] = { compute_all_logic_xcc, compute_C_logic },
+};
+#endif
+
+void helper_compute_psr(void)
+{
+ uint32_t new_psr;
+
+ new_psr = icc_table[CC_OP].compute_all();
+ env->psr = new_psr;
+#ifdef TARGET_SPARC64
+ new_psr = xcc_table[CC_OP].compute_all();
+ env->xcc = new_psr;
+#endif
+ CC_OP = CC_OP_FLAGS;
+}
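+
+/* Condition codes are evaluated lazily: translated code merely records
+   CC_OP plus the CC_SRC/CC_SRC2/CC_DST operands, and the NZVC bits are
+   only materialized here (or in helper_compute_C_icc below) on demand,
+   after which CC_OP collapses to CC_OP_FLAGS. */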
+
+uint32_t helper_compute_C_icc(void)
+{
+ uint32_t ret;
+
+ ret = icc_table[CC_OP].compute_c() >> PSR_CARRY_SHIFT;
+ return ret;
+}
+
+static inline void memcpy32(target_ulong *dst, const target_ulong *src)
+{
+ dst[0] = src[0];
+ dst[1] = src[1];
+ dst[2] = src[2];
+ dst[3] = src[3];
+ dst[4] = src[4];
+ dst[5] = src[5];
+ dst[6] = src[6];
+ dst[7] = src[7];
+}
+
+static void set_cwp(int new_cwp)
+{
+ /* put the modified wrap registers at their proper location */
+ if (env->cwp == env->nwindows - 1) {
+ memcpy32(env->regbase, env->regbase + env->nwindows * 16);
+ }
+ env->cwp = new_cwp;
+
+ /* put the wrap registers at their temporary location */
+ if (new_cwp == env->nwindows - 1) {
+ memcpy32(env->regbase + env->nwindows * 16, env->regbase);
+ }
+ env->regwptr = env->regbase + (new_cwp * 16);
+}
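+
+/* In this layout window N's ins occupy the same 8 slots as window N+1's
+   outs, so regwptr simply advances 16 registers per window. The two
+   memcpy32 calls maintain the 8 shadow slots past the end of regbase,
+   which stand in for window 0's outs while the highest window is
+   current. */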
+
+void cpu_set_cwp(CPUState *env1, int new_cwp)
+{
+ CPUState *saved_env;
+
+ saved_env = env;
+ env = env1;
+ set_cwp(new_cwp);
+ env = saved_env;
+}
+
+static target_ulong get_psr(void)
+{
+ helper_compute_psr();
+
+#if !defined (TARGET_SPARC64)
+ return env->version | (env->psr & PSR_ICC) |
+ (env->psref? PSR_EF : 0) |
+ (env->psrpil << 8) |
+ (env->psrs? PSR_S : 0) |
+ (env->psrps? PSR_PS : 0) |
+ (env->psret? PSR_ET : 0) | env->cwp;
+#else
+ return env->psr & PSR_ICC;
+#endif
+}
+
+target_ulong cpu_get_psr(CPUState *env1)
+{
+ CPUState *saved_env;
+ target_ulong ret;
+
+ saved_env = env;
+ env = env1;
+ ret = get_psr();
+ env = saved_env;
+ return ret;
+}
+
+static void put_psr(target_ulong val)
+{
+ env->psr = val & PSR_ICC;
+#if !defined (TARGET_SPARC64)
+ env->psref = (val & PSR_EF)? 1 : 0;
+ env->psrpil = (val & PSR_PIL) >> 8;
+#endif
+#if ((!defined (TARGET_SPARC64)) && !defined(CONFIG_USER_ONLY))
+ cpu_check_irqs(env);
+#endif
+#if !defined (TARGET_SPARC64)
+ env->psrs = (val & PSR_S)? 1 : 0;
+ env->psrps = (val & PSR_PS)? 1 : 0;
+ env->psret = (val & PSR_ET)? 1 : 0;
+ set_cwp(val & PSR_CWP);
+#endif
+ env->cc_op = CC_OP_FLAGS;
+}
+
+void cpu_put_psr(CPUState *env1, target_ulong val)
+{
+ CPUState *saved_env;
+
+ saved_env = env;
+ env = env1;
+ put_psr(val);
+ env = saved_env;
+}
+
+static int cwp_inc(int cwp)
+{
+ if (unlikely(cwp >= env->nwindows)) {
+ cwp -= env->nwindows;
+ }
+ return cwp;
+}
+
+int cpu_cwp_inc(CPUState *env1, int cwp)
+{
+ CPUState *saved_env;
+ target_ulong ret;
+
+ saved_env = env;
+ env = env1;
+ ret = cwp_inc(cwp);
+ env = saved_env;
+ return ret;
+}
+
+static int cwp_dec(int cwp)
+{
+ if (unlikely(cwp < 0)) {
+ cwp += env->nwindows;
+ }
+ return cwp;
+}
+
+int cpu_cwp_dec(CPUState *env1, int cwp)
+{
+ CPUState *saved_env;
+ target_ulong ret;
+
+ saved_env = env;
+ env = env1;
+ ret = cwp_dec(cwp);
+ env = saved_env;
+ return ret;
+}
+
+#ifdef TARGET_SPARC64
+GEN_FCMPS(fcmps_fcc1, float32, 22, 0);
+GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
+GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);
+
+GEN_FCMPS(fcmps_fcc2, float32, 24, 0);
+GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
+GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);
+
+GEN_FCMPS(fcmps_fcc3, float32, 26, 0);
+GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
+GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);
+
+GEN_FCMPS(fcmpes_fcc1, float32, 22, 1);
+GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
+GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);
+
+GEN_FCMPS(fcmpes_fcc2, float32, 24, 1);
+GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
+GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);
+
+GEN_FCMPS(fcmpes_fcc3, float32, 26, 1);
+GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
+GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
+#endif
+#undef GEN_FCMPS
+
+#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
+ defined(DEBUG_MXCC)
+static void dump_mxcc(CPUState *env)
+{
+ printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
+ "\n",
+ env->mxccdata[0], env->mxccdata[1],
+ env->mxccdata[2], env->mxccdata[3]);
+ printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
+ "\n"
+ " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
+ "\n",
+ env->mxccregs[0], env->mxccregs[1],
+ env->mxccregs[2], env->mxccregs[3],
+ env->mxccregs[4], env->mxccregs[5],
+ env->mxccregs[6], env->mxccregs[7]);
+}
+#endif
+
+#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
+ && defined(DEBUG_ASI)
+static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
+ uint64_t r1)
+{
+ switch (size)
+ {
+ case 1:
+ DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
+ addr, asi, r1 & 0xff);
+ break;
+ case 2:
+ DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
+ addr, asi, r1 & 0xffff);
+ break;
+ case 4:
+ DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
+ addr, asi, r1 & 0xffffffff);
+ break;
+ case 8:
+ DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
+ addr, asi, r1);
+ break;
+ }
+}
+#endif
+
+#ifndef TARGET_SPARC64
+#ifndef CONFIG_USER_ONLY
+
+
+/* Leon3 cache control */
+
+static void leon3_cache_control_int(void)
+{
+ uint32_t state = 0;
+
+ if (env->cache_control & CACHE_CTRL_IF) {
+ /* Instruction cache state */
+ state = env->cache_control & CACHE_STATE_MASK;
+ if (state == CACHE_ENABLED) {
+ state = CACHE_FROZEN;
+ DPRINTF_CACHE_CONTROL("Instruction cache: freeze\n");
+ }
+
+ env->cache_control &= ~CACHE_STATE_MASK;
+ env->cache_control |= state;
+ }
+
+ if (env->cache_control & CACHE_CTRL_DF) {
+ /* Data cache state */
+ state = (env->cache_control >> 2) & CACHE_STATE_MASK;
+ if (state == CACHE_ENABLED) {
+ state = CACHE_FROZEN;
+ DPRINTF_CACHE_CONTROL("Data cache: freeze\n");
+ }
+
+ env->cache_control &= ~(CACHE_STATE_MASK << 2);
+ env->cache_control |= (state << 2);
+ }
+}
+
+static void leon3_cache_control_st(target_ulong addr, uint64_t val, int size)
+{
+ DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n",
+ addr, val, size);
+
+ if (size != 4) {
+ DPRINTF_CACHE_CONTROL("32bits only\n");
+ return;
+ }
+
+ switch (addr) {
+ case 0x00: /* Cache control */
+
+ /* These values must always be read as zeros */
+ val &= ~CACHE_CTRL_FD;
+ val &= ~CACHE_CTRL_FI;
+ val &= ~CACHE_CTRL_IB;
+ val &= ~CACHE_CTRL_IP;
+ val &= ~CACHE_CTRL_DP;
+
+ env->cache_control = val;
+ break;
+ case 0x04: /* Instruction cache configuration */
+ case 0x08: /* Data cache configuration */
+ /* Read Only */
+ break;
+ default:
+ DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr);
+ break;
+    }
+}
+
+static uint64_t leon3_cache_control_ld(target_ulong addr, int size)
+{
+ uint64_t ret = 0;
+
+ if (size != 4) {
+ DPRINTF_CACHE_CONTROL("32bits only\n");
+ return 0;
+ }
+
+ switch (addr) {
+ case 0x00: /* Cache control */
+ ret = env->cache_control;
+ break;
+
+    /* The configuration registers are read-only and always return the
+       predefined values below */
+
+ case 0x04: /* Instruction cache configuration */
+ ret = 0x10220000;
+ break;
+ case 0x08: /* Data cache configuration */
+ ret = 0x18220000;
+ break;
+ default:
+ DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr);
+ break;
+    }
+ DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n",
+ addr, ret, size);
+ return ret;
+}
+
+void leon3_irq_manager(void *irq_manager, int intno)
+{
+ leon3_irq_ack(irq_manager, intno);
+ leon3_cache_control_int();
+}
+
+uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
+{
+ uint64_t ret = 0;
+#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
+ uint32_t last_addr = addr;
+#endif
+
+ helper_check_align(addr, size - 1);
+ switch (asi) {
+ case 2: /* SuperSparc MXCC registers and Leon3 cache control */
+ switch (addr) {
+ case 0x00: /* Leon3 Cache Control */
+ case 0x08: /* Leon3 Instruction Cache config */
+        case 0x0C: /* Leon3 Data Cache config */
+ if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
+ ret = leon3_cache_control_ld(addr, size);
+ }
+ break;
+ case 0x01c00a00: /* MXCC control register */
+ if (size == 8)
+ ret = env->mxccregs[3];
+ else
+ DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
+ size);
+ break;
+ case 0x01c00a04: /* MXCC control register */
+ if (size == 4)
+ ret = env->mxccregs[3];
+ else
+ DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
+ size);
+ break;
+ case 0x01c00c00: /* Module reset register */
+ if (size == 8) {
+ ret = env->mxccregs[5];
+ // should we do something here?
+ } else
+ DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
+ size);
+ break;
+ case 0x01c00f00: /* MBus port address register */
+ if (size == 8)
+ ret = env->mxccregs[7];
+ else
+ DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
+ size);
+ break;
+ default:
+ DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
+ size);
+ break;
+ }
+ DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
+ "addr = %08x -> ret = %" PRIx64 ","
+ "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
+#ifdef DEBUG_MXCC
+ dump_mxcc(env);
+#endif
+ break;
+ case 3: /* MMU probe */
+ {
+ int mmulev;
+
+ mmulev = (addr >> 8) & 15;
+ if (mmulev > 4)
+ ret = 0;
+ else
+ ret = mmu_probe(env, addr, mmulev);
+ DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
+ addr, mmulev, ret);
+ }
+ break;
+ case 4: /* read MMU regs */
+ {
+ int reg = (addr >> 8) & 0x1f;
+
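+ /* the MMU register index is taken from virtual address bits [12:8] */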
+ ret = env->mmuregs[reg];
+ if (reg == 3) /* Fault status cleared on read */
+ env->mmuregs[3] = 0;
+ else if (reg == 0x13) /* Fault status read */
+ ret = env->mmuregs[3];
+ else if (reg == 0x14) /* Fault address read */
+ ret = env->mmuregs[4];
+ DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
+ }
+ break;
+ case 5: // Turbosparc ITLB Diagnostic
+ case 6: // Turbosparc DTLB Diagnostic
+ case 7: // Turbosparc IOTLB Diagnostic
+ break;
+ case 9: /* Supervisor code access */
+ switch(size) {
+ case 1:
+ ret = ldub_code(addr);
+ break;
+ case 2:
+ ret = lduw_code(addr);
+ break;
+ default:
+ case 4:
+ ret = ldl_code(addr);
+ break;
+ case 8:
+ ret = ldq_code(addr);
+ break;
+ }
+ break;
+ case 0xa: /* User data access */
+ switch(size) {
+ case 1:
+ ret = ldub_user(addr);
+ break;
+ case 2:
+ ret = lduw_user(addr);
+ break;
+ default:
+ case 4:
+ ret = ldl_user(addr);
+ break;
+ case 8:
+ ret = ldq_user(addr);
+ break;
+ }
+ break;
+ case 0xb: /* Supervisor data access */
+ switch(size) {
+ case 1:
+ ret = ldub_kernel(addr);
+ break;
+ case 2:
+ ret = lduw_kernel(addr);
+ break;
+ default:
+ case 4:
+ ret = ldl_kernel(addr);
+ break;
+ case 8:
+ ret = ldq_kernel(addr);
+ break;
+ }
+ break;
+ case 0xc: /* I-cache tag */
+ case 0xd: /* I-cache data */
+ case 0xe: /* D-cache tag */
+ case 0xf: /* D-cache data */
+ break;
+ case 0x20: /* MMU passthrough */
+ switch(size) {
+ case 1:
+ ret = ldub_phys(addr);
+ break;
+ case 2:
+ ret = lduw_phys(addr);
+ break;
+ default:
+ case 4:
+ ret = ldl_phys(addr);
+ break;
+ case 8:
+ ret = ldq_phys(addr);
+ break;
+ }
+ break;
+ case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
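+ /* the low four ASI bits supply physical address bits [35:32],
+ extending the 32-bit virtual address into a 36-bit physical one */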
+ switch(size) {
+ case 1:
+ ret = ldub_phys((target_phys_addr_t)addr
+ | ((target_phys_addr_t)(asi & 0xf) << 32));
+ break;
+ case 2:
+ ret = lduw_phys((target_phys_addr_t)addr
+ | ((target_phys_addr_t)(asi & 0xf) << 32));
+ break;
+ default:
+ case 4:
+ ret = ldl_phys((target_phys_addr_t)addr
+ | ((target_phys_addr_t)(asi & 0xf) << 32));
+ break;
+ case 8:
+ ret = ldq_phys((target_phys_addr_t)addr
+ | ((target_phys_addr_t)(asi & 0xf) << 32));
+ break;
+ }
+ break;
+ case 0x30: // Turbosparc secondary cache diagnostic
+ case 0x31: // Turbosparc RAM snoop
+ case 0x32: // Turbosparc page table descriptor diagnostic
+ case 0x39: /* data cache diagnostic register */
+ case 0x4c: /* SuperSPARC MMU Breakpoint Action register */
+ ret = 0;
+ break;
+ case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
+ {
+ int reg = (addr >> 8) & 3;
+
+ switch(reg) {
+ case 0: /* Breakpoint Value (Addr) */
+ ret = env->mmubpregs[reg];
+ break;
+ case 1: /* Breakpoint Mask */
+ ret = env->mmubpregs[reg];
+ break;
+ case 2: /* Breakpoint Control */
+ ret = env->mmubpregs[reg];
+ break;
+ case 3: /* Breakpoint Status */
+ ret = env->mmubpregs[reg];
+ env->mmubpregs[reg] = 0ULL;
+ break;
+ }
+ DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
+ ret);
+ }
+ break;
+ case 8: /* User code access, XXX */
+ default:
+ do_unassigned_access(addr, 0, 0, asi, size);
+ ret = 0;
+ break;
+ }
+ if (sign) {
+ switch(size) {
+ case 1:
+ ret = (int8_t) ret;
+ break;
+ case 2:
+ ret = (int16_t) ret;
+ break;
+ case 4:
+ ret = (int32_t) ret;
+ break;
+ default:
+ break;
+ }
+ }
+#ifdef DEBUG_ASI
+ dump_asi("read ", last_addr, asi, size, ret);
+#endif
+ return ret;
+}
+
+void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
+{
+ helper_check_align(addr, size - 1);
+ switch(asi) {
+ case 2: /* SuperSparc MXCC registers and Leon3 cache control */
+ switch (addr) {
+ case 0x00: /* Leon3 Cache Control */
+ case 0x08: /* Leon3 Instruction Cache config */
+ case 0x0C: /* Leon3 Data Cache config */
+ if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
+ leon3_cache_control_st(addr, val, size);
+ }
+ break;
+
+ case 0x01c00000: /* MXCC stream data register 0 */
+ if (size == 8)
+ env->mxccdata[0] = val;
+ else
+ DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
+ size);
+ break;
+ case 0x01c00008: /* MXCC stream data register 1 */
+ if (size == 8)
+ env->mxccdata[1] = val;
+ else
+ DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
+ size);
+ break;
+ case 0x01c00010: /* MXCC stream data register 2 */
+ if (size == 8)
+ env->mxccdata[2] = val;
+ else
+ DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
+ size);
+ break;
+ case 0x01c00018: /* MXCC stream data register 3 */
+ if (size == 8)
+ env->mxccdata[3] = val;
+ else
+ DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
+ size);
+ break;
+ case 0x01c00100: /* MXCC stream source */
+ if (size == 8)
+ env->mxccregs[0] = val;
+ else
+ DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
+ size);
+ env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
+ 0);
+ env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
+ 8);
+ env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
+ 16);
+ env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
+ 24);
+ break;
+ case 0x01c00200: /* MXCC stream destination */
+ if (size == 8)
+ env->mxccregs[1] = val;
+ else
+ DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
+ size);
+ stq_phys((env->mxccregs[1] & 0xffffffffULL) + 0,
+ env->mxccdata[0]);
+ stq_phys((env->mxccregs[1] & 0xffffffffULL) + 8,
+ env->mxccdata[1]);
+ stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
+ env->mxccdata[2]);
+ stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
+ env->mxccdata[3]);
+ break;
+ case 0x01c00a00: /* MXCC control register */
+ if (size == 8)
+ env->mxccregs[3] = val;
+ else
+ DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
+ size);
+ break;
+ case 0x01c00a04: /* MXCC control register */
+ if (size == 4)
+ env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
+ | val;
+ else
+ DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
+ size);
+ break;
+ case 0x01c00e00: /* MXCC error register */
+ // writing a 1 bit clears the error
+ if (size == 8)
+ env->mxccregs[6] &= ~val;
+ else
+ DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
+ size);
+ break;
+ case 0x01c00f00: /* MBus port address register */
+ if (size == 8)
+ env->mxccregs[7] = val;
+ else
+ DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
+ size);
+ break;
+ default:
+ DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
+ size);
+ break;
+ }
+ DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
+ asi, size, addr, val);
+#ifdef DEBUG_MXCC
+ dump_mxcc(env);
+#endif
+ break;
+ case 3: /* MMU flush */
+ {
+ int mmulev;
+
+ mmulev = (addr >> 8) & 15;
+ DPRINTF_MMU("mmu flush level %d\n", mmulev);
+ switch (mmulev) {
+ case 0: // flush page
+ tlb_flush_page(env, addr & 0xfffff000);
+ break;
+ case 1: // flush segment (256k)
+ case 2: // flush region (16M)
+ case 3: // flush context (4G)
+ case 4: // flush entire
+ tlb_flush(env, 1);
+ break;
+ default:
+ break;
+ }
+#ifdef DEBUG_MMU
+ dump_mmu(stdout, fprintf, env);
+#endif
+ }
+ break;
+ case 4: /* write MMU regs */
+ {
+ int reg = (addr >> 8) & 0x1f;
+ uint32_t oldreg;
+
+ oldreg = env->mmuregs[reg];
+ switch(reg) {
+ case 0: // Control Register
+ env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
+ (val & 0x00ffffff);
+ // Mappings generated during no-fault mode or MMU
+ // disabled mode are invalid in normal mode
+ if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
+ (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm)))
+ tlb_flush(env, 1);
+ break;
+ case 1: // Context Table Pointer Register
+ env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
+ break;
+ case 2: // Context Register
+ env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
+ if (oldreg != env->mmuregs[reg]) {
+ /* we flush when the MMU context changes because
+ QEMU has no MMU context support */
+ tlb_flush(env, 1);
+ }
+ break;
+ case 3: // Synchronous Fault Status Register with Clear
+ case 4: // Synchronous Fault Address Register
+ break;
+ case 0x10: // TLB Replacement Control Register
+ env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
+ break;
+ case 0x13: // Synchronous Fault Status Register with Read and Clear
+ env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
+ break;
+ case 0x14: // Synchronous Fault Address Register
+ env->mmuregs[4] = val;
+ break;
+ default:
+ env->mmuregs[reg] = val;
+ break;
+ }
+ if (oldreg != env->mmuregs[reg]) {
+ DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
+ reg, oldreg, env->mmuregs[reg]);
+ }
+#ifdef DEBUG_MMU
+ dump_mmu(stdout, fprintf, env);
+#endif
+ }
+ break;
+ case 5: // Turbosparc ITLB Diagnostic
+ case 6: // Turbosparc DTLB Diagnostic
+ case 7: // Turbosparc IOTLB Diagnostic
+ break;
+ case 0xa: /* User data access */
+ switch(size) {
+ case 1:
+ stb_user(addr, val);
+ break;
+ case 2:
+ stw_user(addr, val);
+ break;
+ default:
+ case 4:
+ stl_user(addr, val);
+ break;
+ case 8:
+ stq_user(addr, val);
+ break;
+ }
+ break;
+ case 0xb: /* Supervisor data access */
+ switch(size) {
+ case 1:
+ stb_kernel(addr, val);
+ break;
+ case 2:
+ stw_kernel(addr, val);
+ break;
+ default:
+ case 4:
+ stl_kernel(addr, val);
+ break;
+ case 8:
+ stq_kernel(addr, val);
+ break;
+ }
+ break;
+ case 0xc: /* I-cache tag */
+ case 0xd: /* I-cache data */
+ case 0xe: /* D-cache tag */
+ case 0xf: /* D-cache data */
+ case 0x10: /* I/D-cache flush page */
+ case 0x11: /* I/D-cache flush segment */
+ case 0x12: /* I/D-cache flush region */
+ case 0x13: /* I/D-cache flush context */
+ case 0x14: /* I/D-cache flush user */
+ break;
+ case 0x17: /* Block copy, sta access */
+ {
+ // val = src
+ // addr = dst
+ // copy 32 bytes
+ unsigned int i;
+ uint32_t src = val & ~3, dst = addr & ~3, temp;
+
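+ /* the low two address bits were cleared above, so every ldl/stl
+ in this loop is word-aligned */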
+ for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
+ temp = ldl_kernel(src);
+ stl_kernel(dst, temp);
+ }
+ }
+ break;
+ case 0x1f: /* Block fill, stda access */
+ {
+ // addr = dst
+ // fill 32 bytes with val
+ unsigned int i;
+ uint32_t dst = addr & ~7; /* align destination down to 8 bytes */
+
+ for (i = 0; i < 32; i += 8, dst += 8)
+ stq_kernel(dst, val);
+ }
+ break;
+ case 0x20: /* MMU passthrough */
+ {
+ switch(size) {
+ case 1:
+ stb_phys(addr, val);
+ break;
+ case 2:
+ stw_phys(addr, val);
+ break;
+ case 4:
+ default:
+ stl_phys(addr, val);
+ break;
+ case 8:
+ stq_phys(addr, val);
+ break;
+ }
+ }
+ break;
+ case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
+ {
+ switch(size) {
+ case 1:
+ stb_phys((target_phys_addr_t)addr
+ | ((target_phys_addr_t)(asi & 0xf) << 32), val);
+ break;
+ case 2:
+ stw_phys((target_phys_addr_t)addr
+ | ((target_phys_addr_t)(asi & 0xf) << 32), val);
+ break;
+ case 4:
+ default:
+ stl_phys((target_phys_addr_t)addr
+ | ((target_phys_addr_t)(asi & 0xf) << 32), val);
+ break;
+ case 8:
+ stq_phys((target_phys_addr_t)addr
+ | ((target_phys_addr_t)(asi & 0xf) << 32), val);
+ break;
+ }
+ }
+ break;
+ case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
+ case 0x31: // store buffer data, Ross RT620 I-cache flush or
+ // Turbosparc snoop RAM
+ case 0x32: // store buffer control or Turbosparc page table
+ // descriptor diagnostic
+ case 0x36: /* I-cache flash clear */
+ case 0x37: /* D-cache flash clear */
+ case 0x4c: /* breakpoint action */
+ break;
+ case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
+ {
+ int reg = (addr >> 8) & 3;
+
+ switch(reg) {
+ case 0: /* Breakpoint Value (Addr) */
+ env->mmubpregs[reg] = (val & 0xfffffffffULL);
+ break;
+ case 1: /* Breakpoint Mask */
+ env->mmubpregs[reg] = (val & 0xfffffffffULL);
+ break;
+ case 2: /* Breakpoint Control */
+ env->mmubpregs[reg] = (val & 0x7fULL);
+ break;
+ case 3: /* Breakpoint Status */
+ env->mmubpregs[reg] = (val & 0xfULL);
+ break;
+ }
+ DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg,
+ env->mmuregs[reg]);
+ }
+ break;
+ case 8: /* User code access, XXX */
+ case 9: /* Supervisor code access, XXX */
+ default:
+ do_unassigned_access(addr, 1, 0, asi, size);
+ break;
+ }
+#ifdef DEBUG_ASI
+ dump_asi("write", addr, asi, size, val);
+#endif
+}
+
+#endif /* CONFIG_USER_ONLY */
+#else /* TARGET_SPARC64 */
+
+#ifdef CONFIG_USER_ONLY
+uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
+{
+ uint64_t ret = 0;
+#if defined(DEBUG_ASI)
+ target_ulong last_addr = addr;
+#endif
+
+ if (asi < 0x80)
+ raise_exception(TT_PRIV_ACT);
+
+ helper_check_align(addr, size - 1);
+ addr = asi_address_mask(env, asi, addr);
+
+ switch (asi) {
+ case 0x82: // Primary no-fault
+ case 0x8a: // Primary no-fault LE
+ if (page_check_range(addr, size, PAGE_READ) == -1) {
+#ifdef DEBUG_ASI
+ dump_asi("read ", last_addr, asi, size, ret);
+#endif
+ return 0;
+ }
+ // Fall through
+ case 0x80: // Primary
+ case 0x88: // Primary LE
+ {
+ switch(size) {
+ case 1:
+ ret = ldub_raw(addr);
+ break;
+ case 2:
+ ret = lduw_raw(addr);
+ break;
+ case 4:
+ ret = ldl_raw(addr);
+ break;
+ default:
+ case 8:
+ ret = ldq_raw(addr);
+ break;
+ }
+ }
+ break;
+ case 0x83: // Secondary no-fault
+ case 0x8b: // Secondary no-fault LE
+ if (page_check_range(addr, size, PAGE_READ) == -1) {
+#ifdef DEBUG_ASI
+ dump_asi("read ", last_addr, asi, size, ret);
+#endif
+ return 0;
+ }
+ // Fall through
+ case 0x81: // Secondary
+ case 0x89: // Secondary LE
+ // XXX
+ break;
+ default:
+ break;
+ }
+
+ /* Convert from little endian */
+ switch (asi) {
+ case 0x88: // Primary LE
+ case 0x89: // Secondary LE
+ case 0x8a: // Primary no-fault LE
+ case 0x8b: // Secondary no-fault LE
+ switch(size) {
+ case 2:
+ ret = bswap16(ret);
+ break;
+ case 4:
+ ret = bswap32(ret);
+ break;
+ case 8:
+ ret = bswap64(ret);
+ break;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ /* Convert to signed number */
+ if (sign) {
+ switch(size) {
+ case 1:
+ ret = (int8_t) ret;
+ break;
+ case 2:
+ ret = (int16_t) ret;
+ break;
+ case 4:
+ ret = (int32_t) ret;
+ break;
+ default:
+ break;
+ }
+ }
+#ifdef DEBUG_ASI
+ dump_asi("read ", last_addr, asi, size, ret);
+#endif
+ return ret;
+}
+
+void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
+{
+#ifdef DEBUG_ASI
+ dump_asi("write", addr, asi, size, val);
+#endif
+ if (asi < 0x80)
+ raise_exception(TT_PRIV_ACT);
+
+ helper_check_align(addr, size - 1);
+ addr = asi_address_mask(env, asi, addr);
+
+ /* Convert to little endian */
+ switch (asi) {
+ case 0x88: // Primary LE
+ case 0x89: // Secondary LE
+ switch(size) {
+ case 2:
+ val = bswap16(val);
+ break;
+ case 4:
+ val = bswap32(val);
+ break;
+ case 8:
+ val = bswap64(val);
+ break;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ switch(asi) {
+ case 0x80: // Primary
+ case 0x88: // Primary LE
+ {
+ switch(size) {
+ case 1:
+ stb_raw(addr, val);
+ break;
+ case 2:
+ stw_raw(addr, val);
+ break;
+ case 4:
+ stl_raw(addr, val);
+ break;
+ case 8:
+ default:
+ stq_raw(addr, val);
+ break;
+ }
+ }
+ break;
+ case 0x81: // Secondary
+ case 0x89: // Secondary LE
+ // XXX
+ return;
+
+ case 0x82: // Primary no-fault, RO
+ case 0x83: // Secondary no-fault, RO
+ case 0x8a: // Primary no-fault LE, RO
+ case 0x8b: // Secondary no-fault LE, RO
+ default:
+ do_unassigned_access(addr, 1, 0, 1, size);
+ return;
+ }
+}
+
+#else /* CONFIG_USER_ONLY */
+
+uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
+{
+ uint64_t ret = 0;
+#if defined(DEBUG_ASI)
+ target_ulong last_addr = addr;
+#endif
+
+ asi &= 0xff;
+
+ if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
+ || (cpu_has_hypervisor(env)
+ && asi >= 0x30 && asi < 0x80
+ && !(env->hpstate & HS_PRIV)))
+ raise_exception(TT_PRIV_ACT);
+
+ helper_check_align(addr, size - 1);
+ addr = asi_address_mask(env, asi, addr);
+
+ switch (asi) {
+ case 0x82: // Primary no-fault
+ case 0x8a: // Primary no-fault LE
+ case 0x83: // Secondary no-fault
+ case 0x8b: // Secondary no-fault LE
+ {
+ /* secondary space access has lowest asi bit equal to 1 */
+ int access_mmu_idx = (asi & 1) ? MMU_KERNEL_SECONDARY_IDX
+ : MMU_KERNEL_IDX;
+
+ if (cpu_get_phys_page_nofault(env, addr, access_mmu_idx) == -1ULL) {
+#ifdef DEBUG_ASI
+ dump_asi("read ", last_addr, asi, size, ret);
+#endif
+ return 0;
+ }
+ }
+ // Fall through
+ case 0x10: // As if user primary
+ case 0x11: // As if user secondary
+ case 0x18: // As if user primary LE
+ case 0x19: // As if user secondary LE
+ case 0x80: // Primary
+ case 0x81: // Secondary
+ case 0x88: // Primary LE
+ case 0x89: // Secondary LE
+ case 0xe2: // UA2007 Primary block init
+ case 0xe3: // UA2007 Secondary block init
+ if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
+ if (cpu_hypervisor_mode(env)) {
+ switch(size) {
+ case 1:
+ ret = ldub_hypv(addr);
+ break;
+ case 2:
+ ret = lduw_hypv(addr);
+ break;
+ case 4:
+ ret = ldl_hypv(addr);
+ break;
+ default:
+ case 8:
+ ret = ldq_hypv(addr);
+ break;
+ }
+ } else {
+ /* secondary space access has lowest asi bit equal to 1 */
+ if (asi & 1) {
+ switch(size) {
+ case 1:
+ ret = ldub_kernel_secondary(addr);
+ break;
+ case 2:
+ ret = lduw_kernel_secondary(addr);
+ break;
+ case 4:
+ ret = ldl_kernel_secondary(addr);
+ break;
+ default:
+ case 8:
+ ret = ldq_kernel_secondary(addr);
+ break;
+ }
+ } else {
+ switch(size) {
+ case 1:
+ ret = ldub_kernel(addr);
+ break;
+ case 2:
+ ret = lduw_kernel(addr);
+ break;
+ case 4:
+ ret = ldl_kernel(addr);
+ break;
+ default:
+ case 8:
+ ret = ldq_kernel(addr);
+ break;
+ }
+ }
+ }
+ } else {
+ /* secondary space access has lowest asi bit equal to 1 */
+ if (asi & 1) {
+ switch(size) {
+ case 1:
+ ret = ldub_user_secondary(addr);
+ break;
+ case 2:
+ ret = lduw_user_secondary(addr);
+ break;
+ case 4:
+ ret = ldl_user_secondary(addr);
+ break;
+ default:
+ case 8:
+ ret = ldq_user_secondary(addr);
+ break;
+ }
+ } else {
+ switch(size) {
+ case 1:
+ ret = ldub_user(addr);
+ break;
+ case 2:
+ ret = lduw_user(addr);
+ break;
+ case 4:
+ ret = ldl_user(addr);
+ break;
+ default:
+ case 8:
+ ret = ldq_user(addr);
+ break;
+ }
+ }
+ }
+ break;
+ case 0x14: // Bypass
+ case 0x15: // Bypass, non-cacheable
+ case 0x1c: // Bypass LE
+ case 0x1d: // Bypass, non-cacheable LE
+ {
+ switch(size) {
+ case 1:
+ ret = ldub_phys(addr);
+ break;
+ case 2:
+ ret = lduw_phys(addr);
+ break;
+ case 4:
+ ret = ldl_phys(addr);
+ break;
+ default:
+ case 8:
+ ret = ldq_phys(addr);
+ break;
+ }
+ break;
+ }
+ case 0x24: // Nucleus quad LDD 128 bit atomic
+ case 0x2c: // Nucleus quad LDD 128 bit atomic LE
+ // Only ldda allowed
+ raise_exception(TT_ILL_INSN);
+ return 0;
+ case 0x04: // Nucleus
+ case 0x0c: // Nucleus Little Endian (LE)
+ {
+ switch(size) {
+ case 1:
+ ret = ldub_nucleus(addr);
+ break;
+ case 2:
+ ret = lduw_nucleus(addr);
+ break;
+ case 4:
+ ret = ldl_nucleus(addr);
+ break;
+ default:
+ case 8:
+ ret = ldq_nucleus(addr);
+ break;
+ }
+ break;
+ }
+ case 0x4a: // UPA config
+ // XXX
+ break;
+ case 0x45: // LSU
+ ret = env->lsu;
+ break;
+ case 0x50: // I-MMU regs
+ {
+ int reg = (addr >> 3) & 0xf;
+
+ if (reg == 0) {
+ // I-TSB Tag Target register
+ ret = ultrasparc_tag_target(env->immu.tag_access);
+ } else {
+ ret = env->immuregs[reg];
+ }
+
+ break;
+ }
+ case 0x51: // I-MMU 8k TSB pointer
+ {
+ // env->immuregs[5] holds I-MMU TSB register value
+ // env->immuregs[6] holds I-MMU Tag Access register value
+ ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
+ 8*1024);
+ break;
+ }
+ case 0x52: // I-MMU 64k TSB pointer
+ {
+ // env->immuregs[5] holds I-MMU TSB register value
+ // env->immuregs[6] holds I-MMU Tag Access register value
+ ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
+ 64*1024);
+ break;
+ }
+ case 0x55: // I-MMU data access
+ {
+ int reg = (addr >> 3) & 0x3f;
+
+ ret = env->itlb[reg].tte;
+ break;
+ }
+ case 0x56: // I-MMU tag read
+ {
+ int reg = (addr >> 3) & 0x3f;
+
+ ret = env->itlb[reg].tag;
+ break;
+ }
+ case 0x58: // D-MMU regs
+ {
+ int reg = (addr >> 3) & 0xf;
+
+ if (reg == 0) {
+ // D-TSB Tag Target register
+ ret = ultrasparc_tag_target(env->dmmu.tag_access);
+ } else {
+ ret = env->dmmuregs[reg];
+ }
+ break;
+ }
+ case 0x59: // D-MMU 8k TSB pointer
+ {
+ // env->dmmuregs[5] holds D-MMU TSB register value
+ // env->dmmuregs[6] holds D-MMU Tag Access register value
+ ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
+ 8*1024);
+ break;
+ }
+ case 0x5a: // D-MMU 64k TSB pointer
+ {
+ // env->dmmuregs[5] holds D-MMU TSB register value
+ // env->dmmuregs[6] holds D-MMU Tag Access register value
+ ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
+ 64*1024);
+ break;
+ }
+ case 0x5d: // D-MMU data access
+ {
+ int reg = (addr >> 3) & 0x3f;
+
+ ret = env->dtlb[reg].tte;
+ break;
+ }
+ case 0x5e: // D-MMU tag read
+ {
+ int reg = (addr >> 3) & 0x3f;
+
+ ret = env->dtlb[reg].tag;
+ break;
+ }
+ case 0x46: // D-cache data
+ case 0x47: // D-cache tag access
+ case 0x4b: // E-cache error enable
+ case 0x4c: // E-cache asynchronous fault status
+ case 0x4d: // E-cache asynchronous fault address
+ case 0x4e: // E-cache tag data
+ case 0x66: // I-cache instruction access
+ case 0x67: // I-cache tag access
+ case 0x6e: // I-cache predecode
+ case 0x6f: // I-cache LRU etc.
+ case 0x76: // E-cache tag
+ case 0x7e: // E-cache tag
+ break;
+ case 0x5b: // D-MMU data pointer
+ case 0x48: // Interrupt dispatch, RO
+ case 0x49: // Interrupt data receive
+ case 0x7f: // Incoming interrupt vector, RO
+ // XXX
+ break;
+ case 0x54: // I-MMU data in, WO
+ case 0x57: // I-MMU demap, WO
+ case 0x5c: // D-MMU data in, WO
+ case 0x5f: // D-MMU demap, WO
+ case 0x77: // Interrupt vector, WO
+ default:
+ do_unassigned_access(addr, 0, 0, 1, size);
+ ret = 0;
+ break;
+ }
+
+ /* Convert from little endian */
+ switch (asi) {
+ case 0x0c: // Nucleus Little Endian (LE)
+ case 0x18: // As if user primary LE
+ case 0x19: // As if user secondary LE
+ case 0x1c: // Bypass LE
+ case 0x1d: // Bypass, non-cacheable LE
+ case 0x88: // Primary LE
+ case 0x89: // Secondary LE
+ case 0x8a: // Primary no-fault LE
+ case 0x8b: // Secondary no-fault LE
+ switch(size) {
+ case 2:
+ ret = bswap16(ret);
+ break;
+ case 4:
+ ret = bswap32(ret);
+ break;
+ case 8:
+ ret = bswap64(ret);
+ break;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ /* Convert to signed number */
+ if (sign) {
+ switch(size) {
+ case 1:
+ ret = (int8_t) ret;
+ break;
+ case 2:
+ ret = (int16_t) ret;
+ break;
+ case 4:
+ ret = (int32_t) ret;
+ break;
+ default:
+ break;
+ }
+ }
+#ifdef DEBUG_ASI
+ dump_asi("read ", last_addr, asi, size, ret);
+#endif
+ return ret;
+}
+
+void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
+{
+#ifdef DEBUG_ASI
+ dump_asi("write", addr, asi, size, val);
+#endif
+
+ asi &= 0xff;
+
+ if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
+ || (cpu_has_hypervisor(env)
+ && asi >= 0x30 && asi < 0x80
+ && !(env->hpstate & HS_PRIV)))
+ raise_exception(TT_PRIV_ACT);
+
+ helper_check_align(addr, size - 1);
+ addr = asi_address_mask(env, asi, addr);
+
+ /* Convert to little endian */
+ switch (asi) {
+ case 0x0c: // Nucleus Little Endian (LE)
+ case 0x18: // As if user primary LE
+ case 0x19: // As if user secondary LE
+ case 0x1c: // Bypass LE
+ case 0x1d: // Bypass, non-cacheable LE
+ case 0x88: // Primary LE
+ case 0x89: // Secondary LE
+ switch(size) {
+ case 2:
+ val = bswap16(val);
+ break;
+ case 4:
+ val = bswap32(val);
+ break;
+ case 8:
+ val = bswap64(val);
+ break;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ switch(asi) {
+ case 0x10: // As if user primary
+ case 0x11: // As if user secondary
+ case 0x18: // As if user primary LE
+ case 0x19: // As if user secondary LE
+ case 0x80: // Primary
+ case 0x81: // Secondary
+ case 0x88: // Primary LE
+ case 0x89: // Secondary LE
+ case 0xe2: // UA2007 Primary block init
+ case 0xe3: // UA2007 Secondary block init
+ if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
+ if (cpu_hypervisor_mode(env)) {
+ switch(size) {
+ case 1:
+ stb_hypv(addr, val);
+ break;
+ case 2:
+ stw_hypv(addr, val);
+ break;
+ case 4:
+ stl_hypv(addr, val);
+ break;
+ case 8:
+ default:
+ stq_hypv(addr, val);
+ break;
+ }
+ } else {
+ /* secondary space access has lowest asi bit equal to 1 */
+ if (asi & 1) {
+ switch(size) {
+ case 1:
+ stb_kernel_secondary(addr, val);
+ break;
+ case 2:
+ stw_kernel_secondary(addr, val);
+ break;
+ case 4:
+ stl_kernel_secondary(addr, val);
+ break;
+ case 8:
+ default:
+ stq_kernel_secondary(addr, val);
+ break;
+ }
+ } else {
+ switch(size) {
+ case 1:
+ stb_kernel(addr, val);
+ break;
+ case 2:
+ stw_kernel(addr, val);
+ break;
+ case 4:
+ stl_kernel(addr, val);
+ break;
+ case 8:
+ default:
+ stq_kernel(addr, val);
+ break;
+ }
+ }
+ }
+ } else {
+ /* secondary space access has lowest asi bit equal to 1 */
+ if (asi & 1) {
+ switch(size) {
+ case 1:
+ stb_user_secondary(addr, val);
+ break;
+ case 2:
+ stw_user_secondary(addr, val);
+ break;
+ case 4:
+ stl_user_secondary(addr, val);
+ break;
+ case 8:
+ default:
+ stq_user_secondary(addr, val);
+ break;
+ }
+ } else {
+ switch(size) {
+ case 1:
+ stb_user(addr, val);
+ break;
+ case 2:
+ stw_user(addr, val);
+ break;
+ case 4:
+ stl_user(addr, val);
+ break;
+ case 8:
+ default:
+ stq_user(addr, val);
+ break;
+ }
+ }
+ }
+ break;
+ case 0x14: // Bypass
+ case 0x15: // Bypass, non-cacheable
+ case 0x1c: // Bypass LE
+ case 0x1d: // Bypass, non-cacheable LE
+ {
+ switch(size) {
+ case 1:
+ stb_phys(addr, val);
+ break;
+ case 2:
+ stw_phys(addr, val);
+ break;
+ case 4:
+ stl_phys(addr, val);
+ break;
+ case 8:
+ default:
+ stq_phys(addr, val);
+ break;
+ }
+ }
+ return;
+ case 0x24: // Nucleus quad LDD 128 bit atomic
+ case 0x2c: // Nucleus quad LDD 128 bit atomic LE
+ // Only ldda allowed
+ raise_exception(TT_ILL_INSN);
+ return;
+ case 0x04: // Nucleus
+ case 0x0c: // Nucleus Little Endian (LE)
+ {
+ switch(size) {
+ case 1:
+ stb_nucleus(addr, val);
+ break;
+ case 2:
+ stw_nucleus(addr, val);
+ break;
+ case 4:
+ stl_nucleus(addr, val);
+ break;
+ default:
+ case 8:
+ stq_nucleus(addr, val);
+ break;
+ }
+ break;
+ }
+
+ case 0x4a: // UPA config
+ // XXX
+ return;
+ case 0x45: // LSU
+ {
+ uint64_t oldreg;
+
+ oldreg = env->lsu;
+ env->lsu = val & (DMMU_E | IMMU_E);
+ // Mappings generated during D/I MMU disabled mode are
+ // invalid in normal mode
+ if (oldreg != env->lsu) {
+ DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
+ oldreg, env->lsu);
+#ifdef DEBUG_MMU
+ dump_mmu(stdout, fprintf, env);
+#endif
+ tlb_flush(env, 1);
+ }
+ return;
+ }
+ case 0x50: // I-MMU regs
+ {
+ int reg = (addr >> 3) & 0xf;
+ uint64_t oldreg;
+
+ oldreg = env->immuregs[reg];
+ switch(reg) {
+ case 0: // RO
+ return;
+ case 1: // Not in I-MMU
+ case 2:
+ return;
+ case 3: // SFSR
+ if ((val & 1) == 0)
+ val = 0; // Clear SFSR
+ env->immu.sfsr = val;
+ break;
+ case 4: // RO
+ return;
+ case 5: // TSB access
+ DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
+ PRIx64 "\n", env->immu.tsb, val);
+ env->immu.tsb = val;
+ break;
+ case 6: // Tag access
+ env->immu.tag_access = val;
+ break;
+ case 7:
+ case 8:
+ return;
+ default:
+ break;
+ }
+
+ if (oldreg != env->immuregs[reg]) {
+ DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
+ PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
+ }
+#ifdef DEBUG_MMU
+ dump_mmu(stdout, fprintf, env);
+#endif
+ return;
+ }
+ case 0x54: // I-MMU data in
+ replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
+ return;
+ case 0x55: // I-MMU data access
+ {
+ // TODO: auto demap
+
+ unsigned int i = (addr >> 3) & 0x3f;
+
+ replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);
+
+#ifdef DEBUG_MMU
+ DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
+ dump_mmu(stdout, fprintf, env);
+#endif
+ return;
+ }
+ case 0x57: // I-MMU demap
+ demap_tlb(env->itlb, addr, "immu", env);
+ return;
+ case 0x58: // D-MMU regs
+ {
+ int reg = (addr >> 3) & 0xf;
+ uint64_t oldreg;
+
+ oldreg = env->dmmuregs[reg];
+ switch(reg) {
+ case 0: // RO
+ case 4:
+ return;
+ case 3: // SFSR
+ if ((val & 1) == 0) {
+ val = 0; // Clear SFSR, Fault address
+ env->dmmu.sfar = 0;
+ }
+ env->dmmu.sfsr = val;
+ break;
+ case 1: // Primary context
+ env->dmmu.mmu_primary_context = val;
+ /* can be optimized to only flush MMU_USER_IDX
+ and MMU_KERNEL_IDX entries */
+ tlb_flush(env, 1);
+ break;
+ case 2: // Secondary context
+ env->dmmu.mmu_secondary_context = val;
+ /* can be optimized to only flush MMU_USER_SECONDARY_IDX
+ and MMU_KERNEL_SECONDARY_IDX entries */
+ tlb_flush(env, 1);
+ break;
+ case 5: // TSB access
+ DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
+ PRIx64 "\n", env->dmmu.tsb, val);
+ env->dmmu.tsb = val;
+ break;
+ case 6: // Tag access
+ env->dmmu.tag_access = val;
+ break;
+ case 7: // Virtual Watchpoint
+ case 8: // Physical Watchpoint
+ default:
+ env->dmmuregs[reg] = val;
+ break;
+ }
+
+ if (oldreg != env->dmmuregs[reg]) {
+ DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
+ PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
+ }
+#ifdef DEBUG_MMU
+ dump_mmu(stdout, fprintf, env);
+#endif
+ return;
+ }
+ case 0x5c: // D-MMU data in
+ replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
+ return;
+ case 0x5d: // D-MMU data access
+ {
+ unsigned int i = (addr >> 3) & 0x3f;
+
+ replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);
+
+#ifdef DEBUG_MMU
+ DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
+ dump_mmu(stdout, fprintf, env);
+#endif
+ return;
+ }
+ case 0x5f: // D-MMU demap
+ demap_tlb(env->dtlb, addr, "dmmu", env);
+ return;
+ case 0x49: // Interrupt data receive
+ // XXX
+ return;
+ case 0x46: // D-cache data
+ case 0x47: // D-cache tag access
+ case 0x4b: // E-cache error enable
+ case 0x4c: // E-cache asynchronous fault status
+ case 0x4d: // E-cache asynchronous fault address
+ case 0x4e: // E-cache tag data
+ case 0x66: // I-cache instruction access
+ case 0x67: // I-cache tag access
+ case 0x6e: // I-cache predecode
+ case 0x6f: // I-cache LRU etc.
+ case 0x76: // E-cache tag
+ case 0x7e: // E-cache tag
+ return;
+ case 0x51: // I-MMU 8k TSB pointer, RO
+ case 0x52: // I-MMU 64k TSB pointer, RO
+ case 0x56: // I-MMU tag read, RO
+ case 0x59: // D-MMU 8k TSB pointer, RO
+ case 0x5a: // D-MMU 64k TSB pointer, RO
+ case 0x5b: // D-MMU data pointer, RO
+ case 0x5e: // D-MMU tag read, RO
+ case 0x48: // Interrupt dispatch, RO
+ case 0x7f: // Incoming interrupt vector, RO
+ case 0x82: // Primary no-fault, RO
+ case 0x83: // Secondary no-fault, RO
+ case 0x8a: // Primary no-fault LE, RO
+ case 0x8b: // Secondary no-fault LE, RO
+ default:
+ do_unassigned_access(addr, 1, 0, 1, size);
+ return;
+ }
+}
+#endif /* CONFIG_USER_ONLY */
+
+void helper_ldda_asi(target_ulong addr, int asi, int rd)
+{
+ if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
+ || (cpu_has_hypervisor(env)
+ && asi >= 0x30 && asi < 0x80
+ && !(env->hpstate & HS_PRIV)))
+ raise_exception(TT_PRIV_ACT);
+
+ addr = asi_address_mask(env, asi, addr);
+
+ switch (asi) {
+#if !defined(CONFIG_USER_ONLY)
+ case 0x24: // Nucleus quad LDD 128 bit atomic
+ case 0x2c: // Nucleus quad LDD 128 bit atomic LE
+ helper_check_align(addr, 0xf);
+ if (rd == 0) {
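+ /* %g0 is hardwired to zero, so only the second doubleword
+ (destined for %g1) is actually written back */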
+ env->gregs[1] = ldq_nucleus(addr + 8);
+ if (asi == 0x2c)
+ bswap64s(&env->gregs[1]);
+ } else if (rd < 8) {
+ env->gregs[rd] = ldq_nucleus(addr);
+ env->gregs[rd + 1] = ldq_nucleus(addr + 8);
+ if (asi == 0x2c) {
+ bswap64s(&env->gregs[rd]);
+ bswap64s(&env->gregs[rd + 1]);
+ }
+ } else {
+ env->regwptr[rd] = ldq_nucleus(addr);
+ env->regwptr[rd + 1] = ldq_nucleus(addr + 8);
+ if (asi == 0x2c) {
+ bswap64s(&env->regwptr[rd]);
+ bswap64s(&env->regwptr[rd + 1]);
+ }
+ }
+ break;
+#endif
+ default:
+ helper_check_align(addr, 0x3);
+ if (rd == 0)
+ env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0);
+ else if (rd < 8) {
+ env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0);
+ env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
+ } else {
+ env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0);
+ env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
+ }
+ break;
+ }
+}
+
+void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
+{
+ unsigned int i;
+ target_ulong val;
+
+ helper_check_align(addr, 3);
+ addr = asi_address_mask(env, asi, addr);
+
+ switch (asi) {
+ case 0xf0: // Block load primary
+ case 0xf1: // Block load secondary
+ case 0xf8: // Block load primary LE
+ case 0xf9: // Block load secondary LE
+ if (rd & 7) {
+ raise_exception(TT_ILL_INSN);
+ return;
+ }
+ helper_check_align(addr, 0x3f);
+ for (i = 0; i < 16; i++) {
+ *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4,
+ 0);
+ addr += 4;
+ }
+
+ return;
+ case 0x70: // Block load primary, user privilege
+ case 0x71: // Block load secondary, user privilege
+ if (rd & 7) {
+ raise_exception(TT_ILL_INSN);
+ return;
+ }
+ helper_check_align(addr, 0x3f);
+ for (i = 0; i < 16; i++) {
+ *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x1f, 4,
+ 0);
+ addr += 4;
+ }
+
+ return;
+ default:
+ break;
+ }
+
+ val = helper_ld_asi(addr, asi, size, 0);
+ switch(size) {
+ default:
+ case 4:
+ *((uint32_t *)&env->fpr[rd]) = val;
+ break;
+ case 8:
+ *((int64_t *)&DT0) = val;
+ break;
+ case 16:
+ // XXX
+ break;
+ }
+}
+
+void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
+{
+ unsigned int i;
+ target_ulong val = 0;
+
+ helper_check_align(addr, 3);
+ addr = asi_address_mask(env, asi, addr);
+
+ switch (asi) {
+ case 0xe0: // UA2007 Block commit store primary (cache flush)
+ case 0xe1: // UA2007 Block commit store secondary (cache flush)
+ case 0xf0: // Block store primary
+ case 0xf1: // Block store secondary
+ case 0xf8: // Block store primary LE
+ case 0xf9: // Block store secondary LE
+ if (rd & 7) {
+ raise_exception(TT_ILL_INSN);
+ return;
+ }
+ helper_check_align(addr, 0x3f);
+ for (i = 0; i < 16; i++) {
+ val = *(uint32_t *)&env->fpr[rd++];
+ helper_st_asi(addr, val, asi & 0x8f, 4);
+ addr += 4;
+ }
+
+ return;
+ case 0x70: // Block store primary, user privilege
+ case 0x71: // Block store secondary, user privilege
+ if (rd & 7) {
+ raise_exception(TT_ILL_INSN);
+ return;
+ }
+ helper_check_align(addr, 0x3f);
+ for (i = 0; i < 16; i++) {
+ val = *(uint32_t *)&env->fpr[rd++];
+ helper_st_asi(addr, val, asi & 0x1f, 4);
+ addr += 4;
+ }
+
+ return;
+ default:
+ break;
+ }
+
+ switch(size) {
+ default:
+ case 4:
+ val = *((uint32_t *)&env->fpr[rd]);
+ break;
+ case 8:
+ val = *((int64_t *)&DT0);
+ break;
+ case 16:
+ // XXX
+ break;
+ }
+ helper_st_asi(addr, val, asi, size);
+}
+
+target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
+ target_ulong val2, uint32_t asi)
+{
+ target_ulong ret;
+
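+ /* 32-bit compare-and-swap: fetch the old word, store val1 only if
+ the old word equals val2, and always return the old value */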
+ val2 &= 0xffffffffUL;
+ ret = helper_ld_asi(addr, asi, 4, 0);
+ ret &= 0xffffffffUL;
+ if (val2 == ret)
+ helper_st_asi(addr, val1 & 0xffffffffUL, asi, 4);
+ return ret;
+}
+
+target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
+ target_ulong val2, uint32_t asi)
+{
+ target_ulong ret;
+
+ ret = helper_ld_asi(addr, asi, 8, 0);
+ if (val2 == ret)
+ helper_st_asi(addr, val1, asi, 8);
+ return ret;
+}
+#endif /* TARGET_SPARC64 */
+
+#ifndef TARGET_SPARC64
+void helper_rett(void)
+{
+ unsigned int cwp;
+
+ if (env->psret == 1)
+ raise_exception(TT_ILL_INSN);
+
+ env->psret = 1;
+ cwp = cwp_inc(env->cwp + 1);
+ if (env->wim & (1 << cwp)) {
+ raise_exception(TT_WIN_UNF);
+ }
+ set_cwp(cwp);
+ env->psrs = env->psrps;
+}
+#endif
+
+static target_ulong helper_udiv_common(target_ulong a, target_ulong b, int cc)
+{
+ int overflow = 0;
+ uint64_t x0;
+ uint32_t x1;
+
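+ /* the 64-bit dividend is Y (high word) concatenated with the rs1
+ operand (low word), as SPARC V8 division requires */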
+ x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
+ x1 = (b & 0xffffffff);
+
+ if (x1 == 0) {
+ raise_exception(TT_DIV_ZERO);
+ }
+
+ x0 = x0 / x1;
+ if (x0 > 0xffffffff) {
+ x0 = 0xffffffff;
+ overflow = 1;
+ }
+
+ if (cc) {
+ env->cc_dst = x0;
+ env->cc_src2 = overflow;
+ env->cc_op = CC_OP_DIV;
+ }
+ return x0;
+}
+
+target_ulong helper_udiv(target_ulong a, target_ulong b)
+{
+ return helper_udiv_common(a, b, 0);
+}
+
+target_ulong helper_udiv_cc(target_ulong a, target_ulong b)
+{
+ return helper_udiv_common(a, b, 1);
+}
+
+static target_ulong helper_sdiv_common(target_ulong a, target_ulong b, int cc)
+{
+ int overflow = 0;
+ int64_t x0;
+ int32_t x1;
+
+ x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
+ x1 = (b & 0xffffffff);
+
+ if (x1 == 0) {
+ raise_exception(TT_DIV_ZERO);
+ }
+
+ x0 = x0 / x1;
+ if ((int32_t) x0 != x0) {
+ x0 = x0 < 0 ? 0x80000000 : 0x7fffffff;
+ overflow = 1;
+ }
+
+ if (cc) {
+ env->cc_dst = x0;
+ env->cc_src2 = overflow;
+ env->cc_op = CC_OP_DIV;
+ }
+ return x0;
+}
+
+target_ulong helper_sdiv(target_ulong a, target_ulong b)
+{
+ return helper_sdiv_common(a, b, 0);
+}
+
+target_ulong helper_sdiv_cc(target_ulong a, target_ulong b)
+{
+ return helper_sdiv_common(a, b, 1);
+}
+
+void helper_stdf(target_ulong addr, int mem_idx)
+{
+ helper_check_align(addr, 7);
+#if !defined(CONFIG_USER_ONLY)
+ switch (mem_idx) {
+ case MMU_USER_IDX:
+ stfq_user(addr, DT0);
+ break;
+ case MMU_KERNEL_IDX:
+ stfq_kernel(addr, DT0);
+ break;
+#ifdef TARGET_SPARC64
+ case MMU_HYPV_IDX:
+ stfq_hypv(addr, DT0);
+ break;
+#endif
+ default:
+ DPRINTF_MMU("helper_stdf: need to check MMU idx %d\n", mem_idx);
+ break;
+ }
+#else
+ stfq_raw(address_mask(env, addr), DT0);
+#endif
+}
+
+void helper_lddf(target_ulong addr, int mem_idx)
+{
+ helper_check_align(addr, 7);
+#if !defined(CONFIG_USER_ONLY)
+ switch (mem_idx) {
+ case MMU_USER_IDX:
+ DT0 = ldfq_user(addr);
+ break;
+ case MMU_KERNEL_IDX:
+ DT0 = ldfq_kernel(addr);
+ break;
+#ifdef TARGET_SPARC64
+ case MMU_HYPV_IDX:
+ DT0 = ldfq_hypv(addr);
+ break;
+#endif
+ default:
+ DPRINTF_MMU("helper_lddf: need to check MMU idx %d\n", mem_idx);
+ break;
+ }
+#else
+ DT0 = ldfq_raw(address_mask(env, addr));
+#endif
+}
+
+void helper_ldqf(target_ulong addr, int mem_idx)
+{
+ // XXX add 128 bit load
+ CPU_QuadU u;
+
+ helper_check_align(addr, 7);
+#if !defined(CONFIG_USER_ONLY)
+ switch (mem_idx) {
+ case MMU_USER_IDX:
+ u.ll.upper = ldq_user(addr);
+ u.ll.lower = ldq_user(addr + 8);
+ QT0 = u.q;
+ break;
+ case MMU_KERNEL_IDX:
+ u.ll.upper = ldq_kernel(addr);
+ u.ll.lower = ldq_kernel(addr + 8);
+ QT0 = u.q;
+ break;
+#ifdef TARGET_SPARC64
+ case MMU_HYPV_IDX:
+ u.ll.upper = ldq_hypv(addr);
+ u.ll.lower = ldq_hypv(addr + 8);
+ QT0 = u.q;
+ break;
+#endif
+ default:
+ DPRINTF_MMU("helper_ldqf: need to check MMU idx %d\n", mem_idx);
+ break;
+ }
+#else
+ u.ll.upper = ldq_raw(address_mask(env, addr));
+ u.ll.lower = ldq_raw(address_mask(env, addr + 8));
+ QT0 = u.q;
+#endif
+}
+
+void helper_stqf(target_ulong addr, int mem_idx)
+{
+ // XXX add 128 bit store
+ CPU_QuadU u;
+
+ helper_check_align(addr, 7);
+#if !defined(CONFIG_USER_ONLY)
+ switch (mem_idx) {
+ case MMU_USER_IDX:
+ u.q = QT0;
+ stq_user(addr, u.ll.upper);
+ stq_user(addr + 8, u.ll.lower);
+ break;
+ case MMU_KERNEL_IDX:
+ u.q = QT0;
+ stq_kernel(addr, u.ll.upper);
+ stq_kernel(addr + 8, u.ll.lower);
+ break;
+#ifdef TARGET_SPARC64
+ case MMU_HYPV_IDX:
+ u.q = QT0;
+ stq_hypv(addr, u.ll.upper);
+ stq_hypv(addr + 8, u.ll.lower);
+ break;
+#endif
+ default:
+ DPRINTF_MMU("helper_stqf: need to check MMU idx %d\n", mem_idx);
+ break;
+ }
+#else
+ u.q = QT0;
+ stq_raw(address_mask(env, addr), u.ll.upper);
+ stq_raw(address_mask(env, addr + 8), u.ll.lower);
+#endif
+}
+
+static inline void set_fsr(void)
+{
+ int rnd_mode;
+
+ switch (env->fsr & FSR_RD_MASK) {
+ case FSR_RD_NEAREST:
+ rnd_mode = float_round_nearest_even;
+ break;
+ default:
+ case FSR_RD_ZERO:
+ rnd_mode = float_round_to_zero;
+ break;
+ case FSR_RD_POS:
+ rnd_mode = float_round_up;
+ break;
+ case FSR_RD_NEG:
+ rnd_mode = float_round_down;
+ break;
+ }
+ set_float_rounding_mode(rnd_mode, &env->fp_status);
+}
+
+void helper_ldfsr(uint32_t new_fsr)
+{
+ env->fsr = (new_fsr & FSR_LDFSR_MASK) | (env->fsr & FSR_LDFSR_OLDMASK);
+ set_fsr();
+}
+
+#ifdef TARGET_SPARC64
+void helper_ldxfsr(uint64_t new_fsr)
+{
+ env->fsr = (new_fsr & FSR_LDXFSR_MASK) | (env->fsr & FSR_LDXFSR_OLDMASK);
+ set_fsr();
+}
+#endif
+
+void helper_debug(void)
+{
+ env->exception_index = EXCP_DEBUG;
+ cpu_loop_exit();
+}
+
+#ifndef TARGET_SPARC64
+/* XXX: use another pointer for %iN registers to avoid slow wrapping
+ handling ? */
+void helper_save(void)
+{
+ uint32_t cwp;
+
+ cwp = cwp_dec(env->cwp - 1);
+ if (env->wim & (1 << cwp)) {
+ raise_exception(TT_WIN_OVF);
+ }
+ set_cwp(cwp);
+}
+
+void helper_restore(void)
+{
+ uint32_t cwp;
+
+ cwp = cwp_inc(env->cwp + 1);
+ if (env->wim & (1 << cwp)) {
+ raise_exception(TT_WIN_UNF);
+ }
+ set_cwp(cwp);
+}
+
+void helper_wrpsr(target_ulong new_psr)
+{
+ if ((new_psr & PSR_CWP) >= env->nwindows) {
+ raise_exception(TT_ILL_INSN);
+ } else {
+ cpu_put_psr(env, new_psr);
+ }
+}
+
+target_ulong helper_rdpsr(void)
+{
+ return get_psr();
+}
+
+#else
+/* XXX: use another pointer for %iN registers to avoid slow wrapping
+ handling ? */
+void helper_save(void)
+{
+ uint32_t cwp;
+
+ cwp = cwp_dec(env->cwp - 1);
+ if (env->cansave == 0) {
+ raise_exception(TT_SPILL | (env->otherwin != 0 ?
+ (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
+ ((env->wstate & 0x7) << 2)));
+ } else {
+ if (env->cleanwin - env->canrestore == 0) {
+ // XXX Clean windows without trap
+ raise_exception(TT_CLRWIN);
+ } else {
+ env->cansave--;
+ env->canrestore++;
+ set_cwp(cwp);
+ }
+ }
+}
+
+void helper_restore(void)
+{
+ uint32_t cwp;
+
+ cwp = cwp_inc(env->cwp + 1);
+ if (env->canrestore == 0) {
+ raise_exception(TT_FILL | (env->otherwin != 0 ?
+ (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
+ ((env->wstate & 0x7) << 2)));
+ } else {
+ env->cansave++;
+ env->canrestore--;
+ set_cwp(cwp);
+ }
+}
+
+void helper_flushw(void)
+{
+ if (env->cansave != env->nwindows - 2) {
+ raise_exception(TT_SPILL | (env->otherwin != 0 ?
+ (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
+ ((env->wstate & 0x7) << 2)));
+ }
+}
+
+void helper_saved(void)
+{
+ env->cansave++;
+ if (env->otherwin == 0)
+ env->canrestore--;
+ else
+ env->otherwin--;
+}
+
+void helper_restored(void)
+{
+ env->canrestore++;
+ if (env->cleanwin < env->nwindows - 1)
+ env->cleanwin++;
+ if (env->otherwin == 0)
+ env->cansave--;
+ else
+ env->otherwin--;
+}
+
+static target_ulong get_ccr(void)
+{
+ target_ulong psr;
+
+ psr = get_psr();
+
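+ /* CCR layout: xcc in bits [7:4], icc in bits [3:0]; internally both
+ condition-code sets are kept at bit 20 of their PSR-style words */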
+ return ((env->xcc >> 20) << 4) | ((psr & PSR_ICC) >> 20);
+}
+
+target_ulong cpu_get_ccr(CPUState *env1)
+{
+ CPUState *saved_env;
+ target_ulong ret;
+
+ saved_env = env;
+ env = env1;
+ ret = get_ccr();
+ env = saved_env;
+ return ret;
+}
+
+static void put_ccr(target_ulong val)
+{
+ target_ulong tmp = val;
+
+ env->xcc = (tmp >> 4) << 20;
+ env->psr = (tmp & 0xf) << 20;
+ CC_OP = CC_OP_FLAGS;
+}
+
+void cpu_put_ccr(CPUState *env1, target_ulong val)
+{
+ CPUState *saved_env;
+
+ saved_env = env;
+ env = env1;
+ put_ccr(val);
+ env = saved_env;
+}
+
+static target_ulong get_cwp64(void)
+{
+ return env->nwindows - 1 - env->cwp;
+}
+
+target_ulong cpu_get_cwp64(CPUState *env1)
+{
+ CPUState *saved_env;
+ target_ulong ret;
+
+ saved_env = env;
+ env = env1;
+ ret = get_cwp64();
+ env = saved_env;
+ return ret;
+}
+
+static void put_cwp64(int cwp)
+{
+ if (unlikely(cwp >= env->nwindows || cwp < 0)) {
+ cwp %= env->nwindows;
+ }
+ set_cwp(env->nwindows - 1 - cwp);
+}
+
+void cpu_put_cwp64(CPUState *env1, int cwp)
+{
+ CPUState *saved_env;
+
+ saved_env = env;
+ env = env1;
+ put_cwp64(cwp);
+ env = saved_env;
+}
+
+target_ulong helper_rdccr(void)
+{
+ return get_ccr();
+}
+
+void helper_wrccr(target_ulong new_ccr)
+{
+ put_ccr(new_ccr);
+}
+
+// CWP handling is reversed in V9, but we still use the V8 register
+// order.
+target_ulong helper_rdcwp(void)
+{
+ return get_cwp64();
+}
+
+void helper_wrcwp(target_ulong new_cwp)
+{
+ put_cwp64(new_cwp);
+}
+
+// This macro uses non-native bit order
+#define GET_FIELD(X, FROM, TO) \
+ ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))
+
+// This macro uses the order in the manuals, i.e. bit 0 is 2^0
+#define GET_FIELD_SP(X, FROM, TO) \
+ GET_FIELD(X, 63 - (TO), 63 - (FROM))
+
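+/* VIS ARRAY8: converts packed 3-D fixed-point coordinates into a blocked
+ (tiled) memory offset; cubesize selects the block dimension */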
+target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
+{
+ return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
+ (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
+ (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
+ (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
+ (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
+ (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
+ (((pixel_addr >> 55) & 1) << 4) |
+ (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
+ GET_FIELD_SP(pixel_addr, 11, 12);
+}
+
+target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
+{
+ uint64_t tmp;
+
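+ /* VIS alignaddr: latch the low three bits of the sum in GSR.align
+ for a later faligndata, and return an 8-byte aligned address */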
+ tmp = addr + offset;
+ env->gsr &= ~7ULL;
+ env->gsr |= tmp & 7ULL;
+ return tmp & ~7ULL;
+}
+
+target_ulong helper_popc(target_ulong val)
+{
+ return ctpop64(val);
+}
+
+static inline uint64_t *get_gregset(uint32_t pstate)
+{
+ switch (pstate) {
+ default:
+ DPRINTF_PSTATE("ERROR in get_gregset: active pstate bits=%x%s%s%s\n",
+ pstate,
+ (pstate & PS_IG) ? " IG" : "",
+ (pstate & PS_MG) ? " MG" : "",
+ (pstate & PS_AG) ? " AG" : "");
+ /* pass through to normal set of global registers */
+ case 0:
+ return env->bgregs;
+ case PS_AG:
+ return env->agregs;
+ case PS_MG:
+ return env->mgregs;
+ case PS_IG:
+ return env->igregs;
+ }
+}
+
+static inline void change_pstate(uint32_t new_pstate)
+{
+ uint32_t pstate_regs, new_pstate_regs;
+ uint64_t *src, *dst;
+
+ if (env->def->features & CPU_FEATURE_GL) {
+ // PS_AG is not implemented in this case
+ new_pstate &= ~PS_AG;
+ }
+
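+ /* 0xc01 masks the global register set selectors: PS_IG (0x800),
+ PS_MG (0x400) and PS_AG (0x001) */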
+ pstate_regs = env->pstate & 0xc01;
+ new_pstate_regs = new_pstate & 0xc01;
+
+ if (new_pstate_regs != pstate_regs) {
+ DPRINTF_PSTATE("change_pstate: switching regs old=%x new=%x\n",
+ pstate_regs, new_pstate_regs);
+ // Switch global register bank
+ src = get_gregset(new_pstate_regs);
+ dst = get_gregset(pstate_regs);
+ memcpy32(dst, env->gregs);
+ memcpy32(env->gregs, src);
+ } else {
+ DPRINTF_PSTATE("change_pstate: regs new=%x (unchanged)\n",
+ new_pstate_regs);
+ }
+ env->pstate = new_pstate;
+}
+
+void helper_wrpstate(target_ulong new_state)
+{
+ change_pstate(new_state & 0xf3f);
+
+#if !defined(CONFIG_USER_ONLY)
+ if (cpu_interrupts_enabled(env)) {
+ cpu_check_irqs(env);
+ }
+#endif
+}
+
+void helper_wrpil(target_ulong new_pil)
+{
+#if !defined(CONFIG_USER_ONLY)
+ DPRINTF_PSTATE("helper_wrpil old=%x new=%x\n",
+ env->psrpil, (uint32_t)new_pil);
+
+ env->psrpil = new_pil;
+
+ if (cpu_interrupts_enabled(env)) {
+ cpu_check_irqs(env);
+ }
+#endif
+}
+
+void helper_done(void)
+{
+ trap_state* tsptr = cpu_tsptr(env);
+
+ env->pc = tsptr->tnpc;
+ env->npc = tsptr->tnpc + 4;
+ put_ccr(tsptr->tstate >> 32);
+ env->asi = (tsptr->tstate >> 24) & 0xff;
+ change_pstate((tsptr->tstate >> 8) & 0xf3f);
+ put_cwp64(tsptr->tstate & 0xff);
+ env->tl--;
+
+ DPRINTF_PSTATE("... helper_done tl=%d\n", env->tl);
+
+#if !defined(CONFIG_USER_ONLY)
+ if (cpu_interrupts_enabled(env)) {
+ cpu_check_irqs(env);
+ }
+#endif
+}
+
+void helper_retry(void)
+{
+ trap_state* tsptr = cpu_tsptr(env);
+
+ env->pc = tsptr->tpc;
+ env->npc = tsptr->tnpc;
+ put_ccr(tsptr->tstate >> 32);
+ env->asi = (tsptr->tstate >> 24) & 0xff;
+ change_pstate((tsptr->tstate >> 8) & 0xf3f);
+ put_cwp64(tsptr->tstate & 0xff);
+ env->tl--;
+
+ DPRINTF_PSTATE("... helper_retry tl=%d\n", env->tl);
+
+#if !defined(CONFIG_USER_ONLY)
+ if (cpu_interrupts_enabled(env)) {
+ cpu_check_irqs(env);
+ }
+#endif
+}
+
+static void do_modify_softint(const char* operation, uint32_t value)
+{
+ if (env->softint != value) {
+ env->softint = value;
+ DPRINTF_PSTATE(": %s new %08x\n", operation, env->softint);
+#if !defined(CONFIG_USER_ONLY)
+ if (cpu_interrupts_enabled(env)) {
+ cpu_check_irqs(env);
+ }
+#endif
+ }
+}
+
+void helper_set_softint(uint64_t value)
+{
+ do_modify_softint("helper_set_softint", env->softint | (uint32_t)value);
+}
+
+void helper_clear_softint(uint64_t value)
+{
+ do_modify_softint("helper_clear_softint", env->softint & (uint32_t)~value);
+}
+
+void helper_write_softint(uint64_t value)
+{
+ do_modify_softint("helper_write_softint", (uint32_t)value);
+}
+#endif
+
+void helper_flush(target_ulong addr)
+{
+ addr &= ~7;
+ tb_invalidate_page_range(addr, addr + 8);
+}
+
+#ifdef TARGET_SPARC64
+#ifdef DEBUG_PCALL
+static const char * const excp_names[0x80] = {
+ [TT_TFAULT] = "Instruction Access Fault",
+ [TT_TMISS] = "Instruction Access MMU Miss",
+ [TT_CODE_ACCESS] = "Instruction Access Error",
+ [TT_ILL_INSN] = "Illegal Instruction",
+ [TT_PRIV_INSN] = "Privileged Instruction",
+ [TT_NFPU_INSN] = "FPU Disabled",
+ [TT_FP_EXCP] = "FPU Exception",
+ [TT_TOVF] = "Tag Overflow",
+ [TT_CLRWIN] = "Clean Windows",
+ [TT_DIV_ZERO] = "Division By Zero",
+ [TT_DFAULT] = "Data Access Fault",
+ [TT_DMISS] = "Data Access MMU Miss",
+ [TT_DATA_ACCESS] = "Data Access Error",
+ [TT_DPROT] = "Data Protection Error",
+ [TT_UNALIGNED] = "Unaligned Memory Access",
+ [TT_PRIV_ACT] = "Privileged Action",
+ [TT_EXTINT | 0x1] = "External Interrupt 1",
+ [TT_EXTINT | 0x2] = "External Interrupt 2",
+ [TT_EXTINT | 0x3] = "External Interrupt 3",
+ [TT_EXTINT | 0x4] = "External Interrupt 4",
+ [TT_EXTINT | 0x5] = "External Interrupt 5",
+ [TT_EXTINT | 0x6] = "External Interrupt 6",
+ [TT_EXTINT | 0x7] = "External Interrupt 7",
+ [TT_EXTINT | 0x8] = "External Interrupt 8",
+ [TT_EXTINT | 0x9] = "External Interrupt 9",
+ [TT_EXTINT | 0xa] = "External Interrupt 10",
+ [TT_EXTINT | 0xb] = "External Interrupt 11",
+ [TT_EXTINT | 0xc] = "External Interrupt 12",
+ [TT_EXTINT | 0xd] = "External Interrupt 13",
+ [TT_EXTINT | 0xe] = "External Interrupt 14",
+ [TT_EXTINT | 0xf] = "External Interrupt 15",
+};
+#endif
+
+trap_state* cpu_tsptr(CPUState* env)
+{
+ return &env->ts[env->tl & MAXTL_MASK];
+}
+
+void do_interrupt(CPUState *env)
+{
+ int intno = env->exception_index;
+ trap_state* tsptr;
+
+#ifdef DEBUG_PCALL
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ static int count;
+ const char *name;
+
+ if (intno < 0 || intno >= 0x180)
+ name = "Unknown";
+ else if (intno >= 0x100)
+ name = "Trap Instruction";
+ else if (intno >= 0xc0)
+ name = "Window Fill";
+ else if (intno >= 0x80)
+ name = "Window Spill";
+ else {
+ name = excp_names[intno];
+ if (!name)
+ name = "Unknown";
+ }
+
+ qemu_log("%6d: %s (v=%04x) pc=%016" PRIx64 " npc=%016" PRIx64
+ " SP=%016" PRIx64 "\n",
+ count, name, intno,
+ env->pc,
+ env->npc, env->regwptr[6]);
+ log_cpu_state(env, 0);
+#if 0
+ {
+ int i;
+ uint8_t *ptr;
+
+ qemu_log(" code=");
+ ptr = (uint8_t *)env->pc;
+ for(i = 0; i < 16; i++) {
+ qemu_log(" %02x", ldub(ptr + i));
+ }
+ qemu_log("\n");
+ }
+#endif
+ count++;
+ }
+#endif
+#if !defined(CONFIG_USER_ONLY)
+ if (env->tl >= env->maxtl) {
+ cpu_abort(env, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
+ " Error state", env->exception_index, env->tl, env->maxtl);
+ return;
+ }
+#endif
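+ /* per SPARC V9, taking a trap while already at MAXTL-1 puts the
+ processor into RED (Reset/Error/Debug) state */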
+ if (env->tl < env->maxtl - 1) {
+ env->tl++;
+ } else {
+ env->pstate |= PS_RED;
+ if (env->tl < env->maxtl)
+ env->tl++;
+ }
+ tsptr = cpu_tsptr(env);
+
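+ /* TSTATE packs CCR into bits [39:32], ASI into [31:24], PSTATE into
+ [19:8] and CWP into the low byte */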
+ tsptr->tstate = (get_ccr() << 32) |
+ ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
+ get_cwp64();
+ tsptr->tpc = env->pc;
+ tsptr->tnpc = env->npc;
+ tsptr->tt = intno;
+
+ switch (intno) {
+ case TT_IVEC:
+ change_pstate(PS_PEF | PS_PRIV | PS_IG);
+ break;
+ case TT_TFAULT:
+ case TT_DFAULT:
+ case TT_TMISS ... TT_TMISS + 3:
+ case TT_DMISS ... TT_DMISS + 3:
+ case TT_DPROT ... TT_DPROT + 3:
+ change_pstate(PS_PEF | PS_PRIV | PS_MG);
+ break;
+ default:
+ change_pstate(PS_PEF | PS_PRIV | PS_AG);
+ break;
+ }
+
+ if (intno == TT_CLRWIN) {
+ set_cwp(cwp_dec(env->cwp - 1));
+ } else if ((intno & 0x1c0) == TT_SPILL) {
+ set_cwp(cwp_dec(env->cwp - env->cansave - 2));
+ } else if ((intno & 0x1c0) == TT_FILL) {
+ set_cwp(cwp_inc(env->cwp + 1));
+ }
+ env->tbr &= ~0x7fffULL;
+ env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
+ env->pc = env->tbr;
+ env->npc = env->pc + 4;
+ env->exception_index = -1;
+}
+#else
+#ifdef DEBUG_PCALL
+static const char * const excp_names[0x80] = {
+ [TT_TFAULT] = "Instruction Access Fault",
+ [TT_ILL_INSN] = "Illegal Instruction",
+ [TT_PRIV_INSN] = "Privileged Instruction",
+ [TT_NFPU_INSN] = "FPU Disabled",
+ [TT_WIN_OVF] = "Window Overflow",
+ [TT_WIN_UNF] = "Window Underflow",
+ [TT_UNALIGNED] = "Unaligned Memory Access",
+ [TT_FP_EXCP] = "FPU Exception",
+ [TT_DFAULT] = "Data Access Fault",
+ [TT_TOVF] = "Tag Overflow",
+ [TT_EXTINT | 0x1] = "External Interrupt 1",
+ [TT_EXTINT | 0x2] = "External Interrupt 2",
+ [TT_EXTINT | 0x3] = "External Interrupt 3",
+ [TT_EXTINT | 0x4] = "External Interrupt 4",
+ [TT_EXTINT | 0x5] = "External Interrupt 5",
+ [TT_EXTINT | 0x6] = "External Interrupt 6",
+ [TT_EXTINT | 0x7] = "External Interrupt 7",
+ [TT_EXTINT | 0x8] = "External Interrupt 8",
+ [TT_EXTINT | 0x9] = "External Interrupt 9",
+ [TT_EXTINT | 0xa] = "External Interrupt 10",
+ [TT_EXTINT | 0xb] = "External Interrupt 11",
+ [TT_EXTINT | 0xc] = "External Interrupt 12",
+ [TT_EXTINT | 0xd] = "External Interrupt 13",
+ [TT_EXTINT | 0xe] = "External Interrupt 14",
+ [TT_EXTINT | 0xf] = "External Interrupt 15",
+ [TT_CODE_ACCESS] = "Instruction Access Error",
+ [TT_DATA_ACCESS] = "Data Access Error",
+ [TT_DIV_ZERO] = "Division By Zero",
+ [TT_NCP_INSN] = "Coprocessor Disabled",
+};
+#endif
+
+void do_interrupt(CPUState *env)
+{
+ int cwp, intno = env->exception_index;
+
+#ifdef DEBUG_PCALL
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ static int count;
+ const char *name;
+
+ if (intno < 0 || intno >= 0x100)
+ name = "Unknown";
+ else if (intno >= 0x80)
+ name = "Trap Instruction";
+ else {
+ name = excp_names[intno];
+ if (!name)
+ name = "Unknown";
+ }
+
+ qemu_log("%6d: %s (v=%02x) pc=%08x npc=%08x SP=%08x\n",
+ count, name, intno,
+ env->pc,
+ env->npc, env->regwptr[6]);
+ log_cpu_state(env, 0);
+#if 0
+ {
+ int i;
+ uint8_t *ptr;
+
+ qemu_log(" code=");
+ ptr = (uint8_t *)env->pc;
+ for(i = 0; i < 16; i++) {
+ qemu_log(" %02x", ldub(ptr + i));
+ }
+ qemu_log("\n");
+ }
+#endif
+ count++;
+ }
+#endif
+#if !defined(CONFIG_USER_ONLY)
+ if (env->psret == 0) {
+ cpu_abort(env, "Trap 0x%02x while interrupts disabled, Error state",
+ env->exception_index);
+ return;
+ }
+#endif
+ env->psret = 0;
+ cwp = cwp_dec(env->cwp - 1);
+ set_cwp(cwp);
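+ /* V8 trap entry: %l1 and %l2 of the new window receive the trapped
+ PC and nPC (regwptr[9] and regwptr[10] are the window's locals) */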
+ env->regwptr[9] = env->pc;
+ env->regwptr[10] = env->npc;
+ env->psrps = env->psrs;
+ env->psrs = 1;
+ env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
+ env->pc = env->tbr;
+ env->npc = env->pc + 4;
+ env->exception_index = -1;
+
+#if !defined(CONFIG_USER_ONLY)
+ /* IRQ acknowledgment */
+ if ((intno & ~15) == TT_EXTINT && env->qemu_irq_ack != NULL) {
+ env->qemu_irq_ack(env->irq_manager, intno);
+ }
+#endif
+}
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+
+static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
+ void *retaddr);
+
+#define MMUSUFFIX _mmu
+#define ALIGNED_ONLY
+
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+
+/* XXX: make it generic ? */
+static void cpu_restore_state2(void *retaddr)
+{
+ TranslationBlock *tb;
+ unsigned long pc;
+
+ if (retaddr) {
+ /* now we have a real cpu fault */
+ pc = (unsigned long)retaddr;
+ tb = tb_find_pc(pc);
+ if (tb) {
+ /* the PC is inside the translated code. It means that we have
+ a virtual CPU fault */
+ cpu_restore_state(tb, env, pc, (void *)(long)env->cond);
+ }
+ }
+}
+
+static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
+ void *retaddr)
+{
+#ifdef DEBUG_UNALIGNED
+ printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
+ "\n", addr, env->pc);
+#endif
+ cpu_restore_state2(retaddr);
+ raise_exception(TT_UNALIGNED);
+}
+
+/* try to fill the TLB and return an exception if error. If retaddr is
+ NULL, it means that the function was called in C code (i.e. not
+ from generated code or from helper.c) */
+/* XXX: fix it to restore all registers */
+void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
+{
+ int ret;
+ CPUState *saved_env;
+
+ /* XXX: hack to restore env in all cases, even if not called from
+ generated code */
+ saved_env = env;
+ env = cpu_single_env;
+
+ ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
+ if (ret) {
+ cpu_restore_state2(retaddr);
+ cpu_loop_exit();
+ }
+ env = saved_env;
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
+#ifndef TARGET_SPARC64
+#if !defined(CONFIG_USER_ONLY)
+void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
+ int is_asi, int size)
+{
+ CPUState *saved_env;
+ int fault_type;
+
+ /* XXX: hack to restore env in all cases, even if not called from
+ generated code */
+ saved_env = env;
+ env = cpu_single_env;
+#ifdef DEBUG_UNASSIGNED
+ if (is_asi)
+ printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
+ " asi 0x%02x from " TARGET_FMT_lx "\n",
+ is_exec ? "exec" : is_write ? "write" : "read", size,
+ size == 1 ? "" : "s", addr, is_asi, env->pc);
+ else
+ printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
+ " from " TARGET_FMT_lx "\n",
+ is_exec ? "exec" : is_write ? "write" : "read", size,
+ size == 1 ? "" : "s", addr, env->pc);
+#endif
+ /* Don't overwrite translation and access faults */
+ fault_type = (env->mmuregs[3] & 0x1c) >> 2;
+ if ((fault_type > 4) || (fault_type == 0)) {
+ env->mmuregs[3] = 0; /* Fault status register */
+ if (is_asi)
+ env->mmuregs[3] |= 1 << 16;
+ if (env->psrs)
+ env->mmuregs[3] |= 1 << 5;
+ if (is_exec)
+ env->mmuregs[3] |= 1 << 6;
+ if (is_write)
+ env->mmuregs[3] |= 1 << 7;
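+        /* FT = 5 (access bus error), FAV = 1 (fault address valid) */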
+ env->mmuregs[3] |= (5 << 2) | 2;
+ /* SuperSPARC will never place instruction fault addresses in the FAR */
+ if (!is_exec) {
+ env->mmuregs[4] = addr; /* Fault address register */
+ }
+ }
+ /* overflow (same type fault was not read before another fault) */
+    if (fault_type == ((env->mmuregs[3] & 0x1c) >> 2)) {
+ env->mmuregs[3] |= 1;
+ }
+
+ if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
+ if (is_exec)
+ raise_exception(TT_CODE_ACCESS);
+ else
+ raise_exception(TT_DATA_ACCESS);
+ }
+
+ /* flush neverland mappings created during no-fault mode,
+ so the sequential MMU faults report proper fault types */
+ if (env->mmuregs[0] & MMU_NF) {
+ tlb_flush(env, 1);
+ }
+
+ env = saved_env;
+}
+#endif
+#else
+#if defined(CONFIG_USER_ONLY)
+static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
+ int is_asi, int size)
+#else
+void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
+ int is_asi, int size)
+#endif
+{
+ CPUState *saved_env;
+
+ /* XXX: hack to restore env in all cases, even if not called from
+ generated code */
+ saved_env = env;
+ env = cpu_single_env;
+
+#ifdef DEBUG_UNASSIGNED
+ printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
+ "\n", addr, env->pc);
+#endif
+
+ if (is_exec)
+ raise_exception(TT_CODE_ACCESS);
+ else
+ raise_exception(TT_DATA_ACCESS);
+
+ env = saved_env;
+}
+#endif
+
+
+#ifdef TARGET_SPARC64
+void helper_tick_set_count(void *opaque, uint64_t count)
+{
+#if !defined(CONFIG_USER_ONLY)
+ cpu_tick_set_count(opaque, count);
+#endif
+}
+
+uint64_t helper_tick_get_count(void *opaque)
+{
+#if !defined(CONFIG_USER_ONLY)
+ return cpu_tick_get_count(opaque);
+#else
+ return 0;
+#endif
+}
+
+void helper_tick_set_limit(void *opaque, uint64_t limit)
+{
+#if !defined(CONFIG_USER_ONLY)
+ cpu_tick_set_limit(opaque, limit);
+#endif
+}
+#endif
diff --git a/target-sparc/translate.c b/target-sparc/translate.c
new file mode 100644
index 0000000..e26462e
--- /dev/null
+++ b/target-sparc/translate.c
@@ -0,0 +1,5106 @@
+/*
+ SPARC translation
+
+ Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
+ Copyright (C) 2003-2005 Fabrice Bellard
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "cpu.h"
+#include "exec-all.h"
+#include "disas.h"
+#include "helper.h"
+#include "tcg-op.h"
+
+#define GEN_HELPER 1
+#include "helper.h"
+
+#define DEBUG_DISAS
+
+#define DYNAMIC_PC 1 /* dynamic pc value */
+#define JUMP_PC 2 /* dynamic pc value which takes only two values
+ according to jump_pc[T2] */
+
+/* global register indexes */
+static TCGv_ptr cpu_env, cpu_regwptr;
+static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
+static TCGv_i32 cpu_cc_op;
+static TCGv_i32 cpu_psr;
+static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
+static TCGv cpu_y;
+#ifndef CONFIG_USER_ONLY
+static TCGv cpu_tbr;
+#endif
+static TCGv cpu_cond, cpu_dst, cpu_addr, cpu_val;
+#ifdef TARGET_SPARC64
+static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
+static TCGv cpu_gsr;
+static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
+static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
+static TCGv_i32 cpu_softint;
+#else
+static TCGv cpu_wim;
+#endif
+/* local register indexes (only used inside old micro ops) */
+static TCGv cpu_tmp0;
+static TCGv_i32 cpu_tmp32;
+static TCGv_i64 cpu_tmp64;
+/* Floating point registers */
+static TCGv_i32 cpu_fpr[TARGET_FPREGS];
+
+static target_ulong gen_opc_npc[OPC_BUF_SIZE];
+static target_ulong gen_opc_jump_pc[2];
+
+#include "gen-icount.h"
+
+typedef struct DisasContext {
+ target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
+ target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
+ target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
+ int is_br;
+ int mem_idx;
+ int fpu_enabled;
+ int address_mask_32bit;
+ int singlestep;
+ uint32_t cc_op; /* current CC operation */
+ struct TranslationBlock *tb;
+ sparc_def_t *def;
+} DisasContext;
+
+// This macro uses non-native bit order, i.e. bit 0 is the MSB
+#define GET_FIELD(X, FROM, TO) \
+ ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
+
+// This macro uses the order in the manuals, i.e. bit 0 is 2^0 (the LSB)
+#define GET_FIELD_SP(X, FROM, TO) \
+ GET_FIELD(X, 31 - (TO), 31 - (FROM))
+
+#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
+#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
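+// Example: GET_FIELD(insn, 0, 1) extracts insn<31:30> (the op field);
+// GET_FIELD_SP(insn, 0, 13) extracts insn<13:0>.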
+
+#ifdef TARGET_SPARC64
+#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
+#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
+#else
+#define DFPREG(r) (r & 0x1e)
+#define QFPREG(r) (r & 0x1c)
+#endif
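+
+/* V9 encodes the high bit of a double/quad FP register number in the
+   low bit of its 5-bit field (e.g. %f32 is encoded as 1); the macros
+   above move that bit back into position 5. */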
+
+#define UA2005_HTRAP_MASK 0xff
+#define V8_TRAP_MASK 0x7f
+
+static int sign_extend(int x, int len)
+{
+ len = 32 - len;
+ return (x << len) >> len;
+}
+
+#define IS_IMM (insn & (1<<13))
+
+/* floating point registers moves */
+static void gen_op_load_fpr_DT0(unsigned int src)
+{
+ tcg_gen_st_i32(cpu_fpr[src], cpu_env, offsetof(CPUSPARCState, dt0) +
+ offsetof(CPU_DoubleU, l.upper));
+ tcg_gen_st_i32(cpu_fpr[src + 1], cpu_env, offsetof(CPUSPARCState, dt0) +
+ offsetof(CPU_DoubleU, l.lower));
+}
+
+static void gen_op_load_fpr_DT1(unsigned int src)
+{
+ tcg_gen_st_i32(cpu_fpr[src], cpu_env, offsetof(CPUSPARCState, dt1) +
+ offsetof(CPU_DoubleU, l.upper));
+ tcg_gen_st_i32(cpu_fpr[src + 1], cpu_env, offsetof(CPUSPARCState, dt1) +
+ offsetof(CPU_DoubleU, l.lower));
+}
+
+static void gen_op_store_DT0_fpr(unsigned int dst)
+{
+ tcg_gen_ld_i32(cpu_fpr[dst], cpu_env, offsetof(CPUSPARCState, dt0) +
+ offsetof(CPU_DoubleU, l.upper));
+ tcg_gen_ld_i32(cpu_fpr[dst + 1], cpu_env, offsetof(CPUSPARCState, dt0) +
+ offsetof(CPU_DoubleU, l.lower));
+}
+
+static void gen_op_load_fpr_QT0(unsigned int src)
+{
+ tcg_gen_st_i32(cpu_fpr[src], cpu_env, offsetof(CPUSPARCState, qt0) +
+ offsetof(CPU_QuadU, l.upmost));
+ tcg_gen_st_i32(cpu_fpr[src + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
+ offsetof(CPU_QuadU, l.upper));
+ tcg_gen_st_i32(cpu_fpr[src + 2], cpu_env, offsetof(CPUSPARCState, qt0) +
+ offsetof(CPU_QuadU, l.lower));
+ tcg_gen_st_i32(cpu_fpr[src + 3], cpu_env, offsetof(CPUSPARCState, qt0) +
+ offsetof(CPU_QuadU, l.lowest));
+}
+
+static void gen_op_load_fpr_QT1(unsigned int src)
+{
+ tcg_gen_st_i32(cpu_fpr[src], cpu_env, offsetof(CPUSPARCState, qt1) +
+ offsetof(CPU_QuadU, l.upmost));
+ tcg_gen_st_i32(cpu_fpr[src + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
+ offsetof(CPU_QuadU, l.upper));
+ tcg_gen_st_i32(cpu_fpr[src + 2], cpu_env, offsetof(CPUSPARCState, qt1) +
+ offsetof(CPU_QuadU, l.lower));
+ tcg_gen_st_i32(cpu_fpr[src + 3], cpu_env, offsetof(CPUSPARCState, qt1) +
+ offsetof(CPU_QuadU, l.lowest));
+}
+
+static void gen_op_store_QT0_fpr(unsigned int dst)
+{
+ tcg_gen_ld_i32(cpu_fpr[dst], cpu_env, offsetof(CPUSPARCState, qt0) +
+ offsetof(CPU_QuadU, l.upmost));
+ tcg_gen_ld_i32(cpu_fpr[dst + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
+ offsetof(CPU_QuadU, l.upper));
+ tcg_gen_ld_i32(cpu_fpr[dst + 2], cpu_env, offsetof(CPUSPARCState, qt0) +
+ offsetof(CPU_QuadU, l.lower));
+ tcg_gen_ld_i32(cpu_fpr[dst + 3], cpu_env, offsetof(CPUSPARCState, qt0) +
+ offsetof(CPU_QuadU, l.lowest));
+}
+
+/* moves */
+#ifdef CONFIG_USER_ONLY
+#define supervisor(dc) 0
+#ifdef TARGET_SPARC64
+#define hypervisor(dc) 0
+#endif
+#else
+#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
+#ifdef TARGET_SPARC64
+#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
+#else
+#endif
+#endif
+
+#ifdef TARGET_SPARC64
+#ifndef TARGET_ABI32
+#define AM_CHECK(dc) ((dc)->address_mask_32bit)
+#else
+#define AM_CHECK(dc) (1)
+#endif
+#endif
+
+static inline void gen_address_mask(DisasContext *dc, TCGv addr)
+{
+#ifdef TARGET_SPARC64
+ if (AM_CHECK(dc))
+ tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
+#endif
+}
+
+static inline void gen_movl_reg_TN(int reg, TCGv tn)
+{
+ if (reg == 0)
+ tcg_gen_movi_tl(tn, 0);
+ else if (reg < 8)
+ tcg_gen_mov_tl(tn, cpu_gregs[reg]);
+ else {
+ tcg_gen_ld_tl(tn, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
+ }
+}
+
+static inline void gen_movl_TN_reg(int reg, TCGv tn)
+{
+ if (reg == 0)
+ return;
+ else if (reg < 8)
+ tcg_gen_mov_tl(cpu_gregs[reg], tn);
+ else {
+ tcg_gen_st_tl(tn, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
+ }
+}
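+
+/* %g0 always reads as zero; %g1-%g7 live in fixed TCG globals, while
+   the windowed %o, %l and %i registers are reached through regwptr,
+   which tracks the current register window. */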
+
+static inline void gen_goto_tb(DisasContext *s, int tb_num,
+ target_ulong pc, target_ulong npc)
+{
+ TranslationBlock *tb;
+
+ tb = s->tb;
+ if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
+ (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
+ !s->singlestep) {
+ /* jump to same page: we can use a direct jump */
+ tcg_gen_goto_tb(tb_num);
+ tcg_gen_movi_tl(cpu_pc, pc);
+ tcg_gen_movi_tl(cpu_npc, npc);
+ tcg_gen_exit_tb((long)tb + tb_num);
+ } else {
+ /* jump to another page: currently not optimized */
+ tcg_gen_movi_tl(cpu_pc, pc);
+ tcg_gen_movi_tl(cpu_npc, npc);
+ tcg_gen_exit_tb(0);
+ }
+}
+
+// XXX suboptimal
+static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
+{
+ tcg_gen_extu_i32_tl(reg, src);
+ tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
+ tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
+{
+ tcg_gen_extu_i32_tl(reg, src);
+ tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
+ tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
+{
+ tcg_gen_extu_i32_tl(reg, src);
+ tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
+ tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
+{
+ tcg_gen_extu_i32_tl(reg, src);
+ tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
+ tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+static inline void gen_add_tv(TCGv dst, TCGv src1, TCGv src2)
+{
+ TCGv r_temp;
+ TCGv_i32 r_const;
+ int l1;
+
+ l1 = gen_new_label();
+
+ r_temp = tcg_temp_new();
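+    /* Signed overflow occurred iff both operands have the same sign and
+       the result's sign differs, i.e. bit 31 of
+       ~(src1 ^ src2) & (src1 ^ dst) is set. */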
+ tcg_gen_xor_tl(r_temp, src1, src2);
+ tcg_gen_not_tl(r_temp, r_temp);
+ tcg_gen_xor_tl(cpu_tmp0, src1, dst);
+ tcg_gen_and_tl(r_temp, r_temp, cpu_tmp0);
+ tcg_gen_andi_tl(r_temp, r_temp, (1ULL << 31));
+ tcg_gen_brcondi_tl(TCG_COND_EQ, r_temp, 0, l1);
+ r_const = tcg_const_i32(TT_TOVF);
+ gen_helper_raise_exception(r_const);
+ tcg_temp_free_i32(r_const);
+ gen_set_label(l1);
+ tcg_temp_free(r_temp);
+}
+
+static inline void gen_tag_tv(TCGv src1, TCGv src2)
+{
+ int l1;
+ TCGv_i32 r_const;
+
+ l1 = gen_new_label();
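+    /* Tagged add traps if either operand carries a nonzero tag, i.e. if
+       the low two bits of src1 | src2 are set. */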
+ tcg_gen_or_tl(cpu_tmp0, src1, src2);
+ tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0x3);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1);
+ r_const = tcg_const_i32(TT_TOVF);
+ gen_helper_raise_exception(r_const);
+ tcg_temp_free_i32(r_const);
+ gen_set_label(l1);
+}
+
+static inline void gen_op_addi_cc(TCGv dst, TCGv src1, target_long src2)
+{
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_movi_tl(cpu_cc_src2, src2);
+ tcg_gen_addi_tl(cpu_cc_dst, cpu_cc_src, src2);
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
+}
+
+static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
+{
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_mov_tl(cpu_cc_src2, src2);
+ tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
+}
+
+static TCGv_i32 gen_add32_carry32(void)
+{
+ TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
+
+ /* Carry is computed from a previous add: (dst < src) */
+#if TARGET_LONG_BITS == 64
+ cc_src1_32 = tcg_temp_new_i32();
+ cc_src2_32 = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_dst);
+ tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src);
+#else
+ cc_src1_32 = cpu_cc_dst;
+ cc_src2_32 = cpu_cc_src;
+#endif
+
+ carry_32 = tcg_temp_new_i32();
+ tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
+
+#if TARGET_LONG_BITS == 64
+ tcg_temp_free_i32(cc_src1_32);
+ tcg_temp_free_i32(cc_src2_32);
+#endif
+
+ return carry_32;
+}
+
+static TCGv_i32 gen_sub32_carry32(void)
+{
+ TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
+
+ /* Carry is computed from a previous borrow: (src1 < src2) */
+#if TARGET_LONG_BITS == 64
+ cc_src1_32 = tcg_temp_new_i32();
+ cc_src2_32 = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_src);
+ tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src2);
+#else
+ cc_src1_32 = cpu_cc_src;
+ cc_src2_32 = cpu_cc_src2;
+#endif
+
+ carry_32 = tcg_temp_new_i32();
+ tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
+
+#if TARGET_LONG_BITS == 64
+ tcg_temp_free_i32(cc_src1_32);
+ tcg_temp_free_i32(cc_src2_32);
+#endif
+
+ return carry_32;
+}
+
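+/* ADDX/ADDXcc: dst = src1 + src2 + C.  Rather than always calling a
+   helper, recover the carry directly from whichever operation last set
+   the condition codes, as tracked in dc->cc_op. */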
+static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
+ TCGv src2, int update_cc)
+{
+ TCGv_i32 carry_32;
+ TCGv carry;
+
+ switch (dc->cc_op) {
+ case CC_OP_DIV:
+ case CC_OP_LOGIC:
+ /* Carry is known to be zero. Fall back to plain ADD. */
+ if (update_cc) {
+ gen_op_add_cc(dst, src1, src2);
+ } else {
+ tcg_gen_add_tl(dst, src1, src2);
+ }
+ return;
+
+ case CC_OP_ADD:
+ case CC_OP_TADD:
+ case CC_OP_TADDTV:
+#if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
+ {
+ /* For 32-bit hosts, we can re-use the host's hardware carry
+ generation by using an ADD2 opcode. We discard the low
+ part of the output. Ideally we'd combine this operation
+ with the add that generated the carry in the first place. */
+ TCGv dst_low = tcg_temp_new();
+ tcg_gen_op6_i32(INDEX_op_add2_i32, dst_low, dst,
+ cpu_cc_src, src1, cpu_cc_src2, src2);
+ tcg_temp_free(dst_low);
+ goto add_done;
+ }
+#endif
+ carry_32 = gen_add32_carry32();
+ break;
+
+ case CC_OP_SUB:
+ case CC_OP_TSUB:
+ case CC_OP_TSUBTV:
+ carry_32 = gen_sub32_carry32();
+ break;
+
+ default:
+ /* We need external help to produce the carry. */
+ carry_32 = tcg_temp_new_i32();
+ gen_helper_compute_C_icc(carry_32);
+ break;
+ }
+
+#if TARGET_LONG_BITS == 64
+ carry = tcg_temp_new();
+ tcg_gen_extu_i32_i64(carry, carry_32);
+#else
+ carry = carry_32;
+#endif
+
+ tcg_gen_add_tl(dst, src1, src2);
+ tcg_gen_add_tl(dst, dst, carry);
+
+ tcg_temp_free_i32(carry_32);
+#if TARGET_LONG_BITS == 64
+ tcg_temp_free(carry);
+#endif
+
+#if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
+ add_done:
+#endif
+ if (update_cc) {
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_mov_tl(cpu_cc_src2, src2);
+ tcg_gen_mov_tl(cpu_cc_dst, dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
+ dc->cc_op = CC_OP_ADDX;
+ }
+}
+
+static inline void gen_op_tadd_cc(TCGv dst, TCGv src1, TCGv src2)
+{
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_mov_tl(cpu_cc_src2, src2);
+ tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
+}
+
+static inline void gen_op_tadd_ccTV(TCGv dst, TCGv src1, TCGv src2)
+{
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_mov_tl(cpu_cc_src2, src2);
+ gen_tag_tv(cpu_cc_src, cpu_cc_src2);
+ tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+ gen_add_tv(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
+}
+
+static inline void gen_sub_tv(TCGv dst, TCGv src1, TCGv src2)
+{
+ TCGv r_temp;
+ TCGv_i32 r_const;
+ int l1;
+
+ l1 = gen_new_label();
+
+ r_temp = tcg_temp_new();
+ tcg_gen_xor_tl(r_temp, src1, src2);
+ tcg_gen_xor_tl(cpu_tmp0, src1, dst);
+ tcg_gen_and_tl(r_temp, r_temp, cpu_tmp0);
+ tcg_gen_andi_tl(r_temp, r_temp, (1ULL << 31));
+ tcg_gen_brcondi_tl(TCG_COND_EQ, r_temp, 0, l1);
+ r_const = tcg_const_i32(TT_TOVF);
+ gen_helper_raise_exception(r_const);
+ tcg_temp_free_i32(r_const);
+ gen_set_label(l1);
+ tcg_temp_free(r_temp);
+}
+
+static inline void gen_op_subi_cc(TCGv dst, TCGv src1, target_long src2, DisasContext *dc)
+{
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_movi_tl(cpu_cc_src2, src2);
+ if (src2 == 0) {
+ tcg_gen_mov_tl(cpu_cc_dst, src1);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ } else {
+ tcg_gen_subi_tl(cpu_cc_dst, cpu_cc_src, src2);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
+ dc->cc_op = CC_OP_SUB;
+ }
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
+}
+
+static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
+{
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_mov_tl(cpu_cc_src2, src2);
+ tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
+}
+
+static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
+ TCGv src2, int update_cc)
+{
+ TCGv_i32 carry_32;
+ TCGv carry;
+
+ switch (dc->cc_op) {
+ case CC_OP_DIV:
+ case CC_OP_LOGIC:
+ /* Carry is known to be zero. Fall back to plain SUB. */
+ if (update_cc) {
+ gen_op_sub_cc(dst, src1, src2);
+ } else {
+ tcg_gen_sub_tl(dst, src1, src2);
+ }
+ return;
+
+ case CC_OP_ADD:
+ case CC_OP_TADD:
+ case CC_OP_TADDTV:
+ carry_32 = gen_add32_carry32();
+ break;
+
+ case CC_OP_SUB:
+ case CC_OP_TSUB:
+ case CC_OP_TSUBTV:
+#if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
+ {
+ /* For 32-bit hosts, we can re-use the host's hardware carry
+ generation by using a SUB2 opcode. We discard the low
+ part of the output. Ideally we'd combine this operation
+                   with the sub that generated the borrow in the first place. */
+ TCGv dst_low = tcg_temp_new();
+ tcg_gen_op6_i32(INDEX_op_sub2_i32, dst_low, dst,
+ cpu_cc_src, src1, cpu_cc_src2, src2);
+ tcg_temp_free(dst_low);
+ goto sub_done;
+ }
+#endif
+ carry_32 = gen_sub32_carry32();
+ break;
+
+ default:
+ /* We need external help to produce the carry. */
+ carry_32 = tcg_temp_new_i32();
+ gen_helper_compute_C_icc(carry_32);
+ break;
+ }
+
+#if TARGET_LONG_BITS == 64
+ carry = tcg_temp_new();
+ tcg_gen_extu_i32_i64(carry, carry_32);
+#else
+ carry = carry_32;
+#endif
+
+ tcg_gen_sub_tl(dst, src1, src2);
+ tcg_gen_sub_tl(dst, dst, carry);
+
+ tcg_temp_free_i32(carry_32);
+#if TARGET_LONG_BITS == 64
+ tcg_temp_free(carry);
+#endif
+
+#if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
+ sub_done:
+#endif
+ if (update_cc) {
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_mov_tl(cpu_cc_src2, src2);
+ tcg_gen_mov_tl(cpu_cc_dst, dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
+ dc->cc_op = CC_OP_SUBX;
+ }
+}
+
+static inline void gen_op_tsub_cc(TCGv dst, TCGv src1, TCGv src2)
+{
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_mov_tl(cpu_cc_src2, src2);
+ tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
+}
+
+static inline void gen_op_tsub_ccTV(TCGv dst, TCGv src1, TCGv src2)
+{
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_mov_tl(cpu_cc_src2, src2);
+ gen_tag_tv(cpu_cc_src, cpu_cc_src2);
+ tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+ gen_sub_tv(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
+}
+
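+/* MULScc performs one step of the V8 iterative multiply: the low bit of
+   %y selects whether the multiplicand is added, (N ^ V) is shifted into
+   bit 31 of the partial product, and %y shifts right by one. */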
+static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
+{
+ TCGv r_temp;
+ int l1;
+
+ l1 = gen_new_label();
+ r_temp = tcg_temp_new();
+
+ /* old op:
+ if (!(env->y & 1))
+ T1 = 0;
+ */
+ tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
+ tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
+ tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
+ tcg_gen_brcondi_tl(TCG_COND_NE, r_temp, 0, l1);
+ tcg_gen_movi_tl(cpu_cc_src2, 0);
+ gen_set_label(l1);
+
+ // b2 = T0 & 1;
+ // env->y = (b2 << 31) | (env->y >> 1);
+ tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
+ tcg_gen_shli_tl(r_temp, r_temp, 31);
+ tcg_gen_shri_tl(cpu_tmp0, cpu_y, 1);
+ tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0x7fffffff);
+ tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, r_temp);
+ tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
+
+ // b1 = N ^ V;
+ gen_mov_reg_N(cpu_tmp0, cpu_psr);
+ gen_mov_reg_V(r_temp, cpu_psr);
+ tcg_gen_xor_tl(cpu_tmp0, cpu_tmp0, r_temp);
+ tcg_temp_free(r_temp);
+
+ // T0 = (b1 << 31) | (T0 >> 1);
+ // src1 = T0;
+ tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, 31);
+ tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
+ tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
+
+ tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
+}
+
+static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
+{
+ TCGv_i32 r_src1, r_src2;
+ TCGv_i64 r_temp, r_temp2;
+
+ r_src1 = tcg_temp_new_i32();
+ r_src2 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(r_src1, src1);
+ tcg_gen_trunc_tl_i32(r_src2, src2);
+
+ r_temp = tcg_temp_new_i64();
+ r_temp2 = tcg_temp_new_i64();
+
+ if (sign_ext) {
+ tcg_gen_ext_i32_i64(r_temp, r_src2);
+ tcg_gen_ext_i32_i64(r_temp2, r_src1);
+ } else {
+ tcg_gen_extu_i32_i64(r_temp, r_src2);
+ tcg_gen_extu_i32_i64(r_temp2, r_src1);
+ }
+
+ tcg_gen_mul_i64(r_temp2, r_temp, r_temp2);
+
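+    /* The high 32 bits of the product go to the Y register; the
+       destination receives the full product (truncated to 32 bits
+       on V8). */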
+ tcg_gen_shri_i64(r_temp, r_temp2, 32);
+ tcg_gen_trunc_i64_tl(cpu_tmp0, r_temp);
+ tcg_temp_free_i64(r_temp);
+ tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
+
+ tcg_gen_trunc_i64_tl(dst, r_temp2);
+
+ tcg_temp_free_i64(r_temp2);
+
+ tcg_temp_free_i32(r_src1);
+ tcg_temp_free_i32(r_src2);
+}
+
+static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
+{
+ /* zero-extend truncated operands before multiplication */
+ gen_op_multiply(dst, src1, src2, 0);
+}
+
+static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
+{
+ /* sign-extend truncated operands before multiplication */
+ gen_op_multiply(dst, src1, src2, 1);
+}
+
+#ifdef TARGET_SPARC64
+static inline void gen_trap_ifdivzero_tl(TCGv divisor)
+{
+ TCGv_i32 r_const;
+ int l1;
+
+ l1 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_NE, divisor, 0, l1);
+ r_const = tcg_const_i32(TT_DIV_ZERO);
+ gen_helper_raise_exception(r_const);
+ tcg_temp_free_i32(r_const);
+ gen_set_label(l1);
+}
+
+static inline void gen_op_sdivx(TCGv dst, TCGv src1, TCGv src2)
+{
+ int l1, l2;
+
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_mov_tl(cpu_cc_src2, src2);
+ gen_trap_ifdivzero_tl(cpu_cc_src2);
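+    /* The only remaining overflow case is INT64_MIN / -1, which would
+       trap on many hosts; SDIVX simply yields INT64_MIN, so special-case
+       it here. */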
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_cc_src, INT64_MIN, l1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_cc_src2, -1, l1);
+ tcg_gen_movi_i64(dst, INT64_MIN);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_div_i64(dst, cpu_cc_src, cpu_cc_src2);
+ gen_set_label(l2);
+}
+#endif
+
+// 1
+static inline void gen_op_eval_ba(TCGv dst)
+{
+ tcg_gen_movi_tl(dst, 1);
+}
+
+// Z
+static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_Z(dst, src);
+}
+
+// Z | (N ^ V)
+static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_N(cpu_tmp0, src);
+ gen_mov_reg_V(dst, src);
+ tcg_gen_xor_tl(dst, dst, cpu_tmp0);
+ gen_mov_reg_Z(cpu_tmp0, src);
+ tcg_gen_or_tl(dst, dst, cpu_tmp0);
+}
+
+// N ^ V
+static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_V(cpu_tmp0, src);
+ gen_mov_reg_N(dst, src);
+ tcg_gen_xor_tl(dst, dst, cpu_tmp0);
+}
+
+// C | Z
+static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_Z(cpu_tmp0, src);
+ gen_mov_reg_C(dst, src);
+ tcg_gen_or_tl(dst, dst, cpu_tmp0);
+}
+
+// C
+static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_C(dst, src);
+}
+
+// V
+static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_V(dst, src);
+}
+
+// 0
+static inline void gen_op_eval_bn(TCGv dst)
+{
+ tcg_gen_movi_tl(dst, 0);
+}
+
+// N
+static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_N(dst, src);
+}
+
+// !Z
+static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_Z(dst, src);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !(Z | (N ^ V))
+static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_N(cpu_tmp0, src);
+ gen_mov_reg_V(dst, src);
+ tcg_gen_xor_tl(dst, dst, cpu_tmp0);
+ gen_mov_reg_Z(cpu_tmp0, src);
+ tcg_gen_or_tl(dst, dst, cpu_tmp0);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !(N ^ V)
+static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_V(cpu_tmp0, src);
+ gen_mov_reg_N(dst, src);
+ tcg_gen_xor_tl(dst, dst, cpu_tmp0);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !(C | Z)
+static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_Z(cpu_tmp0, src);
+ gen_mov_reg_C(dst, src);
+ tcg_gen_or_tl(dst, dst, cpu_tmp0);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !C
+static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_C(dst, src);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !N
+static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_N(dst, src);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !V
+static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_V(dst, src);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+/*
+ FPSR bit field FCC1 | FCC0:
+ 0 =
+ 1 <
+ 2 >
+ 3 unordered
+*/
+static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
+ unsigned int fcc_offset)
+{
+ tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
+ tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
+ unsigned int fcc_offset)
+{
+ tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
+ tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+// !0: FCC0 | FCC1
+static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
+ tcg_gen_or_tl(dst, dst, cpu_tmp0);
+}
+
+// 1 or 2: FCC0 ^ FCC1
+static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
+ tcg_gen_xor_tl(dst, dst, cpu_tmp0);
+}
+
+// 1 or 3: FCC0
+static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+}
+
+// 1: FCC0 & !FCC1
+static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
+ tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1);
+ tcg_gen_and_tl(dst, dst, cpu_tmp0);
+}
+
+// 2 or 3: FCC1
+static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC1(dst, src, fcc_offset);
+}
+
+// 2: !FCC0 & FCC1
+static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+ gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
+ tcg_gen_and_tl(dst, dst, cpu_tmp0);
+}
+
+// 3: FCC0 & FCC1
+static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
+ tcg_gen_and_tl(dst, dst, cpu_tmp0);
+}
+
+// 0: !(FCC0 | FCC1)
+static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
+ tcg_gen_or_tl(dst, dst, cpu_tmp0);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// 0 or 3: !(FCC0 ^ FCC1)
+static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
+ tcg_gen_xor_tl(dst, dst, cpu_tmp0);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// 0 or 2: !FCC0
+static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !1: !(FCC0 & !FCC1)
+static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
+ tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1);
+ tcg_gen_and_tl(dst, dst, cpu_tmp0);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// 0 or 1: !FCC1
+static inline void gen_op_eval_fble(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC1(dst, src, fcc_offset);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !2: !(!FCC0 & FCC1)
+static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+ gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
+ tcg_gen_and_tl(dst, dst, cpu_tmp0);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !3: !(FCC0 & FCC1)
+static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
+ tcg_gen_and_tl(dst, dst, cpu_tmp0);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
+ target_ulong pc2, TCGv r_cond)
+{
+ int l1;
+
+ l1 = gen_new_label();
+
+ tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
+
+ gen_goto_tb(dc, 0, pc1, pc1 + 4);
+
+ gen_set_label(l1);
+ gen_goto_tb(dc, 1, pc2, pc2 + 4);
+}
+
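+/* Conditional branch with the annul bit set: if taken, the delay slot
+   (pc2) is executed and control continues at the target (pc1); if not
+   taken, the delay slot is annulled and execution resumes at pc2 + 4. */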
+static inline void gen_branch_a(DisasContext *dc, target_ulong pc1,
+ target_ulong pc2, TCGv r_cond)
+{
+ int l1;
+
+ l1 = gen_new_label();
+
+ tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
+
+ gen_goto_tb(dc, 0, pc2, pc1);
+
+ gen_set_label(l1);
+ gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8);
+}
+
+static inline void gen_generic_branch(target_ulong npc1, target_ulong npc2,
+ TCGv r_cond)
+{
+ int l1, l2;
+
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+
+ tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
+
+ tcg_gen_movi_tl(cpu_npc, npc1);
+ tcg_gen_br(l2);
+
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_npc, npc2);
+ gen_set_label(l2);
+}
+
+/* call this function before using the condition register as it may
+ have been set for a jump */
+static inline void flush_cond(DisasContext *dc, TCGv cond)
+{
+ if (dc->npc == JUMP_PC) {
+ gen_generic_branch(dc->jump_pc[0], dc->jump_pc[1], cond);
+ dc->npc = DYNAMIC_PC;
+ }
+}
+
+static inline void save_npc(DisasContext *dc, TCGv cond)
+{
+ if (dc->npc == JUMP_PC) {
+ gen_generic_branch(dc->jump_pc[0], dc->jump_pc[1], cond);
+ dc->npc = DYNAMIC_PC;
+ } else if (dc->npc != DYNAMIC_PC) {
+ tcg_gen_movi_tl(cpu_npc, dc->npc);
+ }
+}
+
+static inline void save_state(DisasContext *dc, TCGv cond)
+{
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ /* flush pending conditional evaluations before exposing cpu state */
+ if (dc->cc_op != CC_OP_FLAGS) {
+ dc->cc_op = CC_OP_FLAGS;
+ gen_helper_compute_psr();
+ }
+ save_npc(dc, cond);
+}
+
+static inline void gen_mov_pc_npc(DisasContext *dc, TCGv cond)
+{
+ if (dc->npc == JUMP_PC) {
+ gen_generic_branch(dc->jump_pc[0], dc->jump_pc[1], cond);
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ dc->pc = DYNAMIC_PC;
+ } else if (dc->npc == DYNAMIC_PC) {
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ dc->pc = DYNAMIC_PC;
+ } else {
+ dc->pc = dc->npc;
+ }
+}
+
+static inline void gen_op_next_insn(void)
+{
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
+}
+
+static inline void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
+ DisasContext *dc)
+{
+ TCGv_i32 r_src;
+
+#ifdef TARGET_SPARC64
+ if (cc)
+ r_src = cpu_xcc;
+ else
+ r_src = cpu_psr;
+#else
+ r_src = cpu_psr;
+#endif
+ switch (dc->cc_op) {
+ case CC_OP_FLAGS:
+ break;
+ default:
+ gen_helper_compute_psr();
+ dc->cc_op = CC_OP_FLAGS;
+ break;
+ }
+ switch (cond) {
+ case 0x0:
+ gen_op_eval_bn(r_dst);
+ break;
+ case 0x1:
+ gen_op_eval_be(r_dst, r_src);
+ break;
+ case 0x2:
+ gen_op_eval_ble(r_dst, r_src);
+ break;
+ case 0x3:
+ gen_op_eval_bl(r_dst, r_src);
+ break;
+ case 0x4:
+ gen_op_eval_bleu(r_dst, r_src);
+ break;
+ case 0x5:
+ gen_op_eval_bcs(r_dst, r_src);
+ break;
+ case 0x6:
+ gen_op_eval_bneg(r_dst, r_src);
+ break;
+ case 0x7:
+ gen_op_eval_bvs(r_dst, r_src);
+ break;
+ case 0x8:
+ gen_op_eval_ba(r_dst);
+ break;
+ case 0x9:
+ gen_op_eval_bne(r_dst, r_src);
+ break;
+ case 0xa:
+ gen_op_eval_bg(r_dst, r_src);
+ break;
+ case 0xb:
+ gen_op_eval_bge(r_dst, r_src);
+ break;
+ case 0xc:
+ gen_op_eval_bgu(r_dst, r_src);
+ break;
+ case 0xd:
+ gen_op_eval_bcc(r_dst, r_src);
+ break;
+ case 0xe:
+ gen_op_eval_bpos(r_dst, r_src);
+ break;
+ case 0xf:
+ gen_op_eval_bvc(r_dst, r_src);
+ break;
+ }
+}
+
+static inline void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
+{
+ unsigned int offset;
+
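+    /* fcc0 lives at FSR bits 11:10 (FSR_FCC0_SHIFT = 10); the V9 fcc1-3
+       fields live at bits 33:32, 35:34 and 37:36, hence the offsets
+       relative to fcc0. */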
+ switch (cc) {
+ default:
+ case 0x0:
+ offset = 0;
+ break;
+ case 0x1:
+ offset = 32 - 10;
+ break;
+ case 0x2:
+ offset = 34 - 10;
+ break;
+ case 0x3:
+ offset = 36 - 10;
+ break;
+ }
+
+ switch (cond) {
+ case 0x0:
+ gen_op_eval_bn(r_dst);
+ break;
+ case 0x1:
+ gen_op_eval_fbne(r_dst, cpu_fsr, offset);
+ break;
+ case 0x2:
+ gen_op_eval_fblg(r_dst, cpu_fsr, offset);
+ break;
+ case 0x3:
+ gen_op_eval_fbul(r_dst, cpu_fsr, offset);
+ break;
+ case 0x4:
+ gen_op_eval_fbl(r_dst, cpu_fsr, offset);
+ break;
+ case 0x5:
+ gen_op_eval_fbug(r_dst, cpu_fsr, offset);
+ break;
+ case 0x6:
+ gen_op_eval_fbg(r_dst, cpu_fsr, offset);
+ break;
+ case 0x7:
+ gen_op_eval_fbu(r_dst, cpu_fsr, offset);
+ break;
+ case 0x8:
+ gen_op_eval_ba(r_dst);
+ break;
+ case 0x9:
+ gen_op_eval_fbe(r_dst, cpu_fsr, offset);
+ break;
+ case 0xa:
+ gen_op_eval_fbue(r_dst, cpu_fsr, offset);
+ break;
+ case 0xb:
+ gen_op_eval_fbge(r_dst, cpu_fsr, offset);
+ break;
+ case 0xc:
+ gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
+ break;
+ case 0xd:
+ gen_op_eval_fble(r_dst, cpu_fsr, offset);
+ break;
+ case 0xe:
+ gen_op_eval_fbule(r_dst, cpu_fsr, offset);
+ break;
+ case 0xf:
+ gen_op_eval_fbo(r_dst, cpu_fsr, offset);
+ break;
+ }
+}
+
+#ifdef TARGET_SPARC64
+// Inverted logic: each entry is the negation of the SPARC register condition
+static const int gen_tcg_cond_reg[8] = {
+ -1,
+ TCG_COND_NE,
+ TCG_COND_GT,
+ TCG_COND_GE,
+ -1,
+ TCG_COND_EQ,
+ TCG_COND_LE,
+ TCG_COND_LT,
+};
+
+static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
+{
+ int l1;
+
+ l1 = gen_new_label();
+ tcg_gen_movi_tl(r_dst, 0);
+ tcg_gen_brcondi_tl(gen_tcg_cond_reg[cond], r_src, 0, l1);
+ tcg_gen_movi_tl(r_dst, 1);
+ gen_set_label(l1);
+}
+#endif
+
+/* XXX: potentially incorrect if dynamic npc */
+static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc,
+ TCGv r_cond)
+{
+ unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
+ target_ulong target = dc->pc + offset;
+
+ if (cond == 0x0) {
+ /* unconditional not taken */
+ if (a) {
+ dc->pc = dc->npc + 4;
+ dc->npc = dc->pc + 4;
+ } else {
+ dc->pc = dc->npc;
+ dc->npc = dc->pc + 4;
+ }
+ } else if (cond == 0x8) {
+ /* unconditional taken */
+ if (a) {
+ dc->pc = target;
+ dc->npc = dc->pc + 4;
+ } else {
+ dc->pc = dc->npc;
+ dc->npc = target;
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ }
+ } else {
+ flush_cond(dc, r_cond);
+ gen_cond(r_cond, cc, cond, dc);
+ if (a) {
+ gen_branch_a(dc, target, dc->npc, r_cond);
+ dc->is_br = 1;
+ } else {
+ dc->pc = dc->npc;
+ dc->jump_pc[0] = target;
+ dc->jump_pc[1] = dc->npc + 4;
+ dc->npc = JUMP_PC;
+ }
+ }
+}
+
+/* XXX: potentially incorrect if dynamic npc */
+static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc,
+ TCGv r_cond)
+{
+ unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
+ target_ulong target = dc->pc + offset;
+
+ if (cond == 0x0) {
+ /* unconditional not taken */
+ if (a) {
+ dc->pc = dc->npc + 4;
+ dc->npc = dc->pc + 4;
+ } else {
+ dc->pc = dc->npc;
+ dc->npc = dc->pc + 4;
+ }
+ } else if (cond == 0x8) {
+ /* unconditional taken */
+ if (a) {
+ dc->pc = target;
+ dc->npc = dc->pc + 4;
+ } else {
+ dc->pc = dc->npc;
+ dc->npc = target;
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ }
+ } else {
+ flush_cond(dc, r_cond);
+ gen_fcond(r_cond, cc, cond);
+ if (a) {
+ gen_branch_a(dc, target, dc->npc, r_cond);
+ dc->is_br = 1;
+ } else {
+ dc->pc = dc->npc;
+ dc->jump_pc[0] = target;
+ dc->jump_pc[1] = dc->npc + 4;
+ dc->npc = JUMP_PC;
+ }
+ }
+}
+
+#ifdef TARGET_SPARC64
+/* XXX: potentially incorrect if dynamic npc */
+static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
+ TCGv r_cond, TCGv r_reg)
+{
+ unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
+ target_ulong target = dc->pc + offset;
+
+ flush_cond(dc, r_cond);
+ gen_cond_reg(r_cond, cond, r_reg);
+ if (a) {
+ gen_branch_a(dc, target, dc->npc, r_cond);
+ dc->is_br = 1;
+ } else {
+ dc->pc = dc->npc;
+ dc->jump_pc[0] = target;
+ dc->jump_pc[1] = dc->npc + 4;
+ dc->npc = JUMP_PC;
+ }
+}
+
+static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
+{
+ switch (fccno) {
+ case 0:
+ gen_helper_fcmps(r_rs1, r_rs2);
+ break;
+ case 1:
+ gen_helper_fcmps_fcc1(r_rs1, r_rs2);
+ break;
+ case 2:
+ gen_helper_fcmps_fcc2(r_rs1, r_rs2);
+ break;
+ case 3:
+ gen_helper_fcmps_fcc3(r_rs1, r_rs2);
+ break;
+ }
+}
+
+static inline void gen_op_fcmpd(int fccno)
+{
+ switch (fccno) {
+ case 0:
+ gen_helper_fcmpd();
+ break;
+ case 1:
+ gen_helper_fcmpd_fcc1();
+ break;
+ case 2:
+ gen_helper_fcmpd_fcc2();
+ break;
+ case 3:
+ gen_helper_fcmpd_fcc3();
+ break;
+ }
+}
+
+static inline void gen_op_fcmpq(int fccno)
+{
+ switch (fccno) {
+ case 0:
+ gen_helper_fcmpq();
+ break;
+ case 1:
+ gen_helper_fcmpq_fcc1();
+ break;
+ case 2:
+ gen_helper_fcmpq_fcc2();
+ break;
+ case 3:
+ gen_helper_fcmpq_fcc3();
+ break;
+ }
+}
+
+static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
+{
+ switch (fccno) {
+ case 0:
+ gen_helper_fcmpes(r_rs1, r_rs2);
+ break;
+ case 1:
+ gen_helper_fcmpes_fcc1(r_rs1, r_rs2);
+ break;
+ case 2:
+ gen_helper_fcmpes_fcc2(r_rs1, r_rs2);
+ break;
+ case 3:
+ gen_helper_fcmpes_fcc3(r_rs1, r_rs2);
+ break;
+ }
+}
+
+static inline void gen_op_fcmped(int fccno)
+{
+ switch (fccno) {
+ case 0:
+ gen_helper_fcmped();
+ break;
+ case 1:
+ gen_helper_fcmped_fcc1();
+ break;
+ case 2:
+ gen_helper_fcmped_fcc2();
+ break;
+ case 3:
+ gen_helper_fcmped_fcc3();
+ break;
+ }
+}
+
+static inline void gen_op_fcmpeq(int fccno)
+{
+ switch (fccno) {
+ case 0:
+ gen_helper_fcmpeq();
+ break;
+ case 1:
+ gen_helper_fcmpeq_fcc1();
+ break;
+ case 2:
+ gen_helper_fcmpeq_fcc2();
+ break;
+ case 3:
+ gen_helper_fcmpeq_fcc3();
+ break;
+ }
+}
+
+#else
+
+static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
+{
+ gen_helper_fcmps(r_rs1, r_rs2);
+}
+
+static inline void gen_op_fcmpd(int fccno)
+{
+ gen_helper_fcmpd();
+}
+
+static inline void gen_op_fcmpq(int fccno)
+{
+ gen_helper_fcmpq();
+}
+
+static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
+{
+ gen_helper_fcmpes(r_rs1, r_rs2);
+}
+
+static inline void gen_op_fcmped(int fccno)
+{
+ gen_helper_fcmped();
+}
+
+static inline void gen_op_fcmpeq(int fccno)
+{
+ gen_helper_fcmpeq();
+}
+#endif
+
+static inline void gen_op_fpexception_im(int fsr_flags)
+{
+ TCGv_i32 r_const;
+
+ tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
+ tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
+ r_const = tcg_const_i32(TT_FP_EXCP);
+ gen_helper_raise_exception(r_const);
+ tcg_temp_free_i32(r_const);
+}
+
+static int gen_trap_ifnofpu(DisasContext *dc, TCGv r_cond)
+{
+#if !defined(CONFIG_USER_ONLY)
+ if (!dc->fpu_enabled) {
+ TCGv_i32 r_const;
+
+ save_state(dc, r_cond);
+ r_const = tcg_const_i32(TT_NFPU_INSN);
+ gen_helper_raise_exception(r_const);
+ tcg_temp_free_i32(r_const);
+ dc->is_br = 1;
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+static inline void gen_op_clear_ieee_excp_and_FTT(void)
+{
+ tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
+}
+
+static inline void gen_clear_float_exceptions(void)
+{
+ gen_helper_clear_float_exceptions();
+}
+
+/* asi moves */
+#ifdef TARGET_SPARC64
+static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
+{
+ int asi;
+ TCGv_i32 r_asi;
+
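+    /* With the immediate form the ASI comes from the %asi register;
+       otherwise it is encoded in bits 12:5 of the instruction. */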
+ if (IS_IMM) {
+ r_asi = tcg_temp_new_i32();
+ tcg_gen_mov_i32(r_asi, cpu_asi);
+ } else {
+ asi = GET_FIELD(insn, 19, 26);
+ r_asi = tcg_const_i32(asi);
+ }
+ return r_asi;
+}
+
+static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
+ int sign)
+{
+ TCGv_i32 r_asi, r_size, r_sign;
+
+ r_asi = gen_get_asi(insn, addr);
+ r_size = tcg_const_i32(size);
+ r_sign = tcg_const_i32(sign);
+ gen_helper_ld_asi(dst, addr, r_asi, r_size, r_sign);
+ tcg_temp_free_i32(r_sign);
+ tcg_temp_free_i32(r_size);
+ tcg_temp_free_i32(r_asi);
+}
+
+static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
+{
+ TCGv_i32 r_asi, r_size;
+
+ r_asi = gen_get_asi(insn, addr);
+ r_size = tcg_const_i32(size);
+ gen_helper_st_asi(addr, src, r_asi, r_size);
+ tcg_temp_free_i32(r_size);
+ tcg_temp_free_i32(r_asi);
+}
+
+static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
+{
+ TCGv_i32 r_asi, r_size, r_rd;
+
+ r_asi = gen_get_asi(insn, addr);
+ r_size = tcg_const_i32(size);
+ r_rd = tcg_const_i32(rd);
+ gen_helper_ldf_asi(addr, r_asi, r_size, r_rd);
+ tcg_temp_free_i32(r_rd);
+ tcg_temp_free_i32(r_size);
+ tcg_temp_free_i32(r_asi);
+}
+
+static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
+{
+ TCGv_i32 r_asi, r_size, r_rd;
+
+ r_asi = gen_get_asi(insn, addr);
+ r_size = tcg_const_i32(size);
+ r_rd = tcg_const_i32(rd);
+ gen_helper_stf_asi(addr, r_asi, r_size, r_rd);
+ tcg_temp_free_i32(r_rd);
+ tcg_temp_free_i32(r_size);
+ tcg_temp_free_i32(r_asi);
+}
+
+static inline void gen_swap_asi(TCGv dst, TCGv addr, int insn)
+{
+ TCGv_i32 r_asi, r_size, r_sign;
+
+ r_asi = gen_get_asi(insn, addr);
+ r_size = tcg_const_i32(4);
+ r_sign = tcg_const_i32(0);
+ gen_helper_ld_asi(cpu_tmp64, addr, r_asi, r_size, r_sign);
+ tcg_temp_free_i32(r_sign);
+ gen_helper_st_asi(addr, dst, r_asi, r_size);
+ tcg_temp_free_i32(r_size);
+ tcg_temp_free_i32(r_asi);
+ tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
+}
+
+static inline void gen_ldda_asi(TCGv hi, TCGv addr, int insn, int rd)
+{
+ TCGv_i32 r_asi, r_rd;
+
+ r_asi = gen_get_asi(insn, addr);
+ r_rd = tcg_const_i32(rd);
+ gen_helper_ldda_asi(addr, r_asi, r_rd);
+ tcg_temp_free_i32(r_rd);
+ tcg_temp_free_i32(r_asi);
+}
+
+static inline void gen_stda_asi(TCGv hi, TCGv addr, int insn, int rd)
+{
+ TCGv_i32 r_asi, r_size;
+
+ gen_movl_reg_TN(rd + 1, cpu_tmp0);
+ tcg_gen_concat_tl_i64(cpu_tmp64, cpu_tmp0, hi);
+ r_asi = gen_get_asi(insn, addr);
+ r_size = tcg_const_i32(8);
+ gen_helper_st_asi(addr, cpu_tmp64, r_asi, r_size);
+ tcg_temp_free_i32(r_size);
+ tcg_temp_free_i32(r_asi);
+}
+
+static inline void gen_cas_asi(TCGv dst, TCGv addr, TCGv val2, int insn,
+ int rd)
+{
+ TCGv r_val1;
+ TCGv_i32 r_asi;
+
+ r_val1 = tcg_temp_new();
+ gen_movl_reg_TN(rd, r_val1);
+ r_asi = gen_get_asi(insn, addr);
+ gen_helper_cas_asi(dst, addr, r_val1, val2, r_asi);
+ tcg_temp_free_i32(r_asi);
+ tcg_temp_free(r_val1);
+}
+
+static inline void gen_casx_asi(TCGv dst, TCGv addr, TCGv val2, int insn,
+ int rd)
+{
+ TCGv_i32 r_asi;
+
+ gen_movl_reg_TN(rd, cpu_tmp64);
+ r_asi = gen_get_asi(insn, addr);
+ gen_helper_casx_asi(dst, addr, cpu_tmp64, val2, r_asi);
+ tcg_temp_free_i32(r_asi);
+}
+
+#elif !defined(CONFIG_USER_ONLY)
+
+static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
+ int sign)
+{
+ TCGv_i32 r_asi, r_size, r_sign;
+
+ r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
+ r_size = tcg_const_i32(size);
+ r_sign = tcg_const_i32(sign);
+ gen_helper_ld_asi(cpu_tmp64, addr, r_asi, r_size, r_sign);
+ tcg_temp_free(r_sign);
+ tcg_temp_free(r_size);
+ tcg_temp_free(r_asi);
+ tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
+}
+
+static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
+{
+ TCGv_i32 r_asi, r_size;
+
+ tcg_gen_extu_tl_i64(cpu_tmp64, src);
+ r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
+ r_size = tcg_const_i32(size);
+ gen_helper_st_asi(addr, cpu_tmp64, r_asi, r_size);
+ tcg_temp_free(r_size);
+ tcg_temp_free(r_asi);
+}
+
+static inline void gen_swap_asi(TCGv dst, TCGv addr, int insn)
+{
+ TCGv_i32 r_asi, r_size, r_sign;
+ TCGv_i64 r_val;
+
+ r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
+ r_size = tcg_const_i32(4);
+ r_sign = tcg_const_i32(0);
+ gen_helper_ld_asi(cpu_tmp64, addr, r_asi, r_size, r_sign);
+ tcg_temp_free(r_sign);
+ r_val = tcg_temp_new_i64();
+ tcg_gen_extu_tl_i64(r_val, dst);
+ gen_helper_st_asi(addr, r_val, r_asi, r_size);
+ tcg_temp_free_i64(r_val);
+ tcg_temp_free(r_size);
+ tcg_temp_free(r_asi);
+ tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
+}
+
+static inline void gen_ldda_asi(TCGv hi, TCGv addr, int insn, int rd)
+{
+ TCGv_i32 r_asi, r_size, r_sign;
+
+ r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
+ r_size = tcg_const_i32(8);
+ r_sign = tcg_const_i32(0);
+ gen_helper_ld_asi(cpu_tmp64, addr, r_asi, r_size, r_sign);
+ tcg_temp_free(r_sign);
+ tcg_temp_free(r_size);
+ tcg_temp_free(r_asi);
+ tcg_gen_trunc_i64_tl(cpu_tmp0, cpu_tmp64);
+ gen_movl_TN_reg(rd + 1, cpu_tmp0);
+ tcg_gen_shri_i64(cpu_tmp64, cpu_tmp64, 32);
+ tcg_gen_trunc_i64_tl(hi, cpu_tmp64);
+ gen_movl_TN_reg(rd, hi);
+}
+
+static inline void gen_stda_asi(TCGv hi, TCGv addr, int insn, int rd)
+{
+ TCGv_i32 r_asi, r_size;
+
+ gen_movl_reg_TN(rd + 1, cpu_tmp0);
+ tcg_gen_concat_tl_i64(cpu_tmp64, cpu_tmp0, hi);
+ r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
+ r_size = tcg_const_i32(8);
+ gen_helper_st_asi(addr, cpu_tmp64, r_asi, r_size);
+ tcg_temp_free(r_size);
+ tcg_temp_free(r_asi);
+}
+#endif
+
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
+{
+ TCGv_i64 r_val;
+ TCGv_i32 r_asi, r_size;
+
+ gen_ld_asi(dst, addr, insn, 1, 0);
+
+ r_val = tcg_const_i64(0xffULL);
+ r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
+ r_size = tcg_const_i32(1);
+ gen_helper_st_asi(addr, r_val, r_asi, r_size);
+ tcg_temp_free_i32(r_size);
+ tcg_temp_free_i32(r_asi);
+ tcg_temp_free_i64(r_val);
+}
+#endif
+
+static inline TCGv get_src1(unsigned int insn, TCGv def)
+{
+ TCGv r_rs1 = def;
+ unsigned int rs1;
+
+ rs1 = GET_FIELD(insn, 13, 17);
+ if (rs1 == 0) {
+ tcg_gen_movi_tl(def, 0);
+ } else if (rs1 < 8) {
+ r_rs1 = cpu_gregs[rs1];
+ } else {
+ tcg_gen_ld_tl(def, cpu_regwptr, (rs1 - 8) * sizeof(target_ulong));
+ }
+ return r_rs1;
+}
+
+static inline TCGv get_src2(unsigned int insn, TCGv def)
+{
+ TCGv r_rs2 = def;
+
+ if (IS_IMM) { /* immediate */
+ target_long simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_movi_tl(def, simm);
+ } else { /* register */
+ unsigned int rs2 = GET_FIELD(insn, 27, 31);
+ if (rs2 == 0) {
+ tcg_gen_movi_tl(def, 0);
+ } else if (rs2 < 8) {
+ r_rs2 = cpu_gregs[rs2];
+ } else {
+ tcg_gen_ld_tl(def, cpu_regwptr, (rs2 - 8) * sizeof(target_ulong));
+ }
+ }
+ return r_rs2;
+}
+
+#ifdef TARGET_SPARC64
+static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
+{
+ TCGv_i32 r_tl = tcg_temp_new_i32();
+
+ /* load env->tl into r_tl */
+ tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
+
+    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be (a power of 2) - 1 */
+ tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
+
+ /* calculate offset to current trap state from env->ts, reuse r_tl */
+ tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
+ tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUState, ts));
+
+ /* tsptr = env->ts[env->tl & MAXTL_MASK] */
+ {
+ TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
+ tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
+ tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
+ tcg_temp_free_ptr(r_tl_tmp);
+ }
+
+ tcg_temp_free_i32(r_tl);
+}
+#endif
+
+#define CHECK_IU_FEATURE(dc, FEATURE) \
+ if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
+ goto illegal_insn;
+#define CHECK_FPU_FEATURE(dc, FEATURE) \
+ if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
+ goto nfpu_insn;
+
+/* before an instruction, dc->pc must be static */
+static void disas_sparc_insn(DisasContext * dc)
+{
+ unsigned int insn, opc, rs1, rs2, rd;
+ TCGv cpu_src1, cpu_src2, cpu_tmp1, cpu_tmp2;
+ target_long simm;
+
+ if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
+ tcg_gen_debug_insn_start(dc->pc);
+ insn = ldl_code(dc->pc);
+ opc = GET_FIELD(insn, 0, 1);
+
+ rd = GET_FIELD(insn, 2, 6);
+
+ cpu_tmp1 = cpu_src1 = tcg_temp_new();
+ cpu_tmp2 = cpu_src2 = tcg_temp_new();
+
+ switch (opc) {
+ case 0: /* branches/sethi */
+ {
+ unsigned int xop = GET_FIELD(insn, 7, 9);
+ int32_t target;
+ switch (xop) {
+#ifdef TARGET_SPARC64
+ case 0x1: /* V9 BPcc */
+ {
+ int cc;
+
+ target = GET_FIELD_SP(insn, 0, 18);
+                    target = sign_extend(target, 19);
+ target <<= 2;
+ cc = GET_FIELD_SP(insn, 20, 21);
+ if (cc == 0)
+ do_branch(dc, target, insn, 0, cpu_cond);
+ else if (cc == 2)
+ do_branch(dc, target, insn, 1, cpu_cond);
+ else
+ goto illegal_insn;
+ goto jmp_insn;
+ }
+ case 0x3: /* V9 BPr */
+ {
+ target = GET_FIELD_SP(insn, 0, 13) |
+ (GET_FIELD_SP(insn, 20, 21) << 14);
+ target = sign_extend(target, 16);
+ target <<= 2;
+ cpu_src1 = get_src1(insn, cpu_src1);
+ do_branch_reg(dc, target, insn, cpu_cond, cpu_src1);
+ goto jmp_insn;
+ }
+ case 0x5: /* V9 FBPcc */
+ {
+ int cc = GET_FIELD_SP(insn, 20, 21);
+ if (gen_trap_ifnofpu(dc, cpu_cond))
+ goto jmp_insn;
+ target = GET_FIELD_SP(insn, 0, 18);
+ target = sign_extend(target, 19);
+ target <<= 2;
+ do_fbranch(dc, target, insn, cc, cpu_cond);
+ goto jmp_insn;
+ }
+#else
+ case 0x7: /* CBN+x */
+ {
+ goto ncp_insn;
+ }
+#endif
+ case 0x2: /* BN+x */
+ {
+ target = GET_FIELD(insn, 10, 31);
+ target = sign_extend(target, 22);
+ target <<= 2;
+ do_branch(dc, target, insn, 0, cpu_cond);
+ goto jmp_insn;
+ }
+ case 0x6: /* FBN+x */
+ {
+ if (gen_trap_ifnofpu(dc, cpu_cond))
+ goto jmp_insn;
+ target = GET_FIELD(insn, 10, 31);
+ target = sign_extend(target, 22);
+ target <<= 2;
+ do_fbranch(dc, target, insn, 0, cpu_cond);
+ goto jmp_insn;
+ }
+ case 0x4: /* SETHI */
+            if (rd) { // SETHI with rd == %g0 is a nop
+ uint32_t value = GET_FIELD(insn, 10, 31);
+ TCGv r_const;
+
+ r_const = tcg_const_tl(value << 10);
+ gen_movl_TN_reg(rd, r_const);
+ tcg_temp_free(r_const);
+ }
+ break;
+ case 0x0: /* UNIMPL */
+ default:
+ goto illegal_insn;
+ }
+ break;
+ }
+ break;
+ case 1: /*CALL*/
+ {
+ target_long target = GET_FIELDs(insn, 2, 31) << 2;
+ TCGv r_const;
+
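+            /* CALL saves its own address into %o7 (r15) and jumps to
+               pc + disp30 * 4, with the delay slot executed first. */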
+ r_const = tcg_const_tl(dc->pc);
+ gen_movl_TN_reg(15, r_const);
+ tcg_temp_free(r_const);
+ target += dc->pc;
+ gen_mov_pc_npc(dc, cpu_cond);
+ dc->npc = target;
+ }
+ goto jmp_insn;
+ case 2: /* FPU & Logical Operations */
+ {
+ unsigned int xop = GET_FIELD(insn, 7, 12);
+ if (xop == 0x3a) { /* generate trap */
+ int cond;
+
+ cpu_src1 = get_src1(insn, cpu_src1);
+ if (IS_IMM) {
+ rs2 = GET_FIELD(insn, 25, 31);
+ tcg_gen_addi_tl(cpu_dst, cpu_src1, rs2);
+ } else {
+ rs2 = GET_FIELD(insn, 27, 31);
+ if (rs2 != 0) {
+ gen_movl_reg_TN(rs2, cpu_src2);
+ tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
+ } else
+ tcg_gen_mov_tl(cpu_dst, cpu_src1);
+ }
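+            /* Tcc: the trap number is rs1 + rs2 (or rs1 + imm7), masked
+               to 7 bits (8 with the hypervisor feature) and offset into
+               the software trap range at TT_TRAP. */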
+
+ cond = GET_FIELD(insn, 3, 6);
+ if (cond == 0x8) { /* Trap Always */
+ save_state(dc, cpu_cond);
+ if ((dc->def->features & CPU_FEATURE_HYPV) &&
+ supervisor(dc))
+ tcg_gen_andi_tl(cpu_dst, cpu_dst, UA2005_HTRAP_MASK);
+ else
+ tcg_gen_andi_tl(cpu_dst, cpu_dst, V8_TRAP_MASK);
+ tcg_gen_addi_tl(cpu_dst, cpu_dst, TT_TRAP);
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_dst);
+
+ if (rs2 == 0 &&
+ dc->def->features & CPU_FEATURE_TA0_SHUTDOWN) {
+
+ gen_helper_shutdown();
+
+ } else {
+ gen_helper_raise_exception(cpu_tmp32);
+ }
+ } else if (cond != 0) {
+ TCGv r_cond = tcg_temp_new();
+ int l1;
+#ifdef TARGET_SPARC64
+ /* V9 icc/xcc */
+ int cc = GET_FIELD_SP(insn, 11, 12);
+
+ save_state(dc, cpu_cond);
+ if (cc == 0)
+ gen_cond(r_cond, 0, cond, dc);
+ else if (cc == 2)
+ gen_cond(r_cond, 1, cond, dc);
+ else
+ goto illegal_insn;
+#else
+ save_state(dc, cpu_cond);
+ gen_cond(r_cond, 0, cond, dc);
+#endif
+ l1 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
+
+ if ((dc->def->features & CPU_FEATURE_HYPV) &&
+ supervisor(dc))
+ tcg_gen_andi_tl(cpu_dst, cpu_dst, UA2005_HTRAP_MASK);
+ else
+ tcg_gen_andi_tl(cpu_dst, cpu_dst, V8_TRAP_MASK);
+ tcg_gen_addi_tl(cpu_dst, cpu_dst, TT_TRAP);
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_dst);
+ gen_helper_raise_exception(cpu_tmp32);
+
+ gen_set_label(l1);
+ tcg_temp_free(r_cond);
+ }
+ gen_op_next_insn();
+ tcg_gen_exit_tb(0);
+ dc->is_br = 1;
+ goto jmp_insn;
+ } else if (xop == 0x28) {
+ rs1 = GET_FIELD(insn, 13, 17);
+ switch(rs1) {
+ case 0: /* rdy */
+#ifndef TARGET_SPARC64
+ case 0x01 ... 0x0e: /* undefined in the SPARCv8
+ manual, rdy on the microSPARC
+ II */
+ case 0x0f: /* stbar in the SPARCv8 manual,
+ rdy on the microSPARC II */
+ case 0x10 ... 0x1f: /* implementation-dependent in the
+ SPARCv8 manual, rdy on the
+ microSPARC II */
+ /* Read Asr17 */
+ if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
+ TCGv r_const;
+
+ /* Read Asr17 for a Leon3 monoprocessor */
+ r_const = tcg_const_tl((1 << 8)
+ | (dc->def->nwindows - 1));
+ gen_movl_TN_reg(rd, r_const);
+ tcg_temp_free(r_const);
+ break;
+ }
+#endif
+ gen_movl_TN_reg(rd, cpu_y);
+ break;
+#ifdef TARGET_SPARC64
+ case 0x2: /* V9 rdccr */
+ gen_helper_compute_psr();
+ gen_helper_rdccr(cpu_dst);
+ gen_movl_TN_reg(rd, cpu_dst);
+ break;
+ case 0x3: /* V9 rdasi */
+ tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
+ gen_movl_TN_reg(rd, cpu_dst);
+ break;
+ case 0x4: /* V9 rdtick */
+ {
+ TCGv_ptr r_tickptr;
+
+ r_tickptr = tcg_temp_new_ptr();
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUState, tick));
+ gen_helper_tick_get_count(cpu_dst, r_tickptr);
+ tcg_temp_free_ptr(r_tickptr);
+ gen_movl_TN_reg(rd, cpu_dst);
+ }
+ break;
+ case 0x5: /* V9 rdpc */
+ {
+ TCGv r_const;
+
+ r_const = tcg_const_tl(dc->pc);
+ gen_movl_TN_reg(rd, r_const);
+ tcg_temp_free(r_const);
+ }
+ break;
+ case 0x6: /* V9 rdfprs */
+ tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
+ gen_movl_TN_reg(rd, cpu_dst);
+ break;
+ case 0xf: /* V9 membar */
+ break; /* no effect */
+ case 0x13: /* Graphics Status */
+ if (gen_trap_ifnofpu(dc, cpu_cond))
+ goto jmp_insn;
+ gen_movl_TN_reg(rd, cpu_gsr);
+ break;
+ case 0x16: /* Softint */
+ tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
+ gen_movl_TN_reg(rd, cpu_dst);
+ break;
+ case 0x17: /* Tick compare */
+ gen_movl_TN_reg(rd, cpu_tick_cmpr);
+ break;
+ case 0x18: /* System tick */
+ {
+ TCGv_ptr r_tickptr;
+
+ r_tickptr = tcg_temp_new_ptr();
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUState, stick));
+ gen_helper_tick_get_count(cpu_dst, r_tickptr);
+ tcg_temp_free_ptr(r_tickptr);
+ gen_movl_TN_reg(rd, cpu_dst);
+ }
+ break;
+ case 0x19: /* System tick compare */
+ gen_movl_TN_reg(rd, cpu_stick_cmpr);
+ break;
+ case 0x10: /* Performance Control */
+ case 0x11: /* Performance Instrumentation Counter */
+ case 0x12: /* Dispatch Control */
+ case 0x14: /* Softint set, WO */
+ case 0x15: /* Softint clear, WO */
+#endif
+ default:
+ goto illegal_insn;
+ }
+#if !defined(CONFIG_USER_ONLY)
+ } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
+#ifndef TARGET_SPARC64
+ if (!supervisor(dc))
+ goto priv_insn;
+ gen_helper_compute_psr();
+ dc->cc_op = CC_OP_FLAGS;
+ gen_helper_rdpsr(cpu_dst);
+#else
+ CHECK_IU_FEATURE(dc, HYPV);
+ if (!hypervisor(dc))
+ goto priv_insn;
+ rs1 = GET_FIELD(insn, 13, 17);
+ switch (rs1) {
+ case 0: // hpstate
+ // gen_op_rdhpstate();
+ break;
+ case 1: // htstate
+ // gen_op_rdhtstate();
+ break;
+ case 3: // hintp
+ tcg_gen_mov_tl(cpu_dst, cpu_hintp);
+ break;
+ case 5: // htba
+ tcg_gen_mov_tl(cpu_dst, cpu_htba);
+ break;
+ case 6: // hver
+ tcg_gen_mov_tl(cpu_dst, cpu_hver);
+ break;
+ case 31: // hstick_cmpr
+ tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
+ break;
+ default:
+ goto illegal_insn;
+ }
+#endif
+ gen_movl_TN_reg(rd, cpu_dst);
+ break;
+ } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
+ if (!supervisor(dc))
+ goto priv_insn;
+#ifdef TARGET_SPARC64
+ rs1 = GET_FIELD(insn, 13, 17);
+ switch (rs1) {
+ case 0: // tpc
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state, tpc));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 1: // tnpc
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state, tnpc));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 2: // tstate
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state, tstate));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 3: // tt
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_ld_i32(cpu_tmp32, r_tsptr,
+ offsetof(trap_state, tt));
+ tcg_temp_free_ptr(r_tsptr);
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
+ }
+ break;
+ case 4: // tick
+ {
+ TCGv_ptr r_tickptr;
+
+ r_tickptr = tcg_temp_new_ptr();
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUState, tick));
+ gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
+ gen_movl_TN_reg(rd, cpu_tmp0);
+ tcg_temp_free_ptr(r_tickptr);
+ }
+ break;
+ case 5: // tba
+ tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
+ break;
+ case 6: // pstate
+ tcg_gen_ld_i32(cpu_tmp32, cpu_env,
+ offsetof(CPUSPARCState, pstate));
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
+ break;
+ case 7: // tl
+ tcg_gen_ld_i32(cpu_tmp32, cpu_env,
+ offsetof(CPUSPARCState, tl));
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
+ break;
+ case 8: // pil
+ tcg_gen_ld_i32(cpu_tmp32, cpu_env,
+ offsetof(CPUSPARCState, psrpil));
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
+ break;
+ case 9: // cwp
+ gen_helper_rdcwp(cpu_tmp0);
+ break;
+ case 10: // cansave
+ tcg_gen_ld_i32(cpu_tmp32, cpu_env,
+ offsetof(CPUSPARCState, cansave));
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
+ break;
+ case 11: // canrestore
+ tcg_gen_ld_i32(cpu_tmp32, cpu_env,
+ offsetof(CPUSPARCState, canrestore));
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
+ break;
+ case 12: // cleanwin
+ tcg_gen_ld_i32(cpu_tmp32, cpu_env,
+ offsetof(CPUSPARCState, cleanwin));
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
+ break;
+ case 13: // otherwin
+ tcg_gen_ld_i32(cpu_tmp32, cpu_env,
+ offsetof(CPUSPARCState, otherwin));
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
+ break;
+ case 14: // wstate
+ tcg_gen_ld_i32(cpu_tmp32, cpu_env,
+ offsetof(CPUSPARCState, wstate));
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
+ break;
+ case 16: // UA2005 gl
+ CHECK_IU_FEATURE(dc, GL);
+ tcg_gen_ld_i32(cpu_tmp32, cpu_env,
+ offsetof(CPUSPARCState, gl));
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
+ break;
+ case 26: // UA2005 strand status
+ CHECK_IU_FEATURE(dc, HYPV);
+ if (!hypervisor(dc))
+ goto priv_insn;
+ tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
+ break;
+ case 31: // ver
+ tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
+ break;
+ case 15: // fq
+ default:
+ goto illegal_insn;
+ }
+#else
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
+#endif
+ gen_movl_TN_reg(rd, cpu_tmp0);
+ break;
+ } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
+#ifdef TARGET_SPARC64
+ save_state(dc, cpu_cond);
+ gen_helper_flushw();
+#else
+ if (!supervisor(dc))
+ goto priv_insn;
+ gen_movl_TN_reg(rd, cpu_tbr);
+#endif
+ break;
+#endif
+        } else if (xop == 0x34) { /* FPU Operations (FPop1) */
+ if (gen_trap_ifnofpu(dc, cpu_cond))
+ goto jmp_insn;
+ gen_op_clear_ieee_excp_and_FTT();
+ rs1 = GET_FIELD(insn, 13, 17);
+ rs2 = GET_FIELD(insn, 27, 31);
+ xop = GET_FIELD(insn, 18, 26);
+ save_state(dc, cpu_cond);
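+            /* Most ops here clear the current IEEE exception flags, run the
+               helper, then check whether a trap is pending; single precision
+               results stage through cpu_tmp32 so a trapping op leaves rd
+               unmodified.  */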
+ switch (xop) {
+ case 0x1: /* fmovs */
+ tcg_gen_mov_i32(cpu_fpr[rd], cpu_fpr[rs2]);
+ break;
+ case 0x5: /* fnegs */
+ gen_helper_fnegs(cpu_fpr[rd], cpu_fpr[rs2]);
+ break;
+ case 0x9: /* fabss */
+ gen_helper_fabss(cpu_fpr[rd], cpu_fpr[rs2]);
+ break;
+ case 0x29: /* fsqrts */
+ CHECK_FPU_FEATURE(dc, FSQRT);
+ gen_clear_float_exceptions();
+ gen_helper_fsqrts(cpu_tmp32, cpu_fpr[rs2]);
+ gen_helper_check_ieee_exceptions();
+ tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32);
+ break;
+ case 0x2a: /* fsqrtd */
+ CHECK_FPU_FEATURE(dc, FSQRT);
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_fsqrtd();
+ gen_helper_check_ieee_exceptions();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x2b: /* fsqrtq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_QT1(QFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_fsqrtq();
+ gen_helper_check_ieee_exceptions();
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ break;
+ case 0x41: /* fadds */
+ gen_clear_float_exceptions();
+ gen_helper_fadds(cpu_tmp32, cpu_fpr[rs1], cpu_fpr[rs2]);
+ gen_helper_check_ieee_exceptions();
+ tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32);
+ break;
+ case 0x42: /* faddd */
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_faddd();
+ gen_helper_check_ieee_exceptions();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x43: /* faddq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_QT0(QFPREG(rs1));
+ gen_op_load_fpr_QT1(QFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_faddq();
+ gen_helper_check_ieee_exceptions();
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ break;
+ case 0x45: /* fsubs */
+ gen_clear_float_exceptions();
+ gen_helper_fsubs(cpu_tmp32, cpu_fpr[rs1], cpu_fpr[rs2]);
+ gen_helper_check_ieee_exceptions();
+ tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32);
+ break;
+ case 0x46: /* fsubd */
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_fsubd();
+ gen_helper_check_ieee_exceptions();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x47: /* fsubq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_QT0(QFPREG(rs1));
+ gen_op_load_fpr_QT1(QFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_fsubq();
+ gen_helper_check_ieee_exceptions();
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ break;
+ case 0x49: /* fmuls */
+ CHECK_FPU_FEATURE(dc, FMUL);
+ gen_clear_float_exceptions();
+ gen_helper_fmuls(cpu_tmp32, cpu_fpr[rs1], cpu_fpr[rs2]);
+ gen_helper_check_ieee_exceptions();
+ tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32);
+ break;
+ case 0x4a: /* fmuld */
+ CHECK_FPU_FEATURE(dc, FMUL);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_fmuld();
+ gen_helper_check_ieee_exceptions();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x4b: /* fmulq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ CHECK_FPU_FEATURE(dc, FMUL);
+ gen_op_load_fpr_QT0(QFPREG(rs1));
+ gen_op_load_fpr_QT1(QFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_fmulq();
+ gen_helper_check_ieee_exceptions();
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ break;
+ case 0x4d: /* fdivs */
+ gen_clear_float_exceptions();
+ gen_helper_fdivs(cpu_tmp32, cpu_fpr[rs1], cpu_fpr[rs2]);
+ gen_helper_check_ieee_exceptions();
+ tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32);
+ break;
+ case 0x4e: /* fdivd */
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_fdivd();
+ gen_helper_check_ieee_exceptions();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x4f: /* fdivq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_QT0(QFPREG(rs1));
+ gen_op_load_fpr_QT1(QFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_fdivq();
+ gen_helper_check_ieee_exceptions();
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ break;
+ case 0x69: /* fsmuld */
+ CHECK_FPU_FEATURE(dc, FSMULD);
+ gen_clear_float_exceptions();
+ gen_helper_fsmuld(cpu_fpr[rs1], cpu_fpr[rs2]);
+ gen_helper_check_ieee_exceptions();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x6e: /* fdmulq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_fdmulq();
+ gen_helper_check_ieee_exceptions();
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ break;
+ case 0xc4: /* fitos */
+ gen_clear_float_exceptions();
+ gen_helper_fitos(cpu_tmp32, cpu_fpr[rs2]);
+ gen_helper_check_ieee_exceptions();
+ tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32);
+ break;
+ case 0xc6: /* fdtos */
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_fdtos(cpu_tmp32);
+ gen_helper_check_ieee_exceptions();
+ tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32);
+ break;
+ case 0xc7: /* fqtos */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_QT1(QFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_fqtos(cpu_tmp32);
+ gen_helper_check_ieee_exceptions();
+ tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32);
+ break;
+ case 0xc8: /* fitod */
+ gen_helper_fitod(cpu_fpr[rs2]);
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0xc9: /* fstod */
+ gen_helper_fstod(cpu_fpr[rs2]);
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0xcb: /* fqtod */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_QT1(QFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_fqtod();
+ gen_helper_check_ieee_exceptions();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0xcc: /* fitoq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_helper_fitoq(cpu_fpr[rs2]);
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ break;
+ case 0xcd: /* fstoq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_helper_fstoq(cpu_fpr[rs2]);
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ break;
+ case 0xce: /* fdtoq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fdtoq();
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ break;
+ case 0xd1: /* fstoi */
+ gen_clear_float_exceptions();
+ gen_helper_fstoi(cpu_tmp32, cpu_fpr[rs2]);
+ gen_helper_check_ieee_exceptions();
+ tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32);
+ break;
+ case 0xd2: /* fdtoi */
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_fdtoi(cpu_tmp32);
+ gen_helper_check_ieee_exceptions();
+ tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32);
+ break;
+ case 0xd3: /* fqtoi */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_QT1(QFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_fqtoi(cpu_tmp32);
+ gen_helper_check_ieee_exceptions();
+ tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32);
+ break;
+#ifdef TARGET_SPARC64
+ case 0x2: /* V9 fmovd */
+ tcg_gen_mov_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs2)]);
+ tcg_gen_mov_i32(cpu_fpr[DFPREG(rd) + 1],
+ cpu_fpr[DFPREG(rs2) + 1]);
+ break;
+ case 0x3: /* V9 fmovq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ tcg_gen_mov_i32(cpu_fpr[QFPREG(rd)], cpu_fpr[QFPREG(rs2)]);
+ tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 1],
+ cpu_fpr[QFPREG(rs2) + 1]);
+ tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 2],
+ cpu_fpr[QFPREG(rs2) + 2]);
+ tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 3],
+ cpu_fpr[QFPREG(rs2) + 3]);
+ break;
+ case 0x6: /* V9 fnegd */
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fnegd();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x7: /* V9 fnegq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_QT1(QFPREG(rs2));
+ gen_helper_fnegq();
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ break;
+ case 0xa: /* V9 fabsd */
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fabsd();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0xb: /* V9 fabsq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_QT1(QFPREG(rs2));
+ gen_helper_fabsq();
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ break;
+ case 0x81: /* V9 fstox */
+ gen_clear_float_exceptions();
+ gen_helper_fstox(cpu_fpr[rs2]);
+ gen_helper_check_ieee_exceptions();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x82: /* V9 fdtox */
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_fdtox();
+ gen_helper_check_ieee_exceptions();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x83: /* V9 fqtox */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_QT1(QFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_fqtox();
+ gen_helper_check_ieee_exceptions();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x84: /* V9 fxtos */
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_fxtos(cpu_tmp32);
+ gen_helper_check_ieee_exceptions();
+ tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32);
+ break;
+ case 0x88: /* V9 fxtod */
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_fxtod();
+ gen_helper_check_ieee_exceptions();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x8c: /* V9 fxtoq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_clear_float_exceptions();
+ gen_helper_fxtoq();
+ gen_helper_check_ieee_exceptions();
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ break;
+#endif
+ default:
+ goto illegal_insn;
+ }
+        } else if (xop == 0x35) { /* FPU Operations (FPop2) */
+#ifdef TARGET_SPARC64
+ int cond;
+#endif
+ if (gen_trap_ifnofpu(dc, cpu_cond))
+ goto jmp_insn;
+ gen_op_clear_ieee_excp_and_FTT();
+ rs1 = GET_FIELD(insn, 13, 17);
+ rs2 = GET_FIELD(insn, 27, 31);
+ xop = GET_FIELD(insn, 18, 26);
+ save_state(dc, cpu_cond);
+#ifdef TARGET_SPARC64
+ if ((xop & 0x11f) == 0x005) { // V9 fmovsr
+ int l1;
+
+ l1 = gen_new_label();
+ cond = GET_FIELD_SP(insn, 14, 17);
+ cpu_src1 = get_src1(insn, cpu_src1);
+ tcg_gen_brcondi_tl(gen_tcg_cond_reg[cond], cpu_src1,
+ 0, l1);
+ tcg_gen_mov_i32(cpu_fpr[rd], cpu_fpr[rs2]);
+ gen_set_label(l1);
+ break;
+ } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
+ int l1;
+
+ l1 = gen_new_label();
+ cond = GET_FIELD_SP(insn, 14, 17);
+ cpu_src1 = get_src1(insn, cpu_src1);
+ tcg_gen_brcondi_tl(gen_tcg_cond_reg[cond], cpu_src1,
+ 0, l1);
+ tcg_gen_mov_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs2)]);
+ tcg_gen_mov_i32(cpu_fpr[DFPREG(rd) + 1], cpu_fpr[DFPREG(rs2) + 1]);
+ gen_set_label(l1);
+ break;
+ } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
+ int l1;
+
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ l1 = gen_new_label();
+ cond = GET_FIELD_SP(insn, 14, 17);
+ cpu_src1 = get_src1(insn, cpu_src1);
+ tcg_gen_brcondi_tl(gen_tcg_cond_reg[cond], cpu_src1,
+ 0, l1);
+ tcg_gen_mov_i32(cpu_fpr[QFPREG(rd)], cpu_fpr[QFPREG(rs2)]);
+ tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 1], cpu_fpr[QFPREG(rs2) + 1]);
+ tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 2], cpu_fpr[QFPREG(rs2) + 2]);
+ tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 3], cpu_fpr[QFPREG(rs2) + 3]);
+ gen_set_label(l1);
+ break;
+ }
+#endif
+ switch (xop) {
+#ifdef TARGET_SPARC64
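+/* The FMOV*CC macros emit a branch-over-move: the condition is evaluated
+   into a temporary and the register copy is skipped when it is false.  */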
+#define FMOVSCC(fcc) \
+ { \
+ TCGv r_cond; \
+ int l1; \
+ \
+ l1 = gen_new_label(); \
+ r_cond = tcg_temp_new(); \
+ cond = GET_FIELD_SP(insn, 14, 17); \
+ gen_fcond(r_cond, fcc, cond); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \
+ 0, l1); \
+ tcg_gen_mov_i32(cpu_fpr[rd], cpu_fpr[rs2]); \
+ gen_set_label(l1); \
+ tcg_temp_free(r_cond); \
+ }
+#define FMOVDCC(fcc) \
+ { \
+ TCGv r_cond; \
+ int l1; \
+ \
+ l1 = gen_new_label(); \
+ r_cond = tcg_temp_new(); \
+ cond = GET_FIELD_SP(insn, 14, 17); \
+ gen_fcond(r_cond, fcc, cond); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \
+ 0, l1); \
+ tcg_gen_mov_i32(cpu_fpr[DFPREG(rd)], \
+ cpu_fpr[DFPREG(rs2)]); \
+ tcg_gen_mov_i32(cpu_fpr[DFPREG(rd) + 1], \
+ cpu_fpr[DFPREG(rs2) + 1]); \
+ gen_set_label(l1); \
+ tcg_temp_free(r_cond); \
+ }
+#define FMOVQCC(fcc) \
+ { \
+ TCGv r_cond; \
+ int l1; \
+ \
+ l1 = gen_new_label(); \
+ r_cond = tcg_temp_new(); \
+ cond = GET_FIELD_SP(insn, 14, 17); \
+ gen_fcond(r_cond, fcc, cond); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \
+ 0, l1); \
+ tcg_gen_mov_i32(cpu_fpr[QFPREG(rd)], \
+ cpu_fpr[QFPREG(rs2)]); \
+ tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 1], \
+ cpu_fpr[QFPREG(rs2) + 1]); \
+ tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 2], \
+ cpu_fpr[QFPREG(rs2) + 2]); \
+ tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 3], \
+ cpu_fpr[QFPREG(rs2) + 3]); \
+ gen_set_label(l1); \
+ tcg_temp_free(r_cond); \
+ }
+ case 0x001: /* V9 fmovscc %fcc0 */
+ FMOVSCC(0);
+ break;
+ case 0x002: /* V9 fmovdcc %fcc0 */
+ FMOVDCC(0);
+ break;
+ case 0x003: /* V9 fmovqcc %fcc0 */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVQCC(0);
+ break;
+ case 0x041: /* V9 fmovscc %fcc1 */
+ FMOVSCC(1);
+ break;
+ case 0x042: /* V9 fmovdcc %fcc1 */
+ FMOVDCC(1);
+ break;
+ case 0x043: /* V9 fmovqcc %fcc1 */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVQCC(1);
+ break;
+ case 0x081: /* V9 fmovscc %fcc2 */
+ FMOVSCC(2);
+ break;
+ case 0x082: /* V9 fmovdcc %fcc2 */
+ FMOVDCC(2);
+ break;
+ case 0x083: /* V9 fmovqcc %fcc2 */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVQCC(2);
+ break;
+ case 0x0c1: /* V9 fmovscc %fcc3 */
+ FMOVSCC(3);
+ break;
+ case 0x0c2: /* V9 fmovdcc %fcc3 */
+ FMOVDCC(3);
+ break;
+ case 0x0c3: /* V9 fmovqcc %fcc3 */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVQCC(3);
+ break;
+#undef FMOVSCC
+#undef FMOVDCC
+#undef FMOVQCC
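+/* Same branch-over-move pattern, but the predicate now comes from the
+   integer condition codes (%icc or %xcc) via gen_cond().  */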
+#define FMOVSCC(icc) \
+ { \
+ TCGv r_cond; \
+ int l1; \
+ \
+ l1 = gen_new_label(); \
+ r_cond = tcg_temp_new(); \
+ cond = GET_FIELD_SP(insn, 14, 17); \
+ gen_cond(r_cond, icc, cond, dc); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \
+ 0, l1); \
+ tcg_gen_mov_i32(cpu_fpr[rd], cpu_fpr[rs2]); \
+ gen_set_label(l1); \
+ tcg_temp_free(r_cond); \
+ }
+#define FMOVDCC(icc) \
+ { \
+ TCGv r_cond; \
+ int l1; \
+ \
+ l1 = gen_new_label(); \
+ r_cond = tcg_temp_new(); \
+ cond = GET_FIELD_SP(insn, 14, 17); \
+ gen_cond(r_cond, icc, cond, dc); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \
+ 0, l1); \
+ tcg_gen_mov_i32(cpu_fpr[DFPREG(rd)], \
+ cpu_fpr[DFPREG(rs2)]); \
+ tcg_gen_mov_i32(cpu_fpr[DFPREG(rd) + 1], \
+ cpu_fpr[DFPREG(rs2) + 1]); \
+ gen_set_label(l1); \
+ tcg_temp_free(r_cond); \
+ }
+#define FMOVQCC(icc) \
+ { \
+ TCGv r_cond; \
+ int l1; \
+ \
+ l1 = gen_new_label(); \
+ r_cond = tcg_temp_new(); \
+ cond = GET_FIELD_SP(insn, 14, 17); \
+ gen_cond(r_cond, icc, cond, dc); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \
+ 0, l1); \
+ tcg_gen_mov_i32(cpu_fpr[QFPREG(rd)], \
+ cpu_fpr[QFPREG(rs2)]); \
+ tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 1], \
+ cpu_fpr[QFPREG(rs2) + 1]); \
+ tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 2], \
+ cpu_fpr[QFPREG(rs2) + 2]); \
+ tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 3], \
+ cpu_fpr[QFPREG(rs2) + 3]); \
+ gen_set_label(l1); \
+ tcg_temp_free(r_cond); \
+ }
+
+ case 0x101: /* V9 fmovscc %icc */
+ FMOVSCC(0);
+ break;
+ case 0x102: /* V9 fmovdcc %icc */
+                        FMOVDCC(0);
+                        break;
+ case 0x103: /* V9 fmovqcc %icc */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVQCC(0);
+ break;
+ case 0x181: /* V9 fmovscc %xcc */
+ FMOVSCC(1);
+ break;
+ case 0x182: /* V9 fmovdcc %xcc */
+ FMOVDCC(1);
+ break;
+ case 0x183: /* V9 fmovqcc %xcc */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVQCC(1);
+ break;
+#undef FMOVSCC
+#undef FMOVDCC
+#undef FMOVQCC
+#endif
+ case 0x51: /* fcmps, V9 %fcc */
+ gen_op_fcmps(rd & 3, cpu_fpr[rs1], cpu_fpr[rs2]);
+ break;
+ case 0x52: /* fcmpd, V9 %fcc */
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_op_fcmpd(rd & 3);
+ break;
+ case 0x53: /* fcmpq, V9 %fcc */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_QT0(QFPREG(rs1));
+ gen_op_load_fpr_QT1(QFPREG(rs2));
+ gen_op_fcmpq(rd & 3);
+ break;
+ case 0x55: /* fcmpes, V9 %fcc */
+ gen_op_fcmpes(rd & 3, cpu_fpr[rs1], cpu_fpr[rs2]);
+ break;
+ case 0x56: /* fcmped, V9 %fcc */
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_op_fcmped(rd & 3);
+ break;
+ case 0x57: /* fcmpeq, V9 %fcc */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_QT0(QFPREG(rs1));
+ gen_op_load_fpr_QT1(QFPREG(rs2));
+ gen_op_fcmpeq(rd & 3);
+ break;
+ default:
+ goto illegal_insn;
+ }
+ } else if (xop == 0x2) {
+ // clr/mov shortcut
+
+ rs1 = GET_FIELD(insn, 13, 17);
+ if (rs1 == 0) {
+                // or %g0, x, rd -> mov rd, x
+ if (IS_IMM) { /* immediate */
+ TCGv r_const;
+
+ simm = GET_FIELDs(insn, 19, 31);
+ r_const = tcg_const_tl(simm);
+ gen_movl_TN_reg(rd, r_const);
+ tcg_temp_free(r_const);
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ gen_movl_reg_TN(rs2, cpu_dst);
+ gen_movl_TN_reg(rd, cpu_dst);
+ }
+ } else {
+ cpu_src1 = get_src1(insn, cpu_src1);
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_ori_tl(cpu_dst, cpu_src1, simm);
+ gen_movl_TN_reg(rd, cpu_dst);
+ } else { /* register */
+                    // or x, %g0, rd -> mov rd, x
+ rs2 = GET_FIELD(insn, 27, 31);
+ if (rs2 != 0) {
+ gen_movl_reg_TN(rs2, cpu_src2);
+ tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
+ gen_movl_TN_reg(rd, cpu_dst);
+ } else
+ gen_movl_TN_reg(rd, cpu_src1);
+ }
+ }
+#ifdef TARGET_SPARC64
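+        /* For the V9 shifts, insn bit 12 (the X field) selects the 64-bit
+           form: sllx/srlx/srax take a 6-bit shift count, the 32-bit forms
+           a 5-bit one (srl/sra also truncate or sign-extend the low word
+           first), e.g. "sllx %o1, 12, %o2" vs. "sll %o1, 12, %o2".  */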
+ } else if (xop == 0x25) { /* sll, V9 sllx */
+ cpu_src1 = get_src1(insn, cpu_src1);
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 20, 31);
+ if (insn & (1 << 12)) {
+ tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
+ } else {
+ tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
+ }
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ gen_movl_reg_TN(rs2, cpu_src2);
+ if (insn & (1 << 12)) {
+ tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
+ } else {
+ tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
+ }
+ tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
+ }
+ gen_movl_TN_reg(rd, cpu_dst);
+ } else if (xop == 0x26) { /* srl, V9 srlx */
+ cpu_src1 = get_src1(insn, cpu_src1);
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 20, 31);
+ if (insn & (1 << 12)) {
+ tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
+ } else {
+ tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
+ tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
+ }
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ gen_movl_reg_TN(rs2, cpu_src2);
+ if (insn & (1 << 12)) {
+ tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
+ tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
+ } else {
+ tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
+ tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
+ tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
+ }
+ }
+ gen_movl_TN_reg(rd, cpu_dst);
+ } else if (xop == 0x27) { /* sra, V9 srax */
+ cpu_src1 = get_src1(insn, cpu_src1);
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 20, 31);
+ if (insn & (1 << 12)) {
+ tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
+ } else {
+ tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
+ tcg_gen_ext32s_i64(cpu_dst, cpu_dst);
+ tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
+ }
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ gen_movl_reg_TN(rs2, cpu_src2);
+ if (insn & (1 << 12)) {
+ tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
+ tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
+ } else {
+ tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
+ tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
+ tcg_gen_ext32s_i64(cpu_dst, cpu_dst);
+ tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
+ }
+ }
+ gen_movl_TN_reg(rd, cpu_dst);
+#endif
+ } else if (xop < 0x36) {
+ if (xop < 0x20) {
+ cpu_src1 = get_src1(insn, cpu_src1);
+ cpu_src2 = get_src2(insn, cpu_src2);
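+                /* Bit 4 of xop selects the cc-setting variant (add vs.
+                   addcc and so on); the switch masks it off and each case
+                   tests (xop & 0x10) where the flag handling differs.  */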
+ switch (xop & ~0x10) {
+ case 0x0: /* add */
+ if (IS_IMM) {
+ simm = GET_FIELDs(insn, 19, 31);
+ if (xop & 0x10) {
+ gen_op_addi_cc(cpu_dst, cpu_src1, simm);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
+ dc->cc_op = CC_OP_ADD;
+ } else {
+ tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
+ }
+ } else {
+ if (xop & 0x10) {
+ gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
+ dc->cc_op = CC_OP_ADD;
+ } else {
+ tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
+ }
+ }
+ break;
+ case 0x1: /* and */
+ if (IS_IMM) {
+ simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_andi_tl(cpu_dst, cpu_src1, simm);
+ } else {
+ tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
+ }
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0x2: /* or */
+ if (IS_IMM) {
+ simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_ori_tl(cpu_dst, cpu_src1, simm);
+ } else {
+ tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
+ }
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0x3: /* xor */
+ if (IS_IMM) {
+ simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_xori_tl(cpu_dst, cpu_src1, simm);
+ } else {
+ tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
+ }
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0x4: /* sub */
+ if (IS_IMM) {
+ simm = GET_FIELDs(insn, 19, 31);
+ if (xop & 0x10) {
+ gen_op_subi_cc(cpu_dst, cpu_src1, simm, dc);
+ } else {
+ tcg_gen_subi_tl(cpu_dst, cpu_src1, simm);
+ }
+ } else {
+ if (xop & 0x10) {
+ gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
+ dc->cc_op = CC_OP_SUB;
+ } else {
+ tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
+ }
+ }
+ break;
+ case 0x5: /* andn */
+ if (IS_IMM) {
+ simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_andi_tl(cpu_dst, cpu_src1, ~simm);
+ } else {
+ tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
+ }
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0x6: /* orn */
+ if (IS_IMM) {
+ simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_ori_tl(cpu_dst, cpu_src1, ~simm);
+ } else {
+ tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
+ }
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0x7: /* xorn */
+ if (IS_IMM) {
+ simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_xori_tl(cpu_dst, cpu_src1, ~simm);
+ } else {
+ tcg_gen_not_tl(cpu_tmp0, cpu_src2);
+ tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_tmp0);
+ }
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0x8: /* addx, V9 addc */
+ gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
+ (xop & 0x10));
+ break;
+#ifdef TARGET_SPARC64
+ case 0x9: /* V9 mulx */
+ if (IS_IMM) {
+ simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_muli_i64(cpu_dst, cpu_src1, simm);
+ } else {
+ tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
+ }
+ break;
+#endif
+ case 0xa: /* umul */
+ CHECK_IU_FEATURE(dc, MUL);
+ gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0xb: /* smul */
+ CHECK_IU_FEATURE(dc, MUL);
+ gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0xc: /* subx, V9 subc */
+ gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
+ (xop & 0x10));
+ break;
+#ifdef TARGET_SPARC64
+ case 0xd: /* V9 udivx */
+ tcg_gen_mov_tl(cpu_cc_src, cpu_src1);
+ tcg_gen_mov_tl(cpu_cc_src2, cpu_src2);
+ gen_trap_ifdivzero_tl(cpu_cc_src2);
+ tcg_gen_divu_i64(cpu_dst, cpu_cc_src, cpu_cc_src2);
+ break;
+#endif
+ case 0xe: /* udiv */
+ CHECK_IU_FEATURE(dc, DIV);
+ if (xop & 0x10) {
+ gen_helper_udiv_cc(cpu_dst, cpu_src1, cpu_src2);
+ dc->cc_op = CC_OP_DIV;
+ } else {
+ gen_helper_udiv(cpu_dst, cpu_src1, cpu_src2);
+ }
+ break;
+ case 0xf: /* sdiv */
+ CHECK_IU_FEATURE(dc, DIV);
+ if (xop & 0x10) {
+ gen_helper_sdiv_cc(cpu_dst, cpu_src1, cpu_src2);
+ dc->cc_op = CC_OP_DIV;
+ } else {
+ gen_helper_sdiv(cpu_dst, cpu_src1, cpu_src2);
+ }
+ break;
+ default:
+ goto illegal_insn;
+ }
+ gen_movl_TN_reg(rd, cpu_dst);
+ } else {
+ cpu_src1 = get_src1(insn, cpu_src1);
+ cpu_src2 = get_src2(insn, cpu_src2);
+ switch (xop) {
+ case 0x20: /* taddcc */
+ gen_op_tadd_cc(cpu_dst, cpu_src1, cpu_src2);
+ gen_movl_TN_reg(rd, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
+ dc->cc_op = CC_OP_TADD;
+ break;
+ case 0x21: /* tsubcc */
+ gen_op_tsub_cc(cpu_dst, cpu_src1, cpu_src2);
+ gen_movl_TN_reg(rd, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
+ dc->cc_op = CC_OP_TSUB;
+ break;
+ case 0x22: /* taddcctv */
+ save_state(dc, cpu_cond);
+ gen_op_tadd_ccTV(cpu_dst, cpu_src1, cpu_src2);
+ gen_movl_TN_reg(rd, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADDTV);
+ dc->cc_op = CC_OP_TADDTV;
+ break;
+ case 0x23: /* tsubcctv */
+ save_state(dc, cpu_cond);
+ gen_op_tsub_ccTV(cpu_dst, cpu_src1, cpu_src2);
+ gen_movl_TN_reg(rd, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUBTV);
+ dc->cc_op = CC_OP_TSUBTV;
+ break;
+ case 0x24: /* mulscc */
+ gen_helper_compute_psr();
+ gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
+ gen_movl_TN_reg(rd, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
+ dc->cc_op = CC_OP_ADD;
+ break;
+#ifndef TARGET_SPARC64
+ case 0x25: /* sll */
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 20, 31);
+ tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
+ } else { /* register */
+ tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
+ tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
+ }
+ gen_movl_TN_reg(rd, cpu_dst);
+ break;
+ case 0x26: /* srl */
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 20, 31);
+ tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
+ } else { /* register */
+ tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
+ tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
+ }
+ gen_movl_TN_reg(rd, cpu_dst);
+ break;
+ case 0x27: /* sra */
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 20, 31);
+ tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
+ } else { /* register */
+ tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
+ tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
+ }
+ gen_movl_TN_reg(rd, cpu_dst);
+ break;
+#endif
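+                /* wr %asr: the architecture defines wr as storing
+                   (rs1 ^ operand2) into the state register selected by rd,
+                   hence the xor at the top of each case below.  */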
+ case 0x30:
+ {
+ switch(rd) {
+ case 0: /* wry */
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
+ break;
+#ifndef TARGET_SPARC64
+                        case 0x01 ... 0x0f: /* undefined in the SPARCv8
+                                               manual, nop on the
+                                               microSPARC II */
+                        case 0x10 ... 0x1f: /* implementation-dependent in
+                                               the SPARCv8 manual, nop on
+                                               the microSPARC II */
+ break;
+#else
+ case 0x2: /* V9 wrccr */
+ tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
+ gen_helper_wrccr(cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
+ dc->cc_op = CC_OP_FLAGS;
+ break;
+ case 0x3: /* V9 wrasi */
+ tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
+ tcg_gen_andi_tl(cpu_dst, cpu_dst, 0xff);
+ tcg_gen_trunc_tl_i32(cpu_asi, cpu_dst);
+ break;
+ case 0x6: /* V9 wrfprs */
+ tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
+ tcg_gen_trunc_tl_i32(cpu_fprs, cpu_dst);
+ save_state(dc, cpu_cond);
+ gen_op_next_insn();
+ tcg_gen_exit_tb(0);
+ dc->is_br = 1;
+ break;
+ case 0xf: /* V9 sir, nop if user */
+#if !defined(CONFIG_USER_ONLY)
+ if (supervisor(dc)) {
+ ; // XXX
+ }
+#endif
+ break;
+ case 0x13: /* Graphics Status */
+ if (gen_trap_ifnofpu(dc, cpu_cond))
+ goto jmp_insn;
+ tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
+ break;
+ case 0x14: /* Softint set */
+ if (!supervisor(dc))
+ goto illegal_insn;
+ tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
+ gen_helper_set_softint(cpu_tmp64);
+ break;
+ case 0x15: /* Softint clear */
+ if (!supervisor(dc))
+ goto illegal_insn;
+ tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
+ gen_helper_clear_softint(cpu_tmp64);
+ break;
+ case 0x16: /* Softint write */
+ if (!supervisor(dc))
+ goto illegal_insn;
+ tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
+ gen_helper_write_softint(cpu_tmp64);
+ break;
+ case 0x17: /* Tick compare */
+#if !defined(CONFIG_USER_ONLY)
+ if (!supervisor(dc))
+ goto illegal_insn;
+#endif
+ {
+ TCGv_ptr r_tickptr;
+
+ tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
+ cpu_src2);
+ r_tickptr = tcg_temp_new_ptr();
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUState, tick));
+ gen_helper_tick_set_limit(r_tickptr,
+ cpu_tick_cmpr);
+ tcg_temp_free_ptr(r_tickptr);
+ }
+ break;
+ case 0x18: /* System tick */
+#if !defined(CONFIG_USER_ONLY)
+ if (!supervisor(dc))
+ goto illegal_insn;
+#endif
+ {
+ TCGv_ptr r_tickptr;
+
+ tcg_gen_xor_tl(cpu_dst, cpu_src1,
+ cpu_src2);
+ r_tickptr = tcg_temp_new_ptr();
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUState, stick));
+ gen_helper_tick_set_count(r_tickptr,
+ cpu_dst);
+ tcg_temp_free_ptr(r_tickptr);
+ }
+ break;
+ case 0x19: /* System tick compare */
+#if !defined(CONFIG_USER_ONLY)
+ if (!supervisor(dc))
+ goto illegal_insn;
+#endif
+ {
+ TCGv_ptr r_tickptr;
+
+ tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
+ cpu_src2);
+ r_tickptr = tcg_temp_new_ptr();
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUState, stick));
+ gen_helper_tick_set_limit(r_tickptr,
+ cpu_stick_cmpr);
+ tcg_temp_free_ptr(r_tickptr);
+ }
+ break;
+
+ case 0x10: /* Performance Control */
+                        case 0x11: /* Performance Instrumentation Counter */
+ case 0x12: /* Dispatch Control */
+#endif
+ default:
+ goto illegal_insn;
+ }
+ }
+ break;
+#if !defined(CONFIG_USER_ONLY)
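+                /* wrpsr can change bits translation depends on (ET, S,
+                   CWP), so the V8 path ends the TB right after the helper
+                   call.  */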
+ case 0x31: /* wrpsr, V9 saved, restored */
+ {
+ if (!supervisor(dc))
+ goto priv_insn;
+#ifdef TARGET_SPARC64
+ switch (rd) {
+ case 0:
+ gen_helper_saved();
+ break;
+ case 1:
+ gen_helper_restored();
+ break;
+ case 2: /* UA2005 allclean */
+ case 3: /* UA2005 otherw */
+ case 4: /* UA2005 normalw */
+ case 5: /* UA2005 invalw */
+ // XXX
+ default:
+ goto illegal_insn;
+ }
+#else
+ tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
+ gen_helper_wrpsr(cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
+ dc->cc_op = CC_OP_FLAGS;
+ save_state(dc, cpu_cond);
+ gen_op_next_insn();
+ tcg_gen_exit_tb(0);
+ dc->is_br = 1;
+#endif
+ }
+ break;
+ case 0x32: /* wrwim, V9 wrpr */
+ {
+ if (!supervisor(dc))
+ goto priv_insn;
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+#ifdef TARGET_SPARC64
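+                /* The tpc/tnpc/tstate/tt cases mirror the rdpr reads
+                   above: resolve the trap_state entry for the current TL,
+                   then store into it.  */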
+ switch (rd) {
+ case 0: // tpc
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_st_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state, tpc));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 1: // tnpc
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_st_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state, tnpc));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 2: // tstate
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_st_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state,
+ tstate));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 3: // tt
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
+ tcg_gen_st_i32(cpu_tmp32, r_tsptr,
+ offsetof(trap_state, tt));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 4: // tick
+ {
+ TCGv_ptr r_tickptr;
+
+ r_tickptr = tcg_temp_new_ptr();
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUState, tick));
+ gen_helper_tick_set_count(r_tickptr,
+ cpu_tmp0);
+ tcg_temp_free_ptr(r_tickptr);
+ }
+ break;
+ case 5: // tba
+ tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
+ break;
+ case 6: // pstate
+ save_state(dc, cpu_cond);
+ gen_helper_wrpstate(cpu_tmp0);
+ dc->npc = DYNAMIC_PC;
+ break;
+ case 7: // tl
+ save_state(dc, cpu_cond);
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
+ tcg_gen_st_i32(cpu_tmp32, cpu_env,
+ offsetof(CPUSPARCState, tl));
+ dc->npc = DYNAMIC_PC;
+ break;
+ case 8: // pil
+ gen_helper_wrpil(cpu_tmp0);
+ break;
+ case 9: // cwp
+ gen_helper_wrcwp(cpu_tmp0);
+ break;
+ case 10: // cansave
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
+ tcg_gen_st_i32(cpu_tmp32, cpu_env,
+ offsetof(CPUSPARCState,
+ cansave));
+ break;
+ case 11: // canrestore
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
+ tcg_gen_st_i32(cpu_tmp32, cpu_env,
+ offsetof(CPUSPARCState,
+ canrestore));
+ break;
+ case 12: // cleanwin
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
+ tcg_gen_st_i32(cpu_tmp32, cpu_env,
+ offsetof(CPUSPARCState,
+ cleanwin));
+ break;
+ case 13: // otherwin
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
+ tcg_gen_st_i32(cpu_tmp32, cpu_env,
+ offsetof(CPUSPARCState,
+ otherwin));
+ break;
+ case 14: // wstate
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
+ tcg_gen_st_i32(cpu_tmp32, cpu_env,
+ offsetof(CPUSPARCState,
+ wstate));
+ break;
+ case 16: // UA2005 gl
+ CHECK_IU_FEATURE(dc, GL);
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
+ tcg_gen_st_i32(cpu_tmp32, cpu_env,
+ offsetof(CPUSPARCState, gl));
+ break;
+ case 26: // UA2005 strand status
+ CHECK_IU_FEATURE(dc, HYPV);
+ if (!hypervisor(dc))
+ goto priv_insn;
+ tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
+ break;
+ default:
+ goto illegal_insn;
+ }
+#else
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
+ if (dc->def->nwindows != 32)
+ tcg_gen_andi_tl(cpu_tmp32, cpu_tmp32,
+ (1 << dc->def->nwindows) - 1);
+ tcg_gen_mov_i32(cpu_wim, cpu_tmp32);
+#endif
+ }
+ break;
+ case 0x33: /* wrtbr, UA2005 wrhpr */
+ {
+#ifndef TARGET_SPARC64
+ if (!supervisor(dc))
+ goto priv_insn;
+ tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
+#else
+ CHECK_IU_FEATURE(dc, HYPV);
+ if (!hypervisor(dc))
+ goto priv_insn;
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ switch (rd) {
+ case 0: // hpstate
+ // XXX gen_op_wrhpstate();
+ save_state(dc, cpu_cond);
+ gen_op_next_insn();
+ tcg_gen_exit_tb(0);
+ dc->is_br = 1;
+ break;
+ case 1: // htstate
+ // XXX gen_op_wrhtstate();
+ break;
+ case 3: // hintp
+ tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
+ break;
+ case 5: // htba
+ tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
+ break;
+ case 31: // hstick_cmpr
+ {
+ TCGv_ptr r_tickptr;
+
+ tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
+ r_tickptr = tcg_temp_new_ptr();
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUState, hstick));
+ gen_helper_tick_set_limit(r_tickptr,
+ cpu_hstick_cmpr);
+ tcg_temp_free_ptr(r_tickptr);
+ }
+ break;
+ case 6: // hver readonly
+ default:
+ goto illegal_insn;
+ }
+#endif
+ }
+ break;
+#endif
+#ifdef TARGET_SPARC64
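+                /* movcc: insn bit 18 selects the integer condition codes
+                   (cc field 0 = %icc, 2 = %xcc) over %fcc0..%fcc3; the move
+                   itself is the same branch-over-move pattern as the
+                   FMOV*CC macros.  */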
+ case 0x2c: /* V9 movcc */
+ {
+ int cc = GET_FIELD_SP(insn, 11, 12);
+ int cond = GET_FIELD_SP(insn, 14, 17);
+ TCGv r_cond;
+ int l1;
+
+ r_cond = tcg_temp_new();
+ if (insn & (1 << 18)) {
+ if (cc == 0)
+ gen_cond(r_cond, 0, cond, dc);
+ else if (cc == 2)
+ gen_cond(r_cond, 1, cond, dc);
+ else
+ goto illegal_insn;
+ } else {
+ gen_fcond(r_cond, cc, cond);
+ }
+
+ l1 = gen_new_label();
+
+ tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
+ if (IS_IMM) { /* immediate */
+ TCGv r_const;
+
+ simm = GET_FIELD_SPs(insn, 0, 10);
+ r_const = tcg_const_tl(simm);
+ gen_movl_TN_reg(rd, r_const);
+ tcg_temp_free(r_const);
+ } else {
+ rs2 = GET_FIELD_SP(insn, 0, 4);
+ gen_movl_reg_TN(rs2, cpu_tmp0);
+ gen_movl_TN_reg(rd, cpu_tmp0);
+ }
+ gen_set_label(l1);
+ tcg_temp_free(r_cond);
+ break;
+ }
+ case 0x2d: /* V9 sdivx */
+ gen_op_sdivx(cpu_dst, cpu_src1, cpu_src2);
+ gen_movl_TN_reg(rd, cpu_dst);
+ break;
+ case 0x2e: /* V9 popc */
+ {
+ cpu_src2 = get_src2(insn, cpu_src2);
+ gen_helper_popc(cpu_dst, cpu_src2);
+ gen_movl_TN_reg(rd, cpu_dst);
+                    }
+                    break;
+ case 0x2f: /* V9 movr */
+ {
+ int cond = GET_FIELD_SP(insn, 10, 12);
+ int l1;
+
+ cpu_src1 = get_src1(insn, cpu_src1);
+
+ l1 = gen_new_label();
+
+ tcg_gen_brcondi_tl(gen_tcg_cond_reg[cond],
+ cpu_src1, 0, l1);
+ if (IS_IMM) { /* immediate */
+ TCGv r_const;
+
+ simm = GET_FIELD_SPs(insn, 0, 9);
+ r_const = tcg_const_tl(simm);
+ gen_movl_TN_reg(rd, r_const);
+ tcg_temp_free(r_const);
+ } else {
+ rs2 = GET_FIELD_SP(insn, 0, 4);
+ gen_movl_reg_TN(rs2, cpu_tmp0);
+ gen_movl_TN_reg(rd, cpu_tmp0);
+ }
+ gen_set_label(l1);
+ break;
+ }
+#endif
+ default:
+ goto illegal_insn;
+ }
+ }
+ } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
+#ifdef TARGET_SPARC64
+ int opf = GET_FIELD_SP(insn, 5, 13);
+ rs1 = GET_FIELD(insn, 13, 17);
+ rs2 = GET_FIELD(insn, 27, 31);
+ if (gen_trap_ifnofpu(dc, cpu_cond))
+ goto jmp_insn;
+
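+            /* VIS ops work on the FP register file: double-width ops go
+               through the DT0/DT1 load/store helpers, single-width ones
+               act on cpu_fpr[] directly.  */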
+ switch (opf) {
+ case 0x000: /* VIS I edge8cc */
+ case 0x001: /* VIS II edge8n */
+ case 0x002: /* VIS I edge8lcc */
+ case 0x003: /* VIS II edge8ln */
+ case 0x004: /* VIS I edge16cc */
+ case 0x005: /* VIS II edge16n */
+ case 0x006: /* VIS I edge16lcc */
+ case 0x007: /* VIS II edge16ln */
+ case 0x008: /* VIS I edge32cc */
+ case 0x009: /* VIS II edge32n */
+ case 0x00a: /* VIS I edge32lcc */
+ case 0x00b: /* VIS II edge32ln */
+ // XXX
+ goto illegal_insn;
+ case 0x010: /* VIS I array8 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = get_src1(insn, cpu_src1);
+ gen_movl_reg_TN(rs2, cpu_src2);
+ gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
+ gen_movl_TN_reg(rd, cpu_dst);
+ break;
+ case 0x012: /* VIS I array16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = get_src1(insn, cpu_src1);
+ gen_movl_reg_TN(rs2, cpu_src2);
+ gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
+ tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
+ gen_movl_TN_reg(rd, cpu_dst);
+ break;
+ case 0x014: /* VIS I array32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = get_src1(insn, cpu_src1);
+ gen_movl_reg_TN(rs2, cpu_src2);
+ gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
+ tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
+ gen_movl_TN_reg(rd, cpu_dst);
+ break;
+ case 0x018: /* VIS I alignaddr */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = get_src1(insn, cpu_src1);
+ gen_movl_reg_TN(rs2, cpu_src2);
+ gen_helper_alignaddr(cpu_dst, cpu_src1, cpu_src2);
+ gen_movl_TN_reg(rd, cpu_dst);
+ break;
+ case 0x019: /* VIS II bmask */
+ case 0x01a: /* VIS I alignaddrl */
+ // XXX
+ goto illegal_insn;
+ case 0x020: /* VIS I fcmple16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fcmple16();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x022: /* VIS I fcmpne16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fcmpne16();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x024: /* VIS I fcmple32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fcmple32();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x026: /* VIS I fcmpne32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fcmpne32();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x028: /* VIS I fcmpgt16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fcmpgt16();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x02a: /* VIS I fcmpeq16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fcmpeq16();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x02c: /* VIS I fcmpgt32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fcmpgt32();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x02e: /* VIS I fcmpeq32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fcmpeq32();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x031: /* VIS I fmul8x16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fmul8x16();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x033: /* VIS I fmul8x16au */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fmul8x16au();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x035: /* VIS I fmul8x16al */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fmul8x16al();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x036: /* VIS I fmul8sux16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fmul8sux16();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x037: /* VIS I fmul8ulx16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fmul8ulx16();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x038: /* VIS I fmuld8sux16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fmuld8sux16();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x039: /* VIS I fmuld8ulx16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fmuld8ulx16();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x03a: /* VIS I fpack32 */
+ case 0x03b: /* VIS I fpack16 */
+ case 0x03d: /* VIS I fpackfix */
+ case 0x03e: /* VIS I pdist */
+ // XXX
+ goto illegal_insn;
+ case 0x048: /* VIS I faligndata */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_faligndata();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x04b: /* VIS I fpmerge */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fpmerge();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x04c: /* VIS II bshuffle */
+ // XXX
+ goto illegal_insn;
+ case 0x04d: /* VIS I fexpand */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fexpand();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x050: /* VIS I fpadd16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fpadd16();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x051: /* VIS I fpadd16s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_helper_fpadd16s(cpu_fpr[rd],
+ cpu_fpr[rs1], cpu_fpr[rs2]);
+ break;
+ case 0x052: /* VIS I fpadd32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fpadd32();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x053: /* VIS I fpadd32s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_helper_fpadd32s(cpu_fpr[rd],
+ cpu_fpr[rs1], cpu_fpr[rs2]);
+ break;
+ case 0x054: /* VIS I fpsub16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fpsub16();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x055: /* VIS I fpsub16s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_helper_fpsub16s(cpu_fpr[rd],
+ cpu_fpr[rs1], cpu_fpr[rs2]);
+ break;
+ case 0x056: /* VIS I fpsub32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs1));
+ gen_op_load_fpr_DT1(DFPREG(rs2));
+ gen_helper_fpsub32();
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x057: /* VIS I fpsub32s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_helper_fpsub32s(cpu_fpr[rd],
+ cpu_fpr[rs1], cpu_fpr[rs2]);
+ break;
+ case 0x060: /* VIS I fzero */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_movi_i32(cpu_fpr[DFPREG(rd)], 0);
+ tcg_gen_movi_i32(cpu_fpr[DFPREG(rd) + 1], 0);
+ break;
+ case 0x061: /* VIS I fzeros */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_movi_i32(cpu_fpr[rd], 0);
+ break;
+ case 0x062: /* VIS I fnor */
+ CHECK_FPU_FEATURE(dc, VIS1);
+                    tcg_gen_nor_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs1)],
+                                    cpu_fpr[DFPREG(rs2)]);
+                    tcg_gen_nor_i32(cpu_fpr[DFPREG(rd) + 1],
+                                    cpu_fpr[DFPREG(rs1) + 1],
+                                    cpu_fpr[DFPREG(rs2) + 1]);
+ break;
+ case 0x063: /* VIS I fnors */
+ CHECK_FPU_FEATURE(dc, VIS1);
+                    tcg_gen_nor_i32(cpu_fpr[rd], cpu_fpr[rs1], cpu_fpr[rs2]);
+ break;
+ case 0x064: /* VIS I fandnot2 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_andc_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs1)],
+ cpu_fpr[DFPREG(rs2)]);
+ tcg_gen_andc_i32(cpu_fpr[DFPREG(rd) + 1],
+ cpu_fpr[DFPREG(rs1) + 1],
+ cpu_fpr[DFPREG(rs2) + 1]);
+ break;
+ case 0x065: /* VIS I fandnot2s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_andc_i32(cpu_fpr[rd], cpu_fpr[rs1], cpu_fpr[rs2]);
+ break;
+ case 0x066: /* VIS I fnot2 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_not_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs2)]);
+ tcg_gen_not_i32(cpu_fpr[DFPREG(rd) + 1],
+ cpu_fpr[DFPREG(rs2) + 1]);
+ break;
+ case 0x067: /* VIS I fnot2s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_not_i32(cpu_fpr[rd], cpu_fpr[rs2]);
+ break;
+ case 0x068: /* VIS I fandnot1 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_andc_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs2)],
+ cpu_fpr[DFPREG(rs1)]);
+ tcg_gen_andc_i32(cpu_fpr[DFPREG(rd) + 1],
+ cpu_fpr[DFPREG(rs2) + 1],
+ cpu_fpr[DFPREG(rs1) + 1]);
+ break;
+ case 0x069: /* VIS I fandnot1s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_andc_i32(cpu_fpr[rd], cpu_fpr[rs2], cpu_fpr[rs1]);
+ break;
+ case 0x06a: /* VIS I fnot1 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_not_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs1)]);
+ tcg_gen_not_i32(cpu_fpr[DFPREG(rd) + 1],
+ cpu_fpr[DFPREG(rs1) + 1]);
+ break;
+ case 0x06b: /* VIS I fnot1s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_not_i32(cpu_fpr[rd], cpu_fpr[rs1]);
+ break;
+ case 0x06c: /* VIS I fxor */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_xor_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs1)],
+ cpu_fpr[DFPREG(rs2)]);
+ tcg_gen_xor_i32(cpu_fpr[DFPREG(rd) + 1],
+ cpu_fpr[DFPREG(rs1) + 1],
+ cpu_fpr[DFPREG(rs2) + 1]);
+ break;
+ case 0x06d: /* VIS I fxors */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_xor_i32(cpu_fpr[rd], cpu_fpr[rs1], cpu_fpr[rs2]);
+ break;
+ case 0x06e: /* VIS I fnand */
+ CHECK_FPU_FEATURE(dc, VIS1);
+                    tcg_gen_nand_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs1)],
+                                     cpu_fpr[DFPREG(rs2)]);
+                    tcg_gen_nand_i32(cpu_fpr[DFPREG(rd) + 1],
+                                     cpu_fpr[DFPREG(rs1) + 1],
+                                     cpu_fpr[DFPREG(rs2) + 1]);
+ break;
+ case 0x06f: /* VIS I fnands */
+ CHECK_FPU_FEATURE(dc, VIS1);
+                    tcg_gen_nand_i32(cpu_fpr[rd], cpu_fpr[rs1], cpu_fpr[rs2]);
+ break;
+ case 0x070: /* VIS I fand */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_and_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs1)],
+ cpu_fpr[DFPREG(rs2)]);
+ tcg_gen_and_i32(cpu_fpr[DFPREG(rd) + 1],
+ cpu_fpr[DFPREG(rs1) + 1],
+ cpu_fpr[DFPREG(rs2) + 1]);
+ break;
+ case 0x071: /* VIS I fands */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_and_i32(cpu_fpr[rd], cpu_fpr[rs1], cpu_fpr[rs2]);
+ break;
+ case 0x072: /* VIS I fxnor */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_xori_i32(cpu_tmp32, cpu_fpr[DFPREG(rs2)], -1);
+ tcg_gen_xor_i32(cpu_fpr[DFPREG(rd)], cpu_tmp32,
+ cpu_fpr[DFPREG(rs1)]);
+ tcg_gen_xori_i32(cpu_tmp32, cpu_fpr[DFPREG(rs2) + 1], -1);
+ tcg_gen_xor_i32(cpu_fpr[DFPREG(rd) + 1], cpu_tmp32,
+ cpu_fpr[DFPREG(rs1) + 1]);
+ break;
+ case 0x073: /* VIS I fxnors */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_xori_i32(cpu_tmp32, cpu_fpr[rs2], -1);
+ tcg_gen_xor_i32(cpu_fpr[rd], cpu_tmp32, cpu_fpr[rs1]);
+ break;
+ case 0x074: /* VIS I fsrc1 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_mov_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs1)]);
+ tcg_gen_mov_i32(cpu_fpr[DFPREG(rd) + 1],
+ cpu_fpr[DFPREG(rs1) + 1]);
+ break;
+ case 0x075: /* VIS I fsrc1s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_mov_i32(cpu_fpr[rd], cpu_fpr[rs1]);
+ break;
+ case 0x076: /* VIS I fornot2 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_orc_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs1)],
+ cpu_fpr[DFPREG(rs2)]);
+ tcg_gen_orc_i32(cpu_fpr[DFPREG(rd) + 1],
+ cpu_fpr[DFPREG(rs1) + 1],
+ cpu_fpr[DFPREG(rs2) + 1]);
+ break;
+ case 0x077: /* VIS I fornot2s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_orc_i32(cpu_fpr[rd], cpu_fpr[rs1], cpu_fpr[rs2]);
+ break;
+ case 0x078: /* VIS I fsrc2 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_op_load_fpr_DT0(DFPREG(rs2));
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ break;
+ case 0x079: /* VIS I fsrc2s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_mov_i32(cpu_fpr[rd], cpu_fpr[rs2]);
+ break;
+ case 0x07a: /* VIS I fornot1 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_orc_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs2)],
+ cpu_fpr[DFPREG(rs1)]);
+ tcg_gen_orc_i32(cpu_fpr[DFPREG(rd) + 1],
+ cpu_fpr[DFPREG(rs2) + 1],
+ cpu_fpr[DFPREG(rs1) + 1]);
+ break;
+ case 0x07b: /* VIS I fornot1s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_orc_i32(cpu_fpr[rd], cpu_fpr[rs2], cpu_fpr[rs1]);
+ break;
+ case 0x07c: /* VIS I for */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_or_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs1)],
+ cpu_fpr[DFPREG(rs2)]);
+ tcg_gen_or_i32(cpu_fpr[DFPREG(rd) + 1],
+ cpu_fpr[DFPREG(rs1) + 1],
+ cpu_fpr[DFPREG(rs2) + 1]);
+ break;
+ case 0x07d: /* VIS I fors */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_or_i32(cpu_fpr[rd], cpu_fpr[rs1], cpu_fpr[rs2]);
+ break;
+ case 0x07e: /* VIS I fone */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_movi_i32(cpu_fpr[DFPREG(rd)], -1);
+ tcg_gen_movi_i32(cpu_fpr[DFPREG(rd) + 1], -1);
+ break;
+ case 0x07f: /* VIS I fones */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ tcg_gen_movi_i32(cpu_fpr[rd], -1);
+ break;
+ case 0x080: /* VIS I shutdown */
+ case 0x081: /* VIS II siam */
+ // XXX
+ goto illegal_insn;
+ default:
+ goto illegal_insn;
+ }
+#else
+ goto ncp_insn;
+#endif
+ } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
+#ifdef TARGET_SPARC64
+ goto illegal_insn;
+#else
+ goto ncp_insn;
+#endif
+#ifdef TARGET_SPARC64
+ } else if (xop == 0x39) { /* V9 return */
+ TCGv_i32 r_const;
+
+ save_state(dc, cpu_cond);
+ cpu_src1 = get_src1(insn, cpu_src1);
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ if (rs2) {
+ gen_movl_reg_TN(rs2, cpu_src2);
+ tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
+ } else
+ tcg_gen_mov_tl(cpu_dst, cpu_src1);
+ }
+ gen_helper_restore();
+ gen_mov_pc_npc(dc, cpu_cond);
+ r_const = tcg_const_i32(3);
+ gen_helper_check_align(cpu_dst, r_const);
+ tcg_temp_free_i32(r_const);
+ tcg_gen_mov_tl(cpu_npc, cpu_dst);
+ dc->npc = DYNAMIC_PC;
+ goto jmp_insn;
+#endif
+ } else {
+ cpu_src1 = get_src1(insn, cpu_src1);
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ if (rs2) {
+ gen_movl_reg_TN(rs2, cpu_src2);
+ tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
+ } else
+ tcg_gen_mov_tl(cpu_dst, cpu_src1);
+ }
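+                /* jmpl and rett share the target computed into cpu_dst;
+                   check_align traps before npc is updated if the target's
+                   low two bits are nonzero.  */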
+ switch (xop) {
+ case 0x38: /* jmpl */
+ {
+ TCGv r_pc;
+ TCGv_i32 r_const;
+
+ r_pc = tcg_const_tl(dc->pc);
+ gen_movl_TN_reg(rd, r_pc);
+ tcg_temp_free(r_pc);
+ gen_mov_pc_npc(dc, cpu_cond);
+ r_const = tcg_const_i32(3);
+ gen_helper_check_align(cpu_dst, r_const);
+ tcg_temp_free_i32(r_const);
+ tcg_gen_mov_tl(cpu_npc, cpu_dst);
+ dc->npc = DYNAMIC_PC;
+ }
+ goto jmp_insn;
+#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
+ case 0x39: /* rett, V9 return */
+ {
+ TCGv_i32 r_const;
+
+ if (!supervisor(dc))
+ goto priv_insn;
+ gen_mov_pc_npc(dc, cpu_cond);
+ r_const = tcg_const_i32(3);
+ gen_helper_check_align(cpu_dst, r_const);
+ tcg_temp_free_i32(r_const);
+ tcg_gen_mov_tl(cpu_npc, cpu_dst);
+ dc->npc = DYNAMIC_PC;
+ gen_helper_rett();
+ }
+ goto jmp_insn;
+#endif
+ case 0x3b: /* flush */
+ if (!((dc)->def->features & CPU_FEATURE_FLUSH))
+ goto unimp_flush;
+ gen_helper_flush(cpu_dst);
+ break;
+ case 0x3c: /* save */
+ save_state(dc, cpu_cond);
+ gen_helper_save();
+ gen_movl_TN_reg(rd, cpu_dst);
+ break;
+ case 0x3d: /* restore */
+ save_state(dc, cpu_cond);
+ gen_helper_restore();
+ gen_movl_TN_reg(rd, cpu_dst);
+ break;
+#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
+ case 0x3e: /* V9 done/retry */
+ {
+ switch (rd) {
+ case 0:
+ if (!supervisor(dc))
+ goto priv_insn;
+ dc->npc = DYNAMIC_PC;
+ dc->pc = DYNAMIC_PC;
+ gen_helper_done();
+ goto jmp_insn;
+ case 1:
+ if (!supervisor(dc))
+ goto priv_insn;
+ dc->npc = DYNAMIC_PC;
+ dc->pc = DYNAMIC_PC;
+ gen_helper_retry();
+ goto jmp_insn;
+ default:
+ goto illegal_insn;
+ }
+ }
+ break;
+#endif
+ default:
+ goto illegal_insn;
+ }
+ }
+ break;
+ }
+ break;
+ case 3: /* load/store instructions */
+ {
+ unsigned int xop = GET_FIELD(insn, 7, 12);
+
+ /* flush pending conditional evaluations before exposing
+ cpu state */
+ if (dc->cc_op != CC_OP_FLAGS) {
+ dc->cc_op = CC_OP_FLAGS;
+ gen_helper_compute_psr();
+ }
+ cpu_src1 = get_src1(insn, cpu_src1);
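+            /* Effective address: rs1 + simm13 in the immediate form,
+               rs1 + rs2 otherwise; casa/casxa instead take the address
+               from rs1 alone and use rs2 as the comparison value.  */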
+ if (xop == 0x3c || xop == 0x3e) { // V9 casa/casxa
+ rs2 = GET_FIELD(insn, 27, 31);
+ gen_movl_reg_TN(rs2, cpu_src2);
+ tcg_gen_mov_tl(cpu_addr, cpu_src1);
+ } else if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_addi_tl(cpu_addr, cpu_src1, simm);
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ if (rs2 != 0) {
+ gen_movl_reg_TN(rs2, cpu_src2);
+ tcg_gen_add_tl(cpu_addr, cpu_src1, cpu_src2);
+ } else
+ tcg_gen_mov_tl(cpu_addr, cpu_src1);
+ }
+ if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
+ (xop > 0x17 && xop <= 0x1d ) ||
+ (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
+ switch (xop) {
+ case 0x0: /* ld, V9 lduw, load unsigned word */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x1: /* ldub, load unsigned byte */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x2: /* lduh, load unsigned halfword */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
+ break;
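+                /* ldd needs an even rd: the 64-bit value is split across
+                   the rd/rd+1 register pair, most significant word in
+                   rd.  */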
+ case 0x3: /* ldd, load double word */
+ if (rd & 1)
+ goto illegal_insn;
+ else {
+ TCGv_i32 r_const;
+
+ save_state(dc, cpu_cond);
+ r_const = tcg_const_i32(7);
+ gen_helper_check_align(cpu_addr, r_const); // XXX remove
+ tcg_temp_free_i32(r_const);
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, dc->mem_idx);
+ tcg_gen_trunc_i64_tl(cpu_tmp0, cpu_tmp64);
+ tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffffULL);
+ gen_movl_TN_reg(rd + 1, cpu_tmp0);
+ tcg_gen_shri_i64(cpu_tmp64, cpu_tmp64, 32);
+ tcg_gen_trunc_i64_tl(cpu_val, cpu_tmp64);
+ tcg_gen_andi_tl(cpu_val, cpu_val, 0xffffffffULL);
+ }
+ break;
+ case 0x9: /* ldsb, load signed byte */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0xa: /* ldsh, load signed halfword */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+                case 0xd: /* ldstub -- XXX: should be atomic */
+ {
+ TCGv r_const;
+
+ gen_address_mask(dc, cpu_addr);
+                        tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
+ r_const = tcg_const_tl(0xff);
+ tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
+ tcg_temp_free(r_const);
+ }
+ break;
+                case 0x0f: /* swap -- swap register with memory;
+                              XXX: should be atomic */
+ CHECK_IU_FEATURE(dc, SWAP);
+ gen_movl_reg_TN(rd, cpu_val);
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_mov_tl(cpu_val, cpu_tmp0);
+ break;
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+ case 0x10: /* lda, V9 lduwa, load word alternate */
+#ifndef TARGET_SPARC64
+ if (IS_IMM)
+ goto illegal_insn;
+ if (!supervisor(dc))
+ goto priv_insn;
+#endif
+ save_state(dc, cpu_cond);
+ gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
+ break;
+ case 0x11: /* lduba, load unsigned byte alternate */
+#ifndef TARGET_SPARC64
+ if (IS_IMM)
+ goto illegal_insn;
+ if (!supervisor(dc))
+ goto priv_insn;
+#endif
+ save_state(dc, cpu_cond);
+ gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
+ break;
+ case 0x12: /* lduha, load unsigned halfword alternate */
+#ifndef TARGET_SPARC64
+ if (IS_IMM)
+ goto illegal_insn;
+ if (!supervisor(dc))
+ goto priv_insn;
+#endif
+ save_state(dc, cpu_cond);
+ gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
+ break;
+ case 0x13: /* ldda, load double word alternate */
+#ifndef TARGET_SPARC64
+ if (IS_IMM)
+ goto illegal_insn;
+ if (!supervisor(dc))
+ goto priv_insn;
+#endif
+ if (rd & 1)
+ goto illegal_insn;
+ save_state(dc, cpu_cond);
+ gen_ldda_asi(cpu_val, cpu_addr, insn, rd);
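+                    /* gen_ldda_asi writes both destination registers
+                       itself, so skip the common write-back below */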
+ goto skip_move;
+ case 0x19: /* ldsba, load signed byte alternate */
+#ifndef TARGET_SPARC64
+ if (IS_IMM)
+ goto illegal_insn;
+ if (!supervisor(dc))
+ goto priv_insn;
+#endif
+ save_state(dc, cpu_cond);
+ gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
+ break;
+ case 0x1a: /* ldsha, load signed halfword alternate */
+#ifndef TARGET_SPARC64
+ if (IS_IMM)
+ goto illegal_insn;
+ if (!supervisor(dc))
+ goto priv_insn;
+#endif
+ save_state(dc, cpu_cond);
+ gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
+ break;
+                case 0x1d: /* ldstuba -- XXX: should be atomic */
+#ifndef TARGET_SPARC64
+ if (IS_IMM)
+ goto illegal_insn;
+ if (!supervisor(dc))
+ goto priv_insn;
+#endif
+ save_state(dc, cpu_cond);
+ gen_ldstub_asi(cpu_val, cpu_addr, insn);
+ break;
+                case 0x1f: /* swapa, swap reg with alt. memory.
+                              XXX: should be atomic */
+ CHECK_IU_FEATURE(dc, SWAP);
+#ifndef TARGET_SPARC64
+ if (IS_IMM)
+ goto illegal_insn;
+ if (!supervisor(dc))
+ goto priv_insn;
+#endif
+ save_state(dc, cpu_cond);
+ gen_movl_reg_TN(rd, cpu_val);
+ gen_swap_asi(cpu_val, cpu_addr, insn);
+ break;
+
+#ifndef TARGET_SPARC64
+ case 0x30: /* ldc */
+ case 0x31: /* ldcsr */
+ case 0x33: /* lddc */
+ goto ncp_insn;
+#endif
+#endif
+#ifdef TARGET_SPARC64
+ case 0x08: /* V9 ldsw */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x0b: /* V9 ldx */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x18: /* V9 ldswa */
+ save_state(dc, cpu_cond);
+ gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
+ break;
+ case 0x1b: /* V9 ldxa */
+ save_state(dc, cpu_cond);
+ gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
+ break;
+ case 0x2d: /* V9 prefetch, no effect */
+ goto skip_move;
+ case 0x30: /* V9 ldfa */
+ save_state(dc, cpu_cond);
+ gen_ldf_asi(cpu_addr, insn, 4, rd);
+ goto skip_move;
+ case 0x33: /* V9 lddfa */
+ save_state(dc, cpu_cond);
+ gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
+ goto skip_move;
+ case 0x3d: /* V9 prefetcha, no effect */
+ goto skip_move;
+ case 0x32: /* V9 ldqfa */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ save_state(dc, cpu_cond);
+ gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
+ goto skip_move;
+#endif
+ default:
+ goto illegal_insn;
+ }
+ gen_movl_TN_reg(rd, cpu_val);
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+ skip_move: ;
+#endif
+ } else if (xop >= 0x20 && xop < 0x24) {
+ if (gen_trap_ifnofpu(dc, cpu_cond))
+ goto jmp_insn;
+ save_state(dc, cpu_cond);
+ switch (xop) {
+ case 0x20: /* ldf, load fpreg */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
+ tcg_gen_trunc_tl_i32(cpu_fpr[rd], cpu_tmp0);
+ break;
+ case 0x21: /* ldfsr, V9 ldxfsr */
+#ifdef TARGET_SPARC64
+ gen_address_mask(dc, cpu_addr);
+ if (rd == 1) {
+ tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, dc->mem_idx);
+ gen_helper_ldxfsr(cpu_tmp64);
+ } else {
+ tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
+ gen_helper_ldfsr(cpu_tmp32);
+ }
+#else
+ {
+ tcg_gen_qemu_ld32u(cpu_tmp32, cpu_addr, dc->mem_idx);
+ gen_helper_ldfsr(cpu_tmp32);
+ }
+#endif
+ break;
+ case 0x22: /* ldqf, load quad fpreg */
+ {
+ TCGv_i32 r_const;
+
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ r_const = tcg_const_i32(dc->mem_idx);
+ gen_address_mask(dc, cpu_addr);
+ gen_helper_ldqf(cpu_addr, r_const);
+ tcg_temp_free_i32(r_const);
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ }
+ break;
+ case 0x23: /* lddf, load double fpreg */
+ {
+ TCGv_i32 r_const;
+
+ r_const = tcg_const_i32(dc->mem_idx);
+ gen_address_mask(dc, cpu_addr);
+ gen_helper_lddf(cpu_addr, r_const);
+ tcg_temp_free_i32(r_const);
+ gen_op_store_DT0_fpr(DFPREG(rd));
+ }
+ break;
+ default:
+ goto illegal_insn;
+ }
+ } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
+ xop == 0xe || xop == 0x1e) {
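+                /* integer stores: the value comes from rd
+                   (and rd + 1 for std) */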
+ gen_movl_reg_TN(rd, cpu_val);
+ switch (xop) {
+ case 0x4: /* st, store word */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x5: /* stb, store byte */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x6: /* sth, store halfword */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x7: /* std, store double word */
+ if (rd & 1)
+ goto illegal_insn;
+ else {
+ TCGv_i32 r_const;
+
+ save_state(dc, cpu_cond);
+ gen_address_mask(dc, cpu_addr);
+ r_const = tcg_const_i32(7);
+ gen_helper_check_align(cpu_addr, r_const); // XXX remove
+ tcg_temp_free_i32(r_const);
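+                        /* rd supplies the high word and rd + 1 the low
+                           word; store them as one 64-bit access */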
+ gen_movl_reg_TN(rd + 1, cpu_tmp0);
+ tcg_gen_concat_tl_i64(cpu_tmp64, cpu_tmp0, cpu_val);
+ tcg_gen_qemu_st64(cpu_tmp64, cpu_addr, dc->mem_idx);
+ }
+ break;
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+ case 0x14: /* sta, V9 stwa, store word alternate */
+#ifndef TARGET_SPARC64
+ if (IS_IMM)
+ goto illegal_insn;
+ if (!supervisor(dc))
+ goto priv_insn;
+#endif
+ save_state(dc, cpu_cond);
+ gen_st_asi(cpu_val, cpu_addr, insn, 4);
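+                    /* an ASI store may have side effects (e.g. on the
+                       MMU), so end the TB without static chaining */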
+ dc->npc = DYNAMIC_PC;
+ break;
+ case 0x15: /* stba, store byte alternate */
+#ifndef TARGET_SPARC64
+ if (IS_IMM)
+ goto illegal_insn;
+ if (!supervisor(dc))
+ goto priv_insn;
+#endif
+ save_state(dc, cpu_cond);
+ gen_st_asi(cpu_val, cpu_addr, insn, 1);
+ dc->npc = DYNAMIC_PC;
+ break;
+ case 0x16: /* stha, store halfword alternate */
+#ifndef TARGET_SPARC64
+ if (IS_IMM)
+ goto illegal_insn;
+ if (!supervisor(dc))
+ goto priv_insn;
+#endif
+ save_state(dc, cpu_cond);
+ gen_st_asi(cpu_val, cpu_addr, insn, 2);
+ dc->npc = DYNAMIC_PC;
+ break;
+ case 0x17: /* stda, store double word alternate */
+#ifndef TARGET_SPARC64
+ if (IS_IMM)
+ goto illegal_insn;
+ if (!supervisor(dc))
+ goto priv_insn;
+#endif
+ if (rd & 1)
+ goto illegal_insn;
+ else {
+ save_state(dc, cpu_cond);
+ gen_stda_asi(cpu_val, cpu_addr, insn, rd);
+ }
+ break;
+#endif
+#ifdef TARGET_SPARC64
+ case 0x0e: /* V9 stx */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x1e: /* V9 stxa */
+ save_state(dc, cpu_cond);
+ gen_st_asi(cpu_val, cpu_addr, insn, 8);
+ dc->npc = DYNAMIC_PC;
+ break;
+#endif
+ default:
+ goto illegal_insn;
+ }
+ } else if (xop > 0x23 && xop < 0x28) {
+ if (gen_trap_ifnofpu(dc, cpu_cond))
+ goto jmp_insn;
+ save_state(dc, cpu_cond);
+ switch (xop) {
+ case 0x24: /* stf, store fpreg */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_fpr[rd]);
+ tcg_gen_qemu_st32(cpu_tmp0, cpu_addr, dc->mem_idx);
+ break;
+ case 0x25: /* stfsr, V9 stxfsr */
+#ifdef TARGET_SPARC64
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_ld_i64(cpu_tmp64, cpu_env, offsetof(CPUState, fsr));
+ if (rd == 1)
+ tcg_gen_qemu_st64(cpu_tmp64, cpu_addr, dc->mem_idx);
+ else
+ tcg_gen_qemu_st32(cpu_tmp64, cpu_addr, dc->mem_idx);
+#else
+ tcg_gen_ld_i32(cpu_tmp32, cpu_env, offsetof(CPUState, fsr));
+ tcg_gen_qemu_st32(cpu_tmp32, cpu_addr, dc->mem_idx);
+#endif
+ break;
+ case 0x26:
+#ifdef TARGET_SPARC64
+ /* V9 stqf, store quad fpreg */
+ {
+ TCGv_i32 r_const;
+
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_QT0(QFPREG(rd));
+ r_const = tcg_const_i32(dc->mem_idx);
+ gen_address_mask(dc, cpu_addr);
+ gen_helper_stqf(cpu_addr, r_const);
+ tcg_temp_free_i32(r_const);
+ }
+ break;
+#else /* !TARGET_SPARC64 */
+ /* stdfq, store floating point queue */
+#if defined(CONFIG_USER_ONLY)
+ goto illegal_insn;
+#else
+ if (!supervisor(dc))
+ goto priv_insn;
+ if (gen_trap_ifnofpu(dc, cpu_cond))
+ goto jmp_insn;
+ goto nfq_insn;
+#endif
+#endif
+ case 0x27: /* stdf, store double fpreg */
+ {
+ TCGv_i32 r_const;
+
+ gen_op_load_fpr_DT0(DFPREG(rd));
+ r_const = tcg_const_i32(dc->mem_idx);
+ gen_address_mask(dc, cpu_addr);
+ gen_helper_stdf(cpu_addr, r_const);
+ tcg_temp_free_i32(r_const);
+ }
+ break;
+ default:
+ goto illegal_insn;
+ }
+ } else if (xop > 0x33 && xop < 0x3f) {
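+                /* alternate-space FP stores and compare-and-swap */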
+ save_state(dc, cpu_cond);
+ switch (xop) {
+#ifdef TARGET_SPARC64
+ case 0x34: /* V9 stfa */
+ gen_stf_asi(cpu_addr, insn, 4, rd);
+ break;
+ case 0x36: /* V9 stqfa */
+ {
+ TCGv_i32 r_const;
+
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ r_const = tcg_const_i32(7);
+ gen_helper_check_align(cpu_addr, r_const);
+ tcg_temp_free_i32(r_const);
+ gen_op_load_fpr_QT0(QFPREG(rd));
+ gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
+ }
+ break;
+ case 0x37: /* V9 stdfa */
+ gen_op_load_fpr_DT0(DFPREG(rd));
+ gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
+ break;
+ case 0x3c: /* V9 casa */
+ gen_cas_asi(cpu_val, cpu_addr, cpu_src2, insn, rd);
+ gen_movl_TN_reg(rd, cpu_val);
+ break;
+ case 0x3e: /* V9 casxa */
+ gen_casx_asi(cpu_val, cpu_addr, cpu_src2, insn, rd);
+ gen_movl_TN_reg(rd, cpu_val);
+ break;
+#else
+ case 0x34: /* stc */
+ case 0x35: /* stcsr */
+ case 0x36: /* stdcq */
+ case 0x37: /* stdc */
+ goto ncp_insn;
+#endif
+ default:
+ goto illegal_insn;
+ }
+ } else
+ goto illegal_insn;
+ }
+ break;
+ }
+    /* default case for non-jump instructions */
+ if (dc->npc == DYNAMIC_PC) {
+ dc->pc = DYNAMIC_PC;
+ gen_op_next_insn();
+ } else if (dc->npc == JUMP_PC) {
+ /* we can do a static jump */
+ gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
+ dc->is_br = 1;
+ } else {
+ dc->pc = dc->npc;
+ dc->npc = dc->npc + 4;
+ }
+ jmp_insn:
+ goto egress;
+ illegal_insn:
+ {
+ TCGv_i32 r_const;
+
+ save_state(dc, cpu_cond);
+ r_const = tcg_const_i32(TT_ILL_INSN);
+ gen_helper_raise_exception(r_const);
+ tcg_temp_free_i32(r_const);
+ dc->is_br = 1;
+ }
+ goto egress;
+ unimp_flush:
+ {
+ TCGv_i32 r_const;
+
+ save_state(dc, cpu_cond);
+ r_const = tcg_const_i32(TT_UNIMP_FLUSH);
+ gen_helper_raise_exception(r_const);
+ tcg_temp_free_i32(r_const);
+ dc->is_br = 1;
+ }
+ goto egress;
+#if !defined(CONFIG_USER_ONLY)
+ priv_insn:
+ {
+ TCGv_i32 r_const;
+
+ save_state(dc, cpu_cond);
+ r_const = tcg_const_i32(TT_PRIV_INSN);
+ gen_helper_raise_exception(r_const);
+ tcg_temp_free_i32(r_const);
+ dc->is_br = 1;
+ }
+ goto egress;
+#endif
+ nfpu_insn:
+ save_state(dc, cpu_cond);
+ gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
+ dc->is_br = 1;
+ goto egress;
+#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
+ nfq_insn:
+ save_state(dc, cpu_cond);
+ gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
+ dc->is_br = 1;
+ goto egress;
+#endif
+#ifndef TARGET_SPARC64
+ ncp_insn:
+ {
+        TCGv_i32 r_const;
+
+        save_state(dc, cpu_cond);
+        r_const = tcg_const_i32(TT_NCP_INSN);
+        gen_helper_raise_exception(r_const);
+        tcg_temp_free_i32(r_const);
+ dc->is_br = 1;
+ }
+ goto egress;
+#endif
+ egress:
+ tcg_temp_free(cpu_tmp1);
+ tcg_temp_free(cpu_tmp2);
+}
+
+static inline void gen_intermediate_code_internal(TranslationBlock *tb,
+ int spc, CPUSPARCState *env)
+{
+ target_ulong pc_start, last_pc;
+ uint16_t *gen_opc_end;
+ DisasContext dc1, *dc = &dc1;
+ CPUBreakpoint *bp;
+ int j, lj = -1;
+ int num_insns;
+ int max_insns;
+
+ memset(dc, 0, sizeof(DisasContext));
+ dc->tb = tb;
+ pc_start = tb->pc;
+ dc->pc = pc_start;
+ last_pc = dc->pc;
+ dc->npc = (target_ulong) tb->cs_base;
+ dc->cc_op = CC_OP_DYNAMIC;
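+    /* condition codes start in an unknown state; gen_helper_compute_psr
+       materializes them lazily when an instruction needs them */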
+ dc->mem_idx = cpu_mmu_index(env);
+ dc->def = env->def;
+ if ((dc->def->features & CPU_FEATURE_FLOAT))
+ dc->fpu_enabled = cpu_fpu_enabled(env);
+ else
+ dc->fpu_enabled = 0;
+#ifdef TARGET_SPARC64
+ dc->address_mask_32bit = env->pstate & PS_AM;
+#endif
+ dc->singlestep = (env->singlestep_enabled || singlestep);
+ gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
+
+ cpu_tmp0 = tcg_temp_new();
+ cpu_tmp32 = tcg_temp_new_i32();
+ cpu_tmp64 = tcg_temp_new_i64();
+
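+    /* local temps survive the conditional branches emitted while
+       translating a single instruction, unlike the temps above */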
+ cpu_dst = tcg_temp_local_new();
+
+ // loads and stores
+ cpu_val = tcg_temp_local_new();
+ cpu_addr = tcg_temp_local_new();
+
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0)
+ max_insns = CF_COUNT_MASK;
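+    /* tb->cflags carries an instruction budget when icount is in use;
+       zero means unbounded, so fall back to the maximum */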
+ gen_icount_start();
+ do {
+ if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
+ QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (bp->pc == dc->pc) {
+ if (dc->pc != pc_start)
+ save_state(dc, cpu_cond);
+ gen_helper_debug();
+ tcg_gen_exit_tb(0);
+ dc->is_br = 1;
+ goto exit_gen_loop;
+ }
+ }
+ }
+ if (spc) {
+ qemu_log("Search PC...\n");
+ j = gen_opc_ptr - gen_opc_buf;
+ if (lj < j) {
+ lj++;
+ while (lj < j)
+ gen_opc_instr_start[lj++] = 0;
+ gen_opc_pc[lj] = dc->pc;
+ gen_opc_npc[lj] = dc->npc;
+ gen_opc_instr_start[lj] = 1;
+ gen_opc_icount[lj] = num_insns;
+ }
+ }
+ if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
+ gen_io_start();
+ last_pc = dc->pc;
+ disas_sparc_insn(dc);
+ num_insns++;
+
+ if (dc->is_br)
+ break;
+ /* if the next PC is different, we abort now */
+ if (dc->pc != (last_pc + 4))
+ break;
+ /* if we reach a page boundary, we stop generation so that the
+ PC of a TT_TFAULT exception is always in the right page */
+ if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
+ break;
+        /* in single-step mode, we generate only one instruction and
+           stop the translation block */
+ if (dc->singlestep) {
+ break;
+ }
+ } while ((gen_opc_ptr < gen_opc_end) &&
+ (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
+ num_insns < max_insns);
+
+ exit_gen_loop:
+ tcg_temp_free(cpu_addr);
+ tcg_temp_free(cpu_val);
+ tcg_temp_free(cpu_dst);
+ tcg_temp_free_i64(cpu_tmp64);
+ tcg_temp_free_i32(cpu_tmp32);
+ tcg_temp_free(cpu_tmp0);
+ if (tb->cflags & CF_LAST_IO)
+ gen_io_end();
+ if (!dc->is_br) {
+ if (dc->pc != DYNAMIC_PC &&
+ (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
+ /* static PC and NPC: we can use direct chaining */
+ gen_goto_tb(dc, 0, dc->pc, dc->npc);
+ } else {
+ if (dc->pc != DYNAMIC_PC)
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ save_npc(dc, cpu_cond);
+ tcg_gen_exit_tb(0);
+ }
+ }
+ gen_icount_end(tb, num_insns);
+ *gen_opc_ptr = INDEX_op_end;
+ if (spc) {
+ j = gen_opc_ptr - gen_opc_buf;
+ lj++;
+ while (lj <= j)
+ gen_opc_instr_start[lj++] = 0;
+#if 0
+ log_page_dump();
+#endif
+ gen_opc_jump_pc[0] = dc->jump_pc[0];
+ gen_opc_jump_pc[1] = dc->jump_pc[1];
+ } else {
+ tb->size = last_pc + 4 - pc_start;
+ tb->icount = num_insns;
+ }
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ qemu_log("--------------\n");
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(pc_start, last_pc + 4 - pc_start, 0);
+ qemu_log("\n");
+ }
+#endif
+}
+
+void gen_intermediate_code(CPUSPARCState *env, TranslationBlock *tb)
+{
+ gen_intermediate_code_internal(tb, 0, env);
+}
+
+void gen_intermediate_code_pc(CPUSPARCState *env, TranslationBlock *tb)
+{
+ gen_intermediate_code_internal(tb, 1, env);
+}
+
+void gen_intermediate_code_init(CPUSPARCState *env)
+{
+ unsigned int i;
+ static int inited;
+ static const char * const gregnames[8] = {
+ NULL, // g0 not used
+ "g1",
+ "g2",
+ "g3",
+ "g4",
+ "g5",
+ "g6",
+ "g7",
+ };
+ static const char * const fregnames[64] = {
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
+ "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
+ "f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47",
+ "f48", "f49", "f50", "f51", "f52", "f53", "f54", "f55",
+ "f56", "f57", "f58", "f59", "f60", "f61", "f62", "f63",
+ };
+
+ /* init various static tables */
+ if (!inited) {
+ inited = 1;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ cpu_regwptr = tcg_global_mem_new_ptr(TCG_AREG0,
+ offsetof(CPUState, regwptr),
+ "regwptr");
+#ifdef TARGET_SPARC64
+ cpu_xcc = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUState, xcc),
+ "xcc");
+ cpu_asi = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUState, asi),
+ "asi");
+ cpu_fprs = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUState, fprs),
+ "fprs");
+ cpu_gsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, gsr),
+ "gsr");
+ cpu_tick_cmpr = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUState, tick_cmpr),
+ "tick_cmpr");
+ cpu_stick_cmpr = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUState, stick_cmpr),
+ "stick_cmpr");
+ cpu_hstick_cmpr = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUState, hstick_cmpr),
+ "hstick_cmpr");
+ cpu_hintp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, hintp),
+ "hintp");
+ cpu_htba = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, htba),
+ "htba");
+ cpu_hver = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, hver),
+ "hver");
+ cpu_ssr = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUState, ssr), "ssr");
+ cpu_ver = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUState, version), "ver");
+ cpu_softint = tcg_global_mem_new_i32(TCG_AREG0,
+ offsetof(CPUState, softint),
+ "softint");
+#else
+ cpu_wim = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, wim),
+ "wim");
+#endif
+ cpu_cond = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cond),
+ "cond");
+ cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_src),
+ "cc_src");
+ cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUState, cc_src2),
+ "cc_src2");
+ cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_dst),
+ "cc_dst");
+ cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUState, cc_op),
+ "cc_op");
+ cpu_psr = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUState, psr),
+ "psr");
+ cpu_fsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, fsr),
+ "fsr");
+ cpu_pc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, pc),
+ "pc");
+ cpu_npc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, npc),
+ "npc");
+ cpu_y = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, y), "y");
+#ifndef CONFIG_USER_ONLY
+ cpu_tbr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, tbr),
+ "tbr");
+#endif
+ for (i = 1; i < 8; i++)
+ cpu_gregs[i] = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUState, gregs[i]),
+ gregnames[i]);
+ for (i = 0; i < TARGET_FPREGS; i++)
+ cpu_fpr[i] = tcg_global_mem_new_i32(TCG_AREG0,
+ offsetof(CPUState, fpr[i]),
+ fregnames[i]);
+
+ /* register helpers */
+
+#define GEN_HELPER 2
+#include "helper.h"
+ }
+}
+
+void gen_pc_load(CPUState *env, TranslationBlock *tb,
+ unsigned long searched_pc, int pc_pos, void *puc)
+{
+ target_ulong npc;
+ env->pc = gen_opc_pc[pc_pos];
+ npc = gen_opc_npc[pc_pos];
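+    /* npc is either a real address or one of the translation-time
+       markers DYNAMIC_PC (1) and JUMP_PC (2) */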
+ if (npc == 1) {
+ /* dynamic NPC: already stored */
+ } else if (npc == 2) {
+ /* jump PC: use 'cond' and the jump targets of the translation */
+ if (env->cond) {
+ env->npc = gen_opc_jump_pc[0];
+ } else {
+ env->npc = gen_opc_jump_pc[1];
+ }
+ } else {
+ env->npc = npc;
+ }
+
+ /* flush pending conditional evaluations before exposing cpu state */
+ if (CC_OP != CC_OP_FLAGS) {
+ helper_compute_psr();
+ }
+}