Diffstat (limited to 'target-ppc')
-rw-r--r--  target-ppc/STATUS             |  559
-rw-r--r--  target-ppc/cpu.h              | 1634
-rw-r--r--  target-ppc/exec.h             |   57
-rw-r--r--  target-ppc/fake-exec.c        |  104
-rw-r--r--  target-ppc/helper.c           | 2822
-rw-r--r--  target-ppc/helper.h           |  404
-rw-r--r--  target-ppc/helper_regs.h      |  111
-rw-r--r--  target-ppc/kvm.c              |  406
-rw-r--r--  target-ppc/kvm_ppc.c          |  105
-rw-r--r--  target-ppc/kvm_ppc.h          |   33
-rw-r--r--  target-ppc/libkvm.c           |  102
-rw-r--r--  target-ppc/libkvm.h           |   36
-rw-r--r--  target-ppc/machine.c          |  180
-rw-r--r--  target-ppc/mfrom_table.c      |   79
-rw-r--r--  target-ppc/mfrom_table_gen.c  |   33
-rw-r--r--  target-ppc/op_helper.c        | 4172
-rw-r--r--  target-ppc/translate.c        | 9141
-rw-r--r--  target-ppc/translate_init.c   | 9768
18 files changed, 29746 insertions, 0 deletions
diff --git a/target-ppc/STATUS b/target-ppc/STATUS
new file mode 100644
index 0000000..32e7ffa
--- /dev/null
+++ b/target-ppc/STATUS
@@ -0,0 +1,559 @@
+PowerPC emulation status.
+The goal of this file is to provide a reference status to avoid regressions.
+
+===============================================================================
+PowerPC core emulation status
+
+INSN: instruction set.
+ OK => all instructions are emulated
+ KO => some insns are missing or some should be removed
+ ? => unchecked
+SPR: special purpose register set
+ OK => all SPRs are registered (but some may be fake)
+ KO => some SPRs are missing or should be removed
+ ? => unchecked
+MSR: MSR bit definitions
+ OK => all MSR bits properly defined
+ KO => MSR definition is incorrect
+ ? => unchecked
+IRQ: input signal definitions (mostly interrupts)
+ OK => input signals are properly defined
+ KO => input signals are not implemented (system emulation does not work)
+ ? => input signal definitions may be incorrect
+MMU: MMU model implementation
+ OK => MMU model is implemented and Linux is able to boot
+ KO => MMU model not implemented or buggy
+ ? => MMU model not tested
+EXCP: exception model implementation
+ OK => exception model is implemented and Linux is able to boot
+ KO => exception model not implemented or known to be buggy
+ ? => exception model may be incorrect or is untested
+
+Embedded PowerPC cores
+***
+PowerPC 401:
+INSN OK
+SPR OK 401A1
+MSR OK
+IRQ KO partially implemented
+MMU OK
+EXCP ?
+
+PowerPC 401x2:
+INSN OK
+SPR OK 401B2 401C2 401D2 401E2 401F2
+MSR OK
+IRQ KO partially implemented
+MMU OK
+EXCP ?
+
+PowerPC IOP480:
+INSN OK
+SPR OK IOP480
+MSR OK
+IRQ KO partially implemented
+MMU OK
+EXCP ?
+
+To be checked: 401G2 401B3 Cobra
+
+***
+PowerPC 403:
+INSN OK
+SPR OK 403GA 403GB
+MMU OK
+MSR OK
+IRQ KO not implemented
+EXCP ?
+
+PowerPC 403GCX:
+INSN OK
+SPR OK 403GCX
+MMU OK
+MSR OK
+IRQ KO not implemented
+EXCP ?
+
+To be checked: 403GC
+
+***
+PowerPC 405:
+Checked: 405CRa 405CRb 405CRc 405EP 405GPa 405GPb 405GPc 405GPd 405GPe 405GPR
+ Npe405H Npe405H2 Npe405L
+INSN OK
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP OK
+Remarks: Linux 2.4 boots (at least 1 proprietary firmware).
+ U-Boot seems to freeze at boot time.
+To be checked: 405D2 405D4 405EZ 405LP Npe4GS3 STB03 STB04 STB25
+ x2vp4 x2vp7 x2vp20 x2vp50
+
+XXX: find out what the IBM e407b4 is
+
+***
+PowerPC 440:
+Checked: 440EPa 440EPb 440GXa 440GXb 440GXc 440GXf 440SP 440SP2
+INSN OK
+SPR OK
+MSR OK
+IRQ KO not implemented
+MMU ?
+EXCP ?
+
+PowerPC 440GP:
+Checked: 440GPb 440GPc
+INSN OK
+SPR OK
+MSR OK
+IRQ KO not implemented
+MMU ?
+EXCP ?
+
+PowerPC 440x4:
+Checked: 440A4 440B4 440G4 440H4
+INSN OK
+SPR OK
+MSR OK
+IRQ KO not implemented
+MMU ?
+EXCP ?
+
+PowerPC 440x5:
+Checked: 440A5 440F5 440G5 440H6 440GRa
+INSN OK
+SPR OK
+MSR OK
+IRQ KO not implemented
+MMU ?
+EXCP ?
+
+To be checked: 440EPx 440GRx 440SPE
+
+***
+PowerPC 460: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+PowerPC 460F: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+***
+PowerPC e200: (not implemented)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+***
+PowerPC e300: (not implemented)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+***
+PowerPC e500: (not implemented)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+***
+PowerPC e600: (not implemented)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+***
+32-bit PowerPC
+PowerPC 601: (601 601v2)
+INSN OK
+SPR OK is HID15 present only on the 601v2?
+MSR OK
+IRQ KO not implemented
+MMU ?
+EXCP ?
+Remarks: some instructions should have a specific behavior (not implemented)
+
+PowerPC 602: 602
+INSN OK
+SPR OK
+MSR OK
+IRQ OK
+MMU ?
+EXCP ? at least timer and external interrupt are OK
+Remarks: Linux 2.4 crashes when entering user mode.
+ Linux 2.6.22 boots on this CPU but does not recognize it.
+
+PowerPC 603: (603)
+INSN OK
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP OK
+Remarks: Linux 2.4 boots and properly recognizes the CPU
+ Linux 2.6.22 idem.
+
+PowerPC 603e: (603e11)
+INSN OK
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP OK
+Remarks: Linux 2.4 boots and properly recognizes the CPU
+ Linux 2.6.22 idem.
+
+PowerPC G2:
+INSN OK
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP OK
+Remarks: Linux 2.4 boots, recognizes the CPU as a 82xx.
+ Linux 2.6.22 idem.
+
+PowerPC G2le:
+INSN OK
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP OK
+Remarks: Linux 2.4 does not boot. Same symptoms as 602.
+ Linux 2.6.22 boots and properly recognizes the CPU.
+
+PowerPC 604:
+INSN OK
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP OK
+Remarks: Linux 2.4 boots and properly recognizes the CPU.
+ Linux 2.6.22 idem.
+
+PowerPC 7x0:
+INSN OK
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP OK
+Remarks: Linux 2.4 boots and properly recognizes the CPU.
+ Linux 2.6.22 idem.
+
+PowerPC 750fx:
+INSN OK
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP OK
+Remarks: Linux 2.4 boots but does not properly recognize the CPU.
+ Linux 2.6.22 boots and properly recognizes the CPU.
+
+PowerPC 7x5:
+INSN ?
+SPR ?
+MSR ?
+IRQ OK
+MMU ?
+EXCP OK
+Remarks: Linux 2.4 does not boot.
+ Linux 2.6.22 idem.
+
+PowerPC 7400:
+INSN KO Altivec missing
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP ? Altivec, ...
+Remarks: Linux 2.4 boots and properly recognizes the CPU.
+ Linux 2.6.22 idem.
+
+PowerPC 7410:
+INSN KO Altivec missing
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP ? Altivec, ...
+Remarks: Linux 2.4 boots and properly recognizes the CPU.
+ Linux 2.6.22 idem.
+ Note that the user's manual says tlbld & tlbli are implemented, but this may
+ be a mistake as TLB loads are managed by the hardware and the CPU does not
+ implement the needed registers.
+
+PowerPC 7441:
+INSN KO Altivec missing
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP ? Altivec, ...
+Remarks: Linux does not have the code to handle TLB miss on this CPU
+ Linux 2.6.22 idem.
+
+PowerPC 7450/7451:
+INSN KO Altivec missing
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP ? Altivec, ...
+Remarks: Linux does not have the code to handle TLB miss on this CPU
+ Linux 2.6.22 idem.
+
+PowerPC 7445/7447:
+INSN KO Altivec missing
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP ? Altivec, ...
+Remarks: Linux does not have the code to handle TLB miss on this CPU
+ Linux 2.6.22 idem.
+
+PowerPC 7455/7457:
+INSN KO Altivec missing
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP ? Altivec, ...
+Remarks: Linux does not have the code to handle TLB miss on this CPU
+ Linux 2.6.22 idem.
+
+64-bit PowerPC
+PowerPC 620: (disabled)
+INSN KO
+SPR KO
+MSR ?
+IRQ KO
+MMU KO
+EXCP KO
+Remarks: not much documentation for this implementation...
+
+PowerPC 970:
+INSN KO Altivec missing and more
+SPR KO
+MSR ?
+IRQ OK
+MMU OK
+EXCP KO partially implemented
+Remarks: Should be able to boot but there is no hw platform currently emulated.
+
+PowerPC 970FX:
+INSN KO Altivec missing and more
+SPR KO
+MSR ?
+IRQ OK
+MMU OK
+EXCP KO partially implemented
+Remarks: Should be able to boot but there is no hw platform currently emulated.
+
+PowerPC 970GX:
+INSN KO Altivec missing and more
+SPR KO
+MSR ?
+IRQ OK
+MMU OK
+EXCP KO partially implemented
+Remarks: Should be able to boot but there is no hw platform currently emulated.
+
+PowerPC Cell:
+INSN KO Altivec missing and more
+SPR KO
+MSR ?
+IRQ ?
+MMU ?
+EXCP ? partially implemented
+Remarks: As the core is mostly a 970, it should be able to boot.
+ SPEs are not implemented.
+
+PowerPC 630: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+PowerPC 631: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+POWER4: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+POWER4+: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+POWER5: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+POWER5+: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+POWER6: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+RS64: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+RS64-II: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+RS64-III: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+RS64-IV: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+Original POWER
+POWER: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+POWER2: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+===============================================================================
+PowerPC microcontrollers emulation status
+
+Implementation should be sufficient to boot Linux:
+(there seem to be problems with U-Boot freezing at some point)
+- PowerPC 405CR
+- PowerPC 405EP
+
+TODO:
+- PowerPC 401 microcontrollers emulation
+- PowerPC 403 microcontrollers emulation
+- more PowerPC 405 microcontrollers emulation
+- Fixes / more features for implemented PowerPC 405 microcontrollers emulation
+- PowerPC 440 microcontrollers emulation
+- e200 microcontrollers emulation
+- e300 microcontrollers emulation
+- e500 microcontrollers emulation
+- e600 microcontrollers emulation
+
+===============================================================================
+PowerPC based platforms emulation status
+
+* PREP platform (RS/6000 7043...) - TO BE CHECKED (broken)
+- Gentoo Linux live CDROM 1.4
+- Debian Linux 3.0
+- Mandrake Linux 9
+
+* heathrow PowerMac platform (beige PowerMac) - TO BE CHECKED (broken)
+- Gentoo Linux live CDROM 1.4
+- Debian Linux 3.0
+- Mandrake Linux 9
+
+* mac99 platform (white and blue PowerMac, ...)
+- Gentoo Linux live CDROM 1.4 - boots, compiles the Linux kernel
+- Debian Linux woody - boots from CDROM and HDD
+- Mandrake Linux 9 - boots from CDROM, freezes during install
+- Knoppix 2003-07-13_4 boots from CDROM, problem with X configuration
+ distribution bug: X runs with a properly hand-coded configuration.
+- rock Linux 2.0 runs from CDROM
+
+* Linux 2.6 support seems badly broken (it used to boot...).
+
+* PowerPC 405EP reference boards:
+- can boot Linux 2.4 & 2.6.
+ Need to provide a ready-to-boot flash image for reproducible tests.
+
+TODO:
+- URGENT: fix PreP and heathrow platforms
+- PowerPC 64 reference platform
+- MCA based RS/6000 emulation
+- CHRP emulation (not PowerMac)
+- PAPR emulation
+- ePAPR emulation
+- misc PowerPC reference boards emulation
+
+===============================================================================
diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
new file mode 100644
index 0000000..22b14d8
--- /dev/null
+++ b/target-ppc/cpu.h
@@ -0,0 +1,1634 @@
+/*
+ * PowerPC emulation cpu definitions for qemu.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#if !defined (__CPU_PPC_H__)
+#define __CPU_PPC_H__
+
+#include "config.h"
+#include "qemu-common.h"
+
+//#define PPC_EMULATE_32BITS_HYPV
+
+#if defined (TARGET_PPC64)
+/* PowerPC 64 definitions */
+#define TARGET_LONG_BITS 64
+#define TARGET_PAGE_BITS 12
+
+/* Note that the official physical address space is 62-M bits, where M
+   is implementation dependent. I've not looked up M for the set of
+   CPUs we emulate at the system level. */
+#define TARGET_PHYS_ADDR_SPACE_BITS 62
+
+/* Note that the PPC environment architecture talks about 80-bit virtual
+   addresses, with segmentation. Obviously that's not all visible to a
+   single process, which is all we're concerned with here. */
+#ifdef TARGET_ABI32
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
+#else
+# define TARGET_VIRT_ADDR_SPACE_BITS 64
+#endif
+
+#else /* defined (TARGET_PPC64) */
+/* PowerPC 32 definitions */
+#define TARGET_LONG_BITS 32
+
+#if defined(TARGET_PPCEMB)
+/* Specific definitions for PowerPC embedded */
+/* BookE has a 36-bit physical address space */
+#if defined(CONFIG_USER_ONLY) || defined(USE_KVM)
+/* It looks like a lot of Linux programs assume the page size
+ * is 4 kB. This is evil, but we have to deal with it...
+ * Also, KVM for embedded PowerPC currently needs 4 kB aligned pages.
+ */
+#define TARGET_PAGE_BITS 12
+#else /* defined(CONFIG_USER_ONLY) */
+/* Pages can be as small as 1 kB */
+#define TARGET_PAGE_BITS 10
+#endif /* defined(CONFIG_USER_ONLY) */
+#else /* defined(TARGET_PPCEMB) */
+/* "standard" PowerPC 32 definitions */
+#define TARGET_PAGE_BITS 12
+#endif /* defined(TARGET_PPCEMB) */
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+
+#endif /* defined (TARGET_PPC64) */
+
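As an aside (illustration only, not part of this patch): TARGET_PAGE_BITS is the base-2 logarithm of the page size, so the values chosen above correspond to 4 kB (12 bits) and 1 kB (10 bits) pages.

/* Illustration only: the page size implied by TARGET_PAGE_BITS. */
#include <stdio.h>

int main(void)
{
    int bits;

    for (bits = 10; bits <= 12; bits += 2) {
        printf("TARGET_PAGE_BITS = %d -> page size = %d bytes\n",
               bits, 1 << bits);
    }
    return 0;
}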
+#define CPUState struct CPUPPCState
+
+#include "cpu-defs.h"
+
+#include <setjmp.h>
+
+#include "softfloat.h"
+
+#define TARGET_HAS_ICE 1
+
+#if defined (TARGET_PPC64)
+#define ELF_MACHINE EM_PPC64
+#else
+#define ELF_MACHINE EM_PPC
+#endif
+
+/*****************************************************************************/
+/* MMU model */
+typedef enum powerpc_mmu_t powerpc_mmu_t;
+enum powerpc_mmu_t {
+ POWERPC_MMU_UNKNOWN = 0x00000000,
+ /* Standard 32-bit PowerPC MMU */
+ POWERPC_MMU_32B = 0x00000001,
+ /* PowerPC 6xx MMU with software TLB */
+ POWERPC_MMU_SOFT_6xx = 0x00000002,
+ /* PowerPC 74xx MMU with software TLB */
+ POWERPC_MMU_SOFT_74xx = 0x00000003,
+ /* PowerPC 4xx MMU with software TLB */
+ POWERPC_MMU_SOFT_4xx = 0x00000004,
+ /* PowerPC 4xx MMU with software TLB and zone protection */
+ POWERPC_MMU_SOFT_4xx_Z = 0x00000005,
+ /* PowerPC MMU in real mode only */
+ POWERPC_MMU_REAL = 0x00000006,
+ /* Freescale MPC8xx MMU model */
+ POWERPC_MMU_MPC8xx = 0x00000007,
+ /* BookE MMU model */
+ POWERPC_MMU_BOOKE = 0x00000008,
+ /* BookE FSL MMU model */
+ POWERPC_MMU_BOOKE_FSL = 0x00000009,
+ /* PowerPC 601 MMU model (specific BATs format) */
+ POWERPC_MMU_601 = 0x0000000A,
+#if defined(TARGET_PPC64)
+#define POWERPC_MMU_64 0x00010000
+ /* 64-bit PowerPC MMU */
+ POWERPC_MMU_64B = POWERPC_MMU_64 | 0x00000001,
+ /* 620 variant (no segment exceptions) */
+ POWERPC_MMU_620 = POWERPC_MMU_64 | 0x00000002,
+#endif /* defined(TARGET_PPC64) */
+};
+
+/*****************************************************************************/
+/* Exception model */
+typedef enum powerpc_excp_t powerpc_excp_t;
+enum powerpc_excp_t {
+ POWERPC_EXCP_UNKNOWN = 0,
+ /* Standard PowerPC exception model */
+ POWERPC_EXCP_STD,
+ /* PowerPC 40x exception model */
+ POWERPC_EXCP_40x,
+ /* PowerPC 601 exception model */
+ POWERPC_EXCP_601,
+ /* PowerPC 602 exception model */
+ POWERPC_EXCP_602,
+ /* PowerPC 603 exception model */
+ POWERPC_EXCP_603,
+ /* PowerPC 603e exception model */
+ POWERPC_EXCP_603E,
+ /* PowerPC G2 exception model */
+ POWERPC_EXCP_G2,
+ /* PowerPC 604 exception model */
+ POWERPC_EXCP_604,
+ /* PowerPC 7x0 exception model */
+ POWERPC_EXCP_7x0,
+ /* PowerPC 7x5 exception model */
+ POWERPC_EXCP_7x5,
+ /* PowerPC 74xx exception model */
+ POWERPC_EXCP_74xx,
+ /* BookE exception model */
+ POWERPC_EXCP_BOOKE,
+#if defined(TARGET_PPC64)
+ /* PowerPC 970 exception model */
+ POWERPC_EXCP_970,
+#endif /* defined(TARGET_PPC64) */
+};
+
+/*****************************************************************************/
+/* Exception vectors definitions */
+enum {
+ POWERPC_EXCP_NONE = -1,
+ /* The first 64 entries are used by the PowerPC embedded specification */
+ POWERPC_EXCP_CRITICAL = 0, /* Critical input */
+ POWERPC_EXCP_MCHECK = 1, /* Machine check exception */
+ POWERPC_EXCP_DSI = 2, /* Data storage exception */
+ POWERPC_EXCP_ISI = 3, /* Instruction storage exception */
+ POWERPC_EXCP_EXTERNAL = 4, /* External input */
+ POWERPC_EXCP_ALIGN = 5, /* Alignment exception */
+ POWERPC_EXCP_PROGRAM = 6, /* Program exception */
+ POWERPC_EXCP_FPU = 7, /* Floating-point unavailable exception */
+ POWERPC_EXCP_SYSCALL = 8, /* System call exception */
+ POWERPC_EXCP_APU = 9, /* Auxiliary processor unavailable */
+ POWERPC_EXCP_DECR = 10, /* Decrementer exception */
+ POWERPC_EXCP_FIT = 11, /* Fixed-interval timer interrupt */
+ POWERPC_EXCP_WDT = 12, /* Watchdog timer interrupt */
+ POWERPC_EXCP_DTLB = 13, /* Data TLB miss */
+ POWERPC_EXCP_ITLB = 14, /* Instruction TLB miss */
+ POWERPC_EXCP_DEBUG = 15, /* Debug interrupt */
+ /* Vectors 16 to 31 are reserved */
+ POWERPC_EXCP_SPEU = 32, /* SPE/embedded floating-point unavailable */
+ POWERPC_EXCP_EFPDI = 33, /* Embedded floating-point data interrupt */
+ POWERPC_EXCP_EFPRI = 34, /* Embedded floating-point round interrupt */
+ POWERPC_EXCP_EPERFM = 35, /* Embedded performance monitor interrupt */
+ POWERPC_EXCP_DOORI = 36, /* Embedded doorbell interrupt */
+ POWERPC_EXCP_DOORCI = 37, /* Embedded doorbell critical interrupt */
+ /* Vectors 38 to 63 are reserved */
+ /* Exceptions defined in the PowerPC server specification */
+ POWERPC_EXCP_RESET = 64, /* System reset exception */
+ POWERPC_EXCP_DSEG = 65, /* Data segment exception */
+ POWERPC_EXCP_ISEG = 66, /* Instruction segment exception */
+ POWERPC_EXCP_HDECR = 67, /* Hypervisor decrementer exception */
+ POWERPC_EXCP_TRACE = 68, /* Trace exception */
+ POWERPC_EXCP_HDSI = 69, /* Hypervisor data storage exception */
+ POWERPC_EXCP_HISI = 70, /* Hypervisor instruction storage exception */
+ POWERPC_EXCP_HDSEG = 71, /* Hypervisor data segment exception */
+ POWERPC_EXCP_HISEG = 72, /* Hypervisor instruction segment exception */
+ POWERPC_EXCP_VPU = 73, /* Vector unavailable exception */
+ /* 40x specific exceptions */
+ POWERPC_EXCP_PIT = 74, /* Programmable interval timer interrupt */
+ /* 601 specific exceptions */
+ POWERPC_EXCP_IO = 75, /* IO error exception */
+ POWERPC_EXCP_RUNM = 76, /* Run mode exception */
+ /* 602 specific exceptions */
+ POWERPC_EXCP_EMUL = 77, /* Emulation trap exception */
+ /* 602/603 specific exceptions */
+ POWERPC_EXCP_IFTLB = 78, /* Instruction fetch TLB miss */
+ POWERPC_EXCP_DLTLB = 79, /* Data load TLB miss */
+ POWERPC_EXCP_DSTLB = 80, /* Data store TLB miss */
+ /* Exceptions available on most PowerPC */
+ POWERPC_EXCP_FPA = 81, /* Floating-point assist exception */
+ POWERPC_EXCP_DABR = 82, /* Data address breakpoint */
+ POWERPC_EXCP_IABR = 83, /* Instruction address breakpoint */
+ POWERPC_EXCP_SMI = 84, /* System management interrupt */
+ POWERPC_EXCP_PERFM = 85, /* Embedded performance monitor interrupt */
+ /* 7xx/74xx specific exceptions */
+ POWERPC_EXCP_THERM = 86, /* Thermal interrupt */
+ /* 74xx specific exceptions */
+ POWERPC_EXCP_VPUA = 87, /* Vector assist exception */
+ /* 970FX specific exceptions */
+ POWERPC_EXCP_SOFTP = 88, /* Soft patch exception */
+ POWERPC_EXCP_MAINT = 89, /* Maintenance exception */
+ /* Freescale embedded core specific exceptions */
+ POWERPC_EXCP_MEXTBR = 90, /* Maskable external breakpoint */
+ POWERPC_EXCP_NMEXTBR = 91, /* Non-maskable external breakpoint */
+ POWERPC_EXCP_ITLBE = 92, /* Instruction TLB error */
+ POWERPC_EXCP_DTLBE = 93, /* Data TLB error */
+ /* EOL */
+ POWERPC_EXCP_NB = 96,
+ /* Qemu exceptions: used internally during code translation */
+ POWERPC_EXCP_STOP = 0x200, /* stop translation */
+ POWERPC_EXCP_BRANCH = 0x201, /* branch instruction */
+ /* Qemu exceptions: special cases we want to stop translation */
+ POWERPC_EXCP_SYNC = 0x202, /* context synchronizing instruction */
+ POWERPC_EXCP_SYSCALL_USER = 0x203, /* System call in user mode only */
+ POWERPC_EXCP_STCX = 0x204 /* Conditional stores in user mode */
+};
+
+/* Exceptions error codes */
+enum {
+ /* Exception subtypes for POWERPC_EXCP_ALIGN */
+ POWERPC_EXCP_ALIGN_FP = 0x01, /* FP alignment exception */
+ POWERPC_EXCP_ALIGN_LST = 0x02, /* Unaligned mult/extern load/store */
+ POWERPC_EXCP_ALIGN_LE = 0x03, /* Multiple little-endian access */
+ POWERPC_EXCP_ALIGN_PROT = 0x04, /* Access crosses a protection boundary */
+ POWERPC_EXCP_ALIGN_BAT = 0x05, /* Access crosses a BAT/seg boundary */
+ POWERPC_EXCP_ALIGN_CACHE = 0x06, /* Impossible dcbz access */
+ /* Exception subtypes for POWERPC_EXCP_PROGRAM */
+ /* FP exceptions */
+ POWERPC_EXCP_FP = 0x10,
+ POWERPC_EXCP_FP_OX = 0x01, /* FP overflow */
+ POWERPC_EXCP_FP_UX = 0x02, /* FP underflow */
+ POWERPC_EXCP_FP_ZX = 0x03, /* FP divide by zero */
+ POWERPC_EXCP_FP_XX = 0x04, /* FP inexact */
+ POWERPC_EXCP_FP_VXSNAN = 0x05, /* FP invalid SNaN op */
+ POWERPC_EXCP_FP_VXISI = 0x06, /* FP invalid infinite subtraction */
+ POWERPC_EXCP_FP_VXIDI = 0x07, /* FP invalid infinite divide */
+ POWERPC_EXCP_FP_VXZDZ = 0x08, /* FP invalid zero divide */
+ POWERPC_EXCP_FP_VXIMZ = 0x09, /* FP invalid infinite * zero */
+ POWERPC_EXCP_FP_VXVC = 0x0A, /* FP invalid compare */
+ POWERPC_EXCP_FP_VXSOFT = 0x0B, /* FP invalid operation */
+ POWERPC_EXCP_FP_VXSQRT = 0x0C, /* FP invalid square root */
+ POWERPC_EXCP_FP_VXCVI = 0x0D, /* FP invalid integer conversion */
+ /* Invalid instruction */
+ POWERPC_EXCP_INVAL = 0x20,
+ POWERPC_EXCP_INVAL_INVAL = 0x01, /* Invalid instruction */
+ POWERPC_EXCP_INVAL_LSWX = 0x02, /* Invalid lswx instruction */
+ POWERPC_EXCP_INVAL_SPR = 0x03, /* Invalid SPR access */
+ POWERPC_EXCP_INVAL_FP = 0x04, /* Unimplemented mandatory fp instr */
+ /* Privileged instruction */
+ POWERPC_EXCP_PRIV = 0x30,
+ POWERPC_EXCP_PRIV_OPC = 0x01, /* Privileged operation exception */
+ POWERPC_EXCP_PRIV_REG = 0x02, /* Privileged register exception */
+ /* Trap */
+ POWERPC_EXCP_TRAP = 0x40,
+};
+
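These category values (POWERPC_EXCP_FP, _INVAL, _PRIV, _TRAP) occupy a different nibble than the subtypes, so the two can apparently be ORed into a single error code and split apart again. A small sketch of that convention (an assumption about intended usage, with stand-in names; not part of the patch):

/* Sketch: combining a category with a subtype, and recovering the category. */
enum { EXCP_FP = 0x10, EXCP_FP_OX = 0x01 };     /* stand-ins for the values above */

static int make_error_code(int category, int subtype)
{
    return category | subtype;                  /* e.g. EXCP_FP | EXCP_FP_OX == 0x11 */
}

static int error_category(int error_code)
{
    return error_code & 0xF0;                   /* recovers EXCP_FP */
}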
+/*****************************************************************************/
+/* Input pins model */
+typedef enum powerpc_input_t powerpc_input_t;
+enum powerpc_input_t {
+ PPC_FLAGS_INPUT_UNKNOWN = 0,
+ /* PowerPC 6xx bus */
+ PPC_FLAGS_INPUT_6xx,
+ /* BookE bus */
+ PPC_FLAGS_INPUT_BookE,
+ /* PowerPC 405 bus */
+ PPC_FLAGS_INPUT_405,
+ /* PowerPC 970 bus */
+ PPC_FLAGS_INPUT_970,
+ /* PowerPC 401 bus */
+ PPC_FLAGS_INPUT_401,
+ /* Freescale RCPU bus */
+ PPC_FLAGS_INPUT_RCPU,
+};
+
+#define PPC_INPUT(env) (env->bus_model)
+
+/*****************************************************************************/
+typedef struct ppc_def_t ppc_def_t;
+typedef struct opc_handler_t opc_handler_t;
+
+/*****************************************************************************/
+/* Types used to describe some PowerPC registers */
+typedef struct CPUPPCState CPUPPCState;
+typedef struct ppc_tb_t ppc_tb_t;
+typedef struct ppc_spr_t ppc_spr_t;
+typedef struct ppc_dcr_t ppc_dcr_t;
+typedef union ppc_avr_t ppc_avr_t;
+typedef union ppc_tlb_t ppc_tlb_t;
+
+/* SPR access micro-op generation callbacks */
+struct ppc_spr_t {
+ void (*uea_read)(void *opaque, int gpr_num, int spr_num);
+ void (*uea_write)(void *opaque, int spr_num, int gpr_num);
+#if !defined(CONFIG_USER_ONLY)
+ void (*oea_read)(void *opaque, int gpr_num, int spr_num);
+ void (*oea_write)(void *opaque, int spr_num, int gpr_num);
+ void (*hea_read)(void *opaque, int gpr_num, int spr_num);
+ void (*hea_write)(void *opaque, int spr_num, int gpr_num);
+#endif
+ const char *name;
+};
+
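For reference, the sketch below shows a callback matching the uea_read signature declared above being plugged into a ppc_spr_t; the names and the empty body are hypothetical, not part of the patch:

/* Hypothetical example of filling in a ppc_spr_t (illustration only). */
static void example_spr_read(void *opaque, int gpr_num, int spr_num)
{
    /* A real callback would generate the micro-ops that move
     * SPR spr_num into GPR gpr_num; this stub only shows the shape. */
    (void)opaque;
    (void)gpr_num;
    (void)spr_num;
}

static ppc_spr_t example_spr = {
    .uea_read = &example_spr_read,
    .name     = "EXAMPLE",
};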
+/* Altivec registers (128 bits) */
+union ppc_avr_t {
+ float32 f[4];
+ uint8_t u8[16];
+ uint16_t u16[8];
+ uint32_t u32[4];
+ int8_t s8[16];
+ int16_t s16[8];
+ int32_t s32[4];
+ uint64_t u64[2];
+};
+
+#if !defined(CONFIG_USER_ONLY)
+/* Software TLB cache */
+typedef struct ppc6xx_tlb_t ppc6xx_tlb_t;
+struct ppc6xx_tlb_t {
+ target_ulong pte0;
+ target_ulong pte1;
+ target_ulong EPN;
+};
+
+typedef struct ppcemb_tlb_t ppcemb_tlb_t;
+struct ppcemb_tlb_t {
+ target_phys_addr_t RPN;
+ target_ulong EPN;
+ target_ulong PID;
+ target_ulong size;
+ uint32_t prot;
+ uint32_t attr; /* Storage attributes */
+};
+
+union ppc_tlb_t {
+ ppc6xx_tlb_t tlb6;
+ ppcemb_tlb_t tlbe;
+};
+#endif
+
+typedef struct ppc_slb_t ppc_slb_t;
+struct ppc_slb_t {
+ uint64_t tmp64;
+ uint32_t tmp;
+};
+
+/*****************************************************************************/
+/* Machine state register bits definition */
+#define MSR_SF 63 /* Sixty-four-bit mode hflags */
+#define MSR_TAG 62 /* Tag-active mode (POWERx ?) */
+#define MSR_ISF 61 /* Sixty-four-bit interrupt mode on 630 */
+#define MSR_SHV 60 /* hypervisor state hflags */
+#define MSR_CM 31 /* Computation mode for BookE hflags */
+#define MSR_ICM 30 /* Interrupt computation mode for BookE */
+#define MSR_THV 29 /* hypervisor state for 32-bit PowerPC hflags */
+#define MSR_UCLE 26 /* User-mode cache lock enable for BookE */
+#define MSR_VR 25 /* altivec available x hflags */
+#define MSR_SPE 25 /* SPE enable for BookE x hflags */
+#define MSR_AP 23 /* Access privilege state on 602 hflags */
+#define MSR_SA 22 /* Supervisor access mode on 602 hflags */
+#define MSR_KEY 19 /* key bit on 603e */
+#define MSR_POW 18 /* Power management */
+#define MSR_TGPR 17 /* TGPR usage on 602/603 x */
+#define MSR_CE 17 /* Critical interrupt enable on embedded PowerPC x */
+#define MSR_ILE 16 /* Interrupt little-endian mode */
+#define MSR_EE 15 /* External interrupt enable */
+#define MSR_PR 14 /* Problem state hflags */
+#define MSR_FP 13 /* Floating point available hflags */
+#define MSR_ME 12 /* Machine check interrupt enable */
+#define MSR_FE0 11 /* Floating point exception mode 0 hflags */
+#define MSR_SE 10 /* Single-step trace enable x hflags */
+#define MSR_DWE 10 /* Debug wait enable on 405 x */
+#define MSR_UBLE 10 /* User BTB lock enable on e500 x */
+#define MSR_BE 9 /* Branch trace enable x hflags */
+#define MSR_DE 9 /* Debug interrupts enable on embedded PowerPC x */
+#define MSR_FE1 8 /* Floating point exception mode 1 hflags */
+#define MSR_AL 7 /* AL bit on POWER */
+#define MSR_EP 6 /* Exception prefix on 601 */
+#define MSR_IR 5 /* Instruction relocate */
+#define MSR_DR 4 /* Data relocate */
+#define MSR_PE 3 /* Protection enable on 403 */
+#define MSR_PX 2 /* Protection exclusive on 403 x */
+#define MSR_PMM 2 /* Performance monitor mark on POWER x */
+#define MSR_RI 1 /* Recoverable interrupt 1 */
+#define MSR_LE 0 /* Little-endian mode 1 hflags */
+
+#define msr_sf ((env->msr >> MSR_SF) & 1)
+#define msr_isf ((env->msr >> MSR_ISF) & 1)
+#define msr_shv ((env->msr >> MSR_SHV) & 1)
+#define msr_cm ((env->msr >> MSR_CM) & 1)
+#define msr_icm ((env->msr >> MSR_ICM) & 1)
+#define msr_thv ((env->msr >> MSR_THV) & 1)
+#define msr_ucle ((env->msr >> MSR_UCLE) & 1)
+#define msr_vr ((env->msr >> MSR_VR) & 1)
+#define msr_spe ((env->msr >> MSR_SPE) & 1)
+#define msr_ap ((env->msr >> MSR_AP) & 1)
+#define msr_sa ((env->msr >> MSR_SA) & 1)
+#define msr_key ((env->msr >> MSR_KEY) & 1)
+#define msr_pow ((env->msr >> MSR_POW) & 1)
+#define msr_tgpr ((env->msr >> MSR_TGPR) & 1)
+#define msr_ce ((env->msr >> MSR_CE) & 1)
+#define msr_ile ((env->msr >> MSR_ILE) & 1)
+#define msr_ee ((env->msr >> MSR_EE) & 1)
+#define msr_pr ((env->msr >> MSR_PR) & 1)
+#define msr_fp ((env->msr >> MSR_FP) & 1)
+#define msr_me ((env->msr >> MSR_ME) & 1)
+#define msr_fe0 ((env->msr >> MSR_FE0) & 1)
+#define msr_se ((env->msr >> MSR_SE) & 1)
+#define msr_dwe ((env->msr >> MSR_DWE) & 1)
+#define msr_uble ((env->msr >> MSR_UBLE) & 1)
+#define msr_be ((env->msr >> MSR_BE) & 1)
+#define msr_de ((env->msr >> MSR_DE) & 1)
+#define msr_fe1 ((env->msr >> MSR_FE1) & 1)
+#define msr_al ((env->msr >> MSR_AL) & 1)
+#define msr_ep ((env->msr >> MSR_EP) & 1)
+#define msr_ir ((env->msr >> MSR_IR) & 1)
+#define msr_dr ((env->msr >> MSR_DR) & 1)
+#define msr_pe ((env->msr >> MSR_PE) & 1)
+#define msr_px ((env->msr >> MSR_PX) & 1)
+#define msr_pmm ((env->msr >> MSR_PMM) & 1)
+#define msr_ri ((env->msr >> MSR_RI) & 1)
+#define msr_le ((env->msr >> MSR_LE) & 1)
+/* Hypervisor bit is more specific */
+#if defined(TARGET_PPC64)
+#define MSR_HVB (1ULL << MSR_SHV)
+#define msr_hv msr_shv
+#else
+#if defined(PPC_EMULATE_32BITS_HYPV)
+#define MSR_HVB (1ULL << MSR_THV)
+#define msr_hv msr_thv
+#else
+#define MSR_HVB (0ULL)
+#define msr_hv (0)
+#endif
+#endif
+
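All of the msr_* accessors above use the same shift-and-mask pattern, and MSR_HVB is just the corresponding single-bit mask. A minimal sketch of that pattern (illustration only, not part of the patch):

/* Illustration only: the pattern behind the msr_* macros and MSR_HVB. */
#include <stdint.h>

static inline int msr_bit(uint64_t msr, int bit)
{
    return (msr >> bit) & 1;        /* e.g. msr_ee is (env->msr >> MSR_EE) & 1 */
}

static inline uint64_t msr_bit_mask(int bit)
{
    return 1ULL << bit;             /* e.g. MSR_HVB is 1ULL << MSR_SHV on 64-bit */
}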
+/* Exception state register bits definition */
+#define ESR_ST 23 /* Exception was caused by a store type access. */
+
+enum {
+ POWERPC_FLAG_NONE = 0x00000000,
+ /* Flag for MSR bit 25 signification (VRE/SPE) */
+ POWERPC_FLAG_SPE = 0x00000001,
+ POWERPC_FLAG_VRE = 0x00000002,
+ /* Flag for MSR bit 17 signification (TGPR/CE) */
+ POWERPC_FLAG_TGPR = 0x00000004,
+ POWERPC_FLAG_CE = 0x00000008,
+ /* Flag for MSR bit 10 signification (SE/DWE/UBLE) */
+ POWERPC_FLAG_SE = 0x00000010,
+ POWERPC_FLAG_DWE = 0x00000020,
+ POWERPC_FLAG_UBLE = 0x00000040,
+ /* Flag for MSR bit 9 signification (BE/DE) */
+ POWERPC_FLAG_BE = 0x00000080,
+ POWERPC_FLAG_DE = 0x00000100,
+ /* Flag for MSR bit 2 signification (PX/PMM) */
+ POWERPC_FLAG_PX = 0x00000200,
+ POWERPC_FLAG_PMM = 0x00000400,
+ /* Flag for special features */
+ /* Decrementer clock: RTC clock (POWER, 601) or bus clock */
+ POWERPC_FLAG_RTC_CLK = 0x00010000,
+ POWERPC_FLAG_BUS_CLK = 0x00020000,
+};
+
+/*****************************************************************************/
+/* Floating point status and control register */
+#define FPSCR_FX 31 /* Floating-point exception summary */
+#define FPSCR_FEX 30 /* Floating-point enabled exception summary */
+#define FPSCR_VX 29 /* Floating-point invalid operation exception summ. */
+#define FPSCR_OX 28 /* Floating-point overflow exception */
+#define FPSCR_UX 27 /* Floating-point underflow exception */
+#define FPSCR_ZX 26 /* Floating-point zero divide exception */
+#define FPSCR_XX 25 /* Floating-point inexact exception */
+#define FPSCR_VXSNAN 24 /* Floating-point invalid operation exception (SNaN) */
+#define FPSCR_VXISI 23 /* Floating-point invalid operation exception (inf) */
+#define FPSCR_VXIDI 22 /* Floating-point invalid operation exception (inf) */
+#define FPSCR_VXZDZ 21 /* Floating-point invalid operation exception (zero) */
+#define FPSCR_VXIMZ 20 /* Floating-point invalid operation exception (inf) */
+#define FPSCR_VXVC 19 /* Floating-point invalid operation exception (comp) */
+#define FPSCR_FR 18 /* Floating-point fraction rounded */
+#define FPSCR_FI 17 /* Floating-point fraction inexact */
+#define FPSCR_C 16 /* Floating-point result class descriptor */
+#define FPSCR_FL 15 /* Floating-point less than or negative */
+#define FPSCR_FG 14 /* Floating-point greater than or negative */
+#define FPSCR_FE 13 /* Floating-point equal or zero */
+#define FPSCR_FU 12 /* Floating-point unordered or NaN */
+#define FPSCR_FPCC 12 /* Floating-point condition code */
+#define FPSCR_FPRF 12 /* Floating-point result flags */
+#define FPSCR_VXSOFT 10 /* Floating-point invalid operation exception (soft) */
+#define FPSCR_VXSQRT 9 /* Floating-point invalid operation exception (sqrt) */
+#define FPSCR_VXCVI 8 /* Floating-point invalid operation exception (int) */
+#define FPSCR_VE 7 /* Floating-point invalid operation exception enable */
+#define FPSCR_OE 6 /* Floating-point overflow exception enable */
+#define FPSCR_UE 5 /* Floating-point underflow exception enable */
+#define FPSCR_ZE 4 /* Floating-point zero divide exception enable */
+#define FPSCR_XE 3 /* Floating-point inexact exception enable */
+#define FPSCR_NI 2 /* Floating-point non-IEEE mode */
+#define FPSCR_RN1 1
+#define FPSCR_RN 0 /* Floating-point rounding control */
+#define fpscr_fex (((env->fpscr) >> FPSCR_FEX) & 0x1)
+#define fpscr_vx (((env->fpscr) >> FPSCR_VX) & 0x1)
+#define fpscr_ox (((env->fpscr) >> FPSCR_OX) & 0x1)
+#define fpscr_ux (((env->fpscr) >> FPSCR_UX) & 0x1)
+#define fpscr_zx (((env->fpscr) >> FPSCR_ZX) & 0x1)
+#define fpscr_xx (((env->fpscr) >> FPSCR_XX) & 0x1)
+#define fpscr_vxsnan (((env->fpscr) >> FPSCR_VXSNAN) & 0x1)
+#define fpscr_vxisi (((env->fpscr) >> FPSCR_VXISI) & 0x1)
+#define fpscr_vxidi (((env->fpscr) >> FPSCR_VXIDI) & 0x1)
+#define fpscr_vxzdz (((env->fpscr) >> FPSCR_VXZDZ) & 0x1)
+#define fpscr_vximz (((env->fpscr) >> FPSCR_VXIMZ) & 0x1)
+#define fpscr_vxvc (((env->fpscr) >> FPSCR_VXVC) & 0x1)
+#define fpscr_fpcc (((env->fpscr) >> FPSCR_FPCC) & 0xF)
+#define fpscr_vxsoft (((env->fpscr) >> FPSCR_VXSOFT) & 0x1)
+#define fpscr_vxsqrt (((env->fpscr) >> FPSCR_VXSQRT) & 0x1)
+#define fpscr_vxcvi (((env->fpscr) >> FPSCR_VXCVI) & 0x1)
+#define fpscr_ve (((env->fpscr) >> FPSCR_VE) & 0x1)
+#define fpscr_oe (((env->fpscr) >> FPSCR_OE) & 0x1)
+#define fpscr_ue (((env->fpscr) >> FPSCR_UE) & 0x1)
+#define fpscr_ze (((env->fpscr) >> FPSCR_ZE) & 0x1)
+#define fpscr_xe (((env->fpscr) >> FPSCR_XE) & 0x1)
+#define fpscr_ni (((env->fpscr) >> FPSCR_NI) & 0x1)
+#define fpscr_rn (((env->fpscr) >> FPSCR_RN) & 0x3)
+/* Invalid operation exception summary */
+#define fpscr_ix ((env->fpscr) & ((1 << FPSCR_VXSNAN) | (1 << FPSCR_VXISI) | \
+ (1 << FPSCR_VXIDI) | (1 << FPSCR_VXZDZ) | \
+ (1 << FPSCR_VXIMZ) | (1 << FPSCR_VXVC) | \
+ (1 << FPSCR_VXSOFT) | (1 << FPSCR_VXSQRT) | \
+ (1 << FPSCR_VXCVI)))
+/* exception summary */
+#define fpscr_ex (((env->fpscr) >> FPSCR_XX) & 0x1F)
+/* enabled exception summary */
+#define fpscr_eex (((env->fpscr) >> FPSCR_XX) & ((env->fpscr) >> FPSCR_XE) & \
+ 0x1F)
+
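fpscr_ex and fpscr_eex work because the five sticky exception bits (XX, ZX, UX, OX, VX at bits 25-29) and their enable bits (XE, ZE, UE, OE, VE at bits 3-7) appear in the same order, so shifting by FPSCR_XX and FPSCR_XE lines them up for a plain AND. A small worked example (illustration only, not part of the patch):

/* Illustration only: why fpscr_eex is a simple AND of two shifted fields. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t fpscr = (1u << 28) | (1u << 6);    /* FPSCR_OX set, FPSCR_OE enabled */
    uint32_t ex  = (fpscr >> 25) & 0x1F;                    /* as in fpscr_ex  */
    uint32_t eex = (fpscr >> 25) & (fpscr >> 3) & 0x1F;     /* as in fpscr_eex */

    assert(ex == 0x08 && eex == 0x08);  /* OX lands on bit 3 of both fields */
    return 0;
}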
+/*****************************************************************************/
+/* Vector status and control register */
+#define VSCR_NJ 16 /* Vector non-Java mode */
+#define VSCR_SAT 0 /* Vector saturation */
+#define vscr_nj (((env->vscr) >> VSCR_NJ) & 0x1)
+#define vscr_sat (((env->vscr) >> VSCR_SAT) & 0x1)
+
+/*****************************************************************************/
+/* The whole PowerPC CPU context */
+#define NB_MMU_MODES 3
+
+struct CPUPPCState {
+ /* First are the most commonly used resources
+ * during translated code execution
+ */
+ /* general purpose registers */
+ target_ulong gpr[32];
+#if !defined(TARGET_PPC64)
+ /* Storage for GPR MSB, used by the SPE extension */
+ target_ulong gprh[32];
+#endif
+ /* LR */
+ target_ulong lr;
+ /* CTR */
+ target_ulong ctr;
+ /* condition register */
+ uint32_t crf[8];
+ /* XER */
+ target_ulong xer;
+ /* Reservation address */
+ target_ulong reserve_addr;
+ /* Reservation value */
+ target_ulong reserve_val;
+ /* Reservation store address */
+ target_ulong reserve_ea;
+ /* Reserved store source register and size */
+ target_ulong reserve_info;
+
+ /* Those ones are used in supervisor mode only */
+ /* machine state register */
+ target_ulong msr;
+ /* temporary general purpose registers */
+ target_ulong tgpr[4]; /* Used to speed-up TLB assist handlers */
+
+ /* Floating point execution context */
+ float_status fp_status;
+ /* floating point registers */
+ float64 fpr[32];
+ /* floating point status and control register */
+ uint32_t fpscr;
+
+ /* Next instruction pointer */
+ target_ulong nip;
+
+ int access_type; /* when a memory exception occurs, the access
+ type is stored here */
+
+ CPU_COMMON
+
+ /* MMU context - only relevant for full system emulation */
+#if !defined(CONFIG_USER_ONLY)
+#if defined(TARGET_PPC64)
+ /* Address space register */
+ target_ulong asr;
+ /* PowerPC 64 SLB area */
+ ppc_slb_t slb[64];
+ int slb_nr;
+#endif
+ /* segment registers */
+ target_ulong sdr1;
+ target_ulong sr[32];
+ /* BATs */
+ int nb_BATs;
+ target_ulong DBAT[2][8];
+ target_ulong IBAT[2][8];
+ /* PowerPC TLB registers (for 4xx and 60x software driven TLBs) */
+ int nb_tlb; /* Total number of TLB entries */
+ int tlb_per_way; /* Speed-up helper: used to avoid divisions at run time */
+ int nb_ways; /* Number of ways in the TLB set */
+ int last_way; /* Last way used, for LRU allocation of TLB entries */
+ int id_tlbs; /* If 1, the MMU has separate TLBs for instructions & data */
+ int nb_pids; /* Number of available PID registers */
+ ppc_tlb_t *tlb; /* TLB is optional. Allocate them only if needed */
+ /* 403 dedicated access protection registers */
+ target_ulong pb[4];
+#endif
+
+ /* Other registers */
+ /* Special purpose registers */
+ target_ulong spr[1024];
+ ppc_spr_t spr_cb[1024];
+ /* Altivec registers */
+ ppc_avr_t avr[32];
+ uint32_t vscr;
+ /* SPE registers */
+ uint64_t spe_acc;
+ uint32_t spe_fscr;
+ /* SPE and Altivec can share a status since they will never be used
+ * simultaneously */
+ float_status vec_status;
+
+ /* Internal devices resources */
+ /* Time base and decrementer */
+ ppc_tb_t *tb_env;
+ /* Device control registers */
+ ppc_dcr_t *dcr_env;
+
+ int dcache_line_size;
+ int icache_line_size;
+
+ /* Those resources are used during exception processing */
+ /* CPU model definition */
+ target_ulong msr_mask;
+ powerpc_mmu_t mmu_model;
+ powerpc_excp_t excp_model;
+ powerpc_input_t bus_model;
+ int bfd_mach;
+ uint32_t flags;
+ uint64_t insns_flags;
+
+ int error_code;
+ uint32_t pending_interrupts;
+#if !defined(CONFIG_USER_ONLY)
+ /* This is the IRQ controller, which is implementation dependent
+ * and only relevant when emulating a complete machine.
+ */
+ uint32_t irq_input_state;
+ void **irq_inputs;
+ /* Exception vectors */
+ target_ulong excp_vectors[POWERPC_EXCP_NB];
+ target_ulong excp_prefix;
+ target_ulong hreset_excp_prefix;
+ target_ulong ivor_mask;
+ target_ulong ivpr_mask;
+ target_ulong hreset_vector;
+#endif
+
+ /* Those resources are used only during code translation */
+ /* opcode handlers */
+ opc_handler_t *opcodes[0x40];
+
+ /* Those resources are used only in Qemu core */
+ target_ulong hflags; /* hflags is a MSR & HFLAGS_MASK */
+ target_ulong hflags_nmsr; /* specific hflags, not coming from the MSR */
+ int mmu_idx; /* precomputed MMU index to speed up mem accesses */
+
+ /* Power management */
+ int power_mode;
+ int (*check_pow)(CPUPPCState *env);
+
+#if !defined(CONFIG_USER_ONLY)
+ void *load_info; /* Holds boot loading state. */
+#endif
+};
+
+#if !defined(CONFIG_USER_ONLY)
+/* Context used internally during MMU translations */
+typedef struct mmu_ctx_t mmu_ctx_t;
+struct mmu_ctx_t {
+ target_phys_addr_t raddr; /* Real address */
+ target_phys_addr_t eaddr; /* Effective address */
+ int prot; /* Protection bits */
+ target_phys_addr_t pg_addr[2]; /* PTE tables base addresses */
+ target_ulong ptem; /* Virtual segment ID | API */
+ int key; /* Access key */
+ int nx; /* Non-execute area */
+};
+#endif
+
+/*****************************************************************************/
+CPUPPCState *cpu_ppc_init (const char *cpu_model);
+void ppc_translate_init(void);
+int cpu_ppc_exec (CPUPPCState *s);
+void cpu_ppc_close (CPUPPCState *s);
+/* You can call this signal handler from your SIGBUS and SIGSEGV
+ signal handlers to inform the virtual CPU of exceptions. Non-zero
+ is returned if the signal was handled by the virtual CPU. */
+int cpu_ppc_signal_handler (int host_signum, void *pinfo,
+ void *puc);
+int cpu_ppc_handle_mmu_fault (CPUPPCState *env, target_ulong address, int rw,
+ int mmu_idx, int is_softmmu);
+#define cpu_handle_mmu_fault cpu_ppc_handle_mmu_fault
+#if !defined(CONFIG_USER_ONLY)
+int get_physical_address (CPUPPCState *env, mmu_ctx_t *ctx, target_ulong vaddr,
+ int rw, int access_type);
+#endif
+void do_interrupt (CPUPPCState *env);
+void ppc_hw_interrupt (CPUPPCState *env);
+
+void cpu_dump_rfi (target_ulong RA, target_ulong msr);
+
+#if !defined(CONFIG_USER_ONLY)
+void ppc6xx_tlb_store (CPUPPCState *env, target_ulong EPN, int way, int is_code,
+ target_ulong pte0, target_ulong pte1);
+void ppc_store_ibatu (CPUPPCState *env, int nr, target_ulong value);
+void ppc_store_ibatl (CPUPPCState *env, int nr, target_ulong value);
+void ppc_store_dbatu (CPUPPCState *env, int nr, target_ulong value);
+void ppc_store_dbatl (CPUPPCState *env, int nr, target_ulong value);
+void ppc_store_ibatu_601 (CPUPPCState *env, int nr, target_ulong value);
+void ppc_store_ibatl_601 (CPUPPCState *env, int nr, target_ulong value);
+void ppc_store_sdr1 (CPUPPCState *env, target_ulong value);
+#if defined(TARGET_PPC64)
+void ppc_store_asr (CPUPPCState *env, target_ulong value);
+target_ulong ppc_load_slb (CPUPPCState *env, int slb_nr);
+target_ulong ppc_load_sr (CPUPPCState *env, int sr_nr);
+void ppc_store_slb (CPUPPCState *env, target_ulong rb, target_ulong rs);
+#endif /* defined(TARGET_PPC64) */
+void ppc_store_sr (CPUPPCState *env, int srnum, target_ulong value);
+#endif /* !defined(CONFIG_USER_ONLY) */
+void ppc_store_msr (CPUPPCState *env, target_ulong value);
+
+void ppc_cpu_list (FILE *f, fprintf_function cpu_fprintf);
+
+const ppc_def_t *cpu_ppc_find_by_name (const char *name);
+int cpu_ppc_register_internal (CPUPPCState *env, const ppc_def_t *def);
+
+/* Time-base and decrementer management */
+#ifndef NO_CPU_IO_DEFS
+uint64_t cpu_ppc_load_tbl (CPUPPCState *env);
+uint32_t cpu_ppc_load_tbu (CPUPPCState *env);
+void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value);
+void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value);
+uint64_t cpu_ppc_load_atbl (CPUPPCState *env);
+uint32_t cpu_ppc_load_atbu (CPUPPCState *env);
+void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value);
+void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value);
+uint32_t cpu_ppc_load_decr (CPUPPCState *env);
+void cpu_ppc_store_decr (CPUPPCState *env, uint32_t value);
+uint32_t cpu_ppc_load_hdecr (CPUPPCState *env);
+void cpu_ppc_store_hdecr (CPUPPCState *env, uint32_t value);
+uint64_t cpu_ppc_load_purr (CPUPPCState *env);
+void cpu_ppc_store_purr (CPUPPCState *env, uint64_t value);
+uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env);
+uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env);
+#if !defined(CONFIG_USER_ONLY)
+void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value);
+void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value);
+target_ulong load_40x_pit (CPUPPCState *env);
+void store_40x_pit (CPUPPCState *env, target_ulong val);
+void store_40x_dbcr0 (CPUPPCState *env, uint32_t val);
+void store_40x_sler (CPUPPCState *env, uint32_t val);
+void store_booke_tcr (CPUPPCState *env, target_ulong val);
+void store_booke_tsr (CPUPPCState *env, target_ulong val);
+void ppc_tlb_invalidate_all (CPUPPCState *env);
+void ppc_tlb_invalidate_one (CPUPPCState *env, target_ulong addr);
+#if defined(TARGET_PPC64)
+void ppc_slb_invalidate_all (CPUPPCState *env);
+void ppc_slb_invalidate_one (CPUPPCState *env, uint64_t T0);
+#endif
+int ppcemb_tlb_search (CPUPPCState *env, target_ulong address, uint32_t pid);
+#endif
+#endif
+
+static inline uint64_t ppc_dump_gpr(CPUPPCState *env, int gprn)
+{
+ uint64_t gprv;
+
+ gprv = env->gpr[gprn];
+#if !defined(TARGET_PPC64)
+ if (env->flags & POWERPC_FLAG_SPE) {
+ /* If the CPU implements the SPE extension, we have to get the
+ * high bits of the GPR from the gprh storage area
+ */
+ gprv &= 0xFFFFFFFFULL;
+ gprv |= (uint64_t)env->gprh[gprn] << 32;
+ }
+#endif
+
+ return gprv;
+}
+
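A hypothetical call site for ppc_dump_gpr, just to show the intended use (the helper name and the printf format are assumptions, not part of the patch):

/* Hypothetical usage of ppc_dump_gpr (assumes <stdio.h> and <inttypes.h>). */
static void dump_some_gprs(CPUPPCState *env)
{
    int i;

    for (i = 0; i < 4; i++) {
        printf("GPR%02d = %016" PRIx64 "\n", i, ppc_dump_gpr(env, i));
    }
}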
+/* Device control registers */
+int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp);
+int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
+
+#define cpu_init cpu_ppc_init
+#define cpu_exec cpu_ppc_exec
+#define cpu_gen_code cpu_ppc_gen_code
+#define cpu_signal_handler cpu_ppc_signal_handler
+#define cpu_list ppc_cpu_list
+
+#define CPU_SAVE_VERSION 4
+
+/* MMU modes definitions */
+#define MMU_MODE0_SUFFIX _user
+#define MMU_MODE1_SUFFIX _kernel
+#define MMU_MODE2_SUFFIX _hypv
+#define MMU_USER_IDX 0
+static inline int cpu_mmu_index (CPUState *env)
+{
+ return env->mmu_idx;
+}
+
+#if defined(CONFIG_USER_ONLY)
+static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
+{
+ if (newsp)
+ env->gpr[1] = newsp;
+ env->gpr[3] = 0;
+}
+#endif
+
+#include "cpu-all.h"
+
+/*****************************************************************************/
+/* CRF definitions */
+#define CRF_LT 3
+#define CRF_GT 2
+#define CRF_EQ 1
+#define CRF_SO 0
+#define CRF_CH (1 << CRF_LT)
+#define CRF_CL (1 << CRF_GT)
+#define CRF_CH_OR_CL (1 << CRF_EQ)
+#define CRF_CH_AND_CL (1 << CRF_SO)
+
+/* XER definitions */
+#define XER_SO 31
+#define XER_OV 30
+#define XER_CA 29
+#define XER_CMP 8
+#define XER_BC 0
+#define xer_so ((env->xer >> XER_SO) & 1)
+#define xer_ov ((env->xer >> XER_OV) & 1)
+#define xer_ca ((env->xer >> XER_CA) & 1)
+#define xer_cmp ((env->xer >> XER_CMP) & 0xFF)
+#define xer_bc ((env->xer >> XER_BC) & 0x7F)
+
+/* SPR definitions */
+#define SPR_MQ (0x000)
+#define SPR_XER (0x001)
+#define SPR_601_VRTCU (0x004)
+#define SPR_601_VRTCL (0x005)
+#define SPR_601_UDECR (0x006)
+#define SPR_LR (0x008)
+#define SPR_CTR (0x009)
+#define SPR_DSISR (0x012)
+#define SPR_DAR (0x013) /* DAE for PowerPC 601 */
+#define SPR_601_RTCU (0x014)
+#define SPR_601_RTCL (0x015)
+#define SPR_DECR (0x016)
+#define SPR_SDR1 (0x019)
+#define SPR_SRR0 (0x01A)
+#define SPR_SRR1 (0x01B)
+#define SPR_AMR (0x01D)
+#define SPR_BOOKE_PID (0x030)
+#define SPR_BOOKE_DECAR (0x036)
+#define SPR_BOOKE_CSRR0 (0x03A)
+#define SPR_BOOKE_CSRR1 (0x03B)
+#define SPR_BOOKE_DEAR (0x03D)
+#define SPR_BOOKE_ESR (0x03E)
+#define SPR_BOOKE_IVPR (0x03F)
+#define SPR_MPC_EIE (0x050)
+#define SPR_MPC_EID (0x051)
+#define SPR_MPC_NRI (0x052)
+#define SPR_CTRL (0x088)
+#define SPR_MPC_CMPA (0x090)
+#define SPR_MPC_CMPB (0x091)
+#define SPR_MPC_CMPC (0x092)
+#define SPR_MPC_CMPD (0x093)
+#define SPR_MPC_ECR (0x094)
+#define SPR_MPC_DER (0x095)
+#define SPR_MPC_COUNTA (0x096)
+#define SPR_MPC_COUNTB (0x097)
+#define SPR_UCTRL (0x098)
+#define SPR_MPC_CMPE (0x098)
+#define SPR_MPC_CMPF (0x099)
+#define SPR_MPC_CMPG (0x09A)
+#define SPR_MPC_CMPH (0x09B)
+#define SPR_MPC_LCTRL1 (0x09C)
+#define SPR_MPC_LCTRL2 (0x09D)
+#define SPR_MPC_ICTRL (0x09E)
+#define SPR_MPC_BAR (0x09F)
+#define SPR_VRSAVE (0x100)
+#define SPR_USPRG0 (0x100)
+#define SPR_USPRG1 (0x101)
+#define SPR_USPRG2 (0x102)
+#define SPR_USPRG3 (0x103)
+#define SPR_USPRG4 (0x104)
+#define SPR_USPRG5 (0x105)
+#define SPR_USPRG6 (0x106)
+#define SPR_USPRG7 (0x107)
+#define SPR_VTBL (0x10C)
+#define SPR_VTBU (0x10D)
+#define SPR_SPRG0 (0x110)
+#define SPR_SPRG1 (0x111)
+#define SPR_SPRG2 (0x112)
+#define SPR_SPRG3 (0x113)
+#define SPR_SPRG4 (0x114)
+#define SPR_SCOMC (0x114)
+#define SPR_SPRG5 (0x115)
+#define SPR_SCOMD (0x115)
+#define SPR_SPRG6 (0x116)
+#define SPR_SPRG7 (0x117)
+#define SPR_ASR (0x118)
+#define SPR_EAR (0x11A)
+#define SPR_TBL (0x11C)
+#define SPR_TBU (0x11D)
+#define SPR_TBU40 (0x11E)
+#define SPR_SVR (0x11E)
+#define SPR_BOOKE_PIR (0x11E)
+#define SPR_PVR (0x11F)
+#define SPR_HSPRG0 (0x130)
+#define SPR_BOOKE_DBSR (0x130)
+#define SPR_HSPRG1 (0x131)
+#define SPR_HDSISR (0x132)
+#define SPR_HDAR (0x133)
+#define SPR_BOOKE_DBCR0 (0x134)
+#define SPR_IBCR (0x135)
+#define SPR_PURR (0x135)
+#define SPR_BOOKE_DBCR1 (0x135)
+#define SPR_DBCR (0x136)
+#define SPR_HDEC (0x136)
+#define SPR_BOOKE_DBCR2 (0x136)
+#define SPR_HIOR (0x137)
+#define SPR_MBAR (0x137)
+#define SPR_RMOR (0x138)
+#define SPR_BOOKE_IAC1 (0x138)
+#define SPR_HRMOR (0x139)
+#define SPR_BOOKE_IAC2 (0x139)
+#define SPR_HSRR0 (0x13A)
+#define SPR_BOOKE_IAC3 (0x13A)
+#define SPR_HSRR1 (0x13B)
+#define SPR_BOOKE_IAC4 (0x13B)
+#define SPR_LPCR (0x13C)
+#define SPR_BOOKE_DAC1 (0x13C)
+#define SPR_LPIDR (0x13D)
+#define SPR_DABR2 (0x13D)
+#define SPR_BOOKE_DAC2 (0x13D)
+#define SPR_BOOKE_DVC1 (0x13E)
+#define SPR_BOOKE_DVC2 (0x13F)
+#define SPR_BOOKE_TSR (0x150)
+#define SPR_BOOKE_TCR (0x154)
+#define SPR_BOOKE_IVOR0 (0x190)
+#define SPR_BOOKE_IVOR1 (0x191)
+#define SPR_BOOKE_IVOR2 (0x192)
+#define SPR_BOOKE_IVOR3 (0x193)
+#define SPR_BOOKE_IVOR4 (0x194)
+#define SPR_BOOKE_IVOR5 (0x195)
+#define SPR_BOOKE_IVOR6 (0x196)
+#define SPR_BOOKE_IVOR7 (0x197)
+#define SPR_BOOKE_IVOR8 (0x198)
+#define SPR_BOOKE_IVOR9 (0x199)
+#define SPR_BOOKE_IVOR10 (0x19A)
+#define SPR_BOOKE_IVOR11 (0x19B)
+#define SPR_BOOKE_IVOR12 (0x19C)
+#define SPR_BOOKE_IVOR13 (0x19D)
+#define SPR_BOOKE_IVOR14 (0x19E)
+#define SPR_BOOKE_IVOR15 (0x19F)
+#define SPR_BOOKE_SPEFSCR (0x200)
+#define SPR_Exxx_BBEAR (0x201)
+#define SPR_Exxx_BBTAR (0x202)
+#define SPR_Exxx_L1CFG0 (0x203)
+#define SPR_Exxx_NPIDR (0x205)
+#define SPR_ATBL (0x20E)
+#define SPR_ATBU (0x20F)
+#define SPR_IBAT0U (0x210)
+#define SPR_BOOKE_IVOR32 (0x210)
+#define SPR_RCPU_MI_GRA (0x210)
+#define SPR_IBAT0L (0x211)
+#define SPR_BOOKE_IVOR33 (0x211)
+#define SPR_IBAT1U (0x212)
+#define SPR_BOOKE_IVOR34 (0x212)
+#define SPR_IBAT1L (0x213)
+#define SPR_BOOKE_IVOR35 (0x213)
+#define SPR_IBAT2U (0x214)
+#define SPR_BOOKE_IVOR36 (0x214)
+#define SPR_IBAT2L (0x215)
+#define SPR_BOOKE_IVOR37 (0x215)
+#define SPR_IBAT3U (0x216)
+#define SPR_IBAT3L (0x217)
+#define SPR_DBAT0U (0x218)
+#define SPR_RCPU_L2U_GRA (0x218)
+#define SPR_DBAT0L (0x219)
+#define SPR_DBAT1U (0x21A)
+#define SPR_DBAT1L (0x21B)
+#define SPR_DBAT2U (0x21C)
+#define SPR_DBAT2L (0x21D)
+#define SPR_DBAT3U (0x21E)
+#define SPR_DBAT3L (0x21F)
+#define SPR_IBAT4U (0x230)
+#define SPR_RPCU_BBCMCR (0x230)
+#define SPR_MPC_IC_CST (0x230)
+#define SPR_Exxx_CTXCR (0x230)
+#define SPR_IBAT4L (0x231)
+#define SPR_MPC_IC_ADR (0x231)
+#define SPR_Exxx_DBCR3 (0x231)
+#define SPR_IBAT5U (0x232)
+#define SPR_MPC_IC_DAT (0x232)
+#define SPR_Exxx_DBCNT (0x232)
+#define SPR_IBAT5L (0x233)
+#define SPR_IBAT6U (0x234)
+#define SPR_IBAT6L (0x235)
+#define SPR_IBAT7U (0x236)
+#define SPR_IBAT7L (0x237)
+#define SPR_DBAT4U (0x238)
+#define SPR_RCPU_L2U_MCR (0x238)
+#define SPR_MPC_DC_CST (0x238)
+#define SPR_Exxx_ALTCTXCR (0x238)
+#define SPR_DBAT4L (0x239)
+#define SPR_MPC_DC_ADR (0x239)
+#define SPR_DBAT5U (0x23A)
+#define SPR_BOOKE_MCSRR0 (0x23A)
+#define SPR_MPC_DC_DAT (0x23A)
+#define SPR_DBAT5L (0x23B)
+#define SPR_BOOKE_MCSRR1 (0x23B)
+#define SPR_DBAT6U (0x23C)
+#define SPR_BOOKE_MCSR (0x23C)
+#define SPR_DBAT6L (0x23D)
+#define SPR_Exxx_MCAR (0x23D)
+#define SPR_DBAT7U (0x23E)
+#define SPR_BOOKE_DSRR0 (0x23E)
+#define SPR_DBAT7L (0x23F)
+#define SPR_BOOKE_DSRR1 (0x23F)
+#define SPR_BOOKE_SPRG8 (0x25C)
+#define SPR_BOOKE_SPRG9 (0x25D)
+#define SPR_BOOKE_MAS0 (0x270)
+#define SPR_BOOKE_MAS1 (0x271)
+#define SPR_BOOKE_MAS2 (0x272)
+#define SPR_BOOKE_MAS3 (0x273)
+#define SPR_BOOKE_MAS4 (0x274)
+#define SPR_BOOKE_MAS5 (0x275)
+#define SPR_BOOKE_MAS6 (0x276)
+#define SPR_BOOKE_PID1 (0x279)
+#define SPR_BOOKE_PID2 (0x27A)
+#define SPR_MPC_DPDR (0x280)
+#define SPR_MPC_IMMR (0x288)
+#define SPR_BOOKE_TLB0CFG (0x2B0)
+#define SPR_BOOKE_TLB1CFG (0x2B1)
+#define SPR_BOOKE_TLB2CFG (0x2B2)
+#define SPR_BOOKE_TLB3CFG (0x2B3)
+#define SPR_BOOKE_EPR (0x2BE)
+#define SPR_PERF0 (0x300)
+#define SPR_RCPU_MI_RBA0 (0x300)
+#define SPR_MPC_MI_CTR (0x300)
+#define SPR_PERF1 (0x301)
+#define SPR_RCPU_MI_RBA1 (0x301)
+#define SPR_PERF2 (0x302)
+#define SPR_RCPU_MI_RBA2 (0x302)
+#define SPR_MPC_MI_AP (0x302)
+#define SPR_PERF3 (0x303)
+#define SPR_620_PMC1R (0x303)
+#define SPR_RCPU_MI_RBA3 (0x303)
+#define SPR_MPC_MI_EPN (0x303)
+#define SPR_PERF4 (0x304)
+#define SPR_620_PMC2R (0x304)
+#define SPR_PERF5 (0x305)
+#define SPR_MPC_MI_TWC (0x305)
+#define SPR_PERF6 (0x306)
+#define SPR_MPC_MI_RPN (0x306)
+#define SPR_PERF7 (0x307)
+#define SPR_PERF8 (0x308)
+#define SPR_RCPU_L2U_RBA0 (0x308)
+#define SPR_MPC_MD_CTR (0x308)
+#define SPR_PERF9 (0x309)
+#define SPR_RCPU_L2U_RBA1 (0x309)
+#define SPR_MPC_MD_CASID (0x309)
+#define SPR_PERFA (0x30A)
+#define SPR_RCPU_L2U_RBA2 (0x30A)
+#define SPR_MPC_MD_AP (0x30A)
+#define SPR_PERFB (0x30B)
+#define SPR_620_MMCR0R (0x30B)
+#define SPR_RCPU_L2U_RBA3 (0x30B)
+#define SPR_MPC_MD_EPN (0x30B)
+#define SPR_PERFC (0x30C)
+#define SPR_MPC_MD_TWB (0x30C)
+#define SPR_PERFD (0x30D)
+#define SPR_MPC_MD_TWC (0x30D)
+#define SPR_PERFE (0x30E)
+#define SPR_MPC_MD_RPN (0x30E)
+#define SPR_PERFF (0x30F)
+#define SPR_MPC_MD_TW (0x30F)
+#define SPR_UPERF0 (0x310)
+#define SPR_UPERF1 (0x311)
+#define SPR_UPERF2 (0x312)
+#define SPR_UPERF3 (0x313)
+#define SPR_620_PMC1W (0x313)
+#define SPR_UPERF4 (0x314)
+#define SPR_620_PMC2W (0x314)
+#define SPR_UPERF5 (0x315)
+#define SPR_UPERF6 (0x316)
+#define SPR_UPERF7 (0x317)
+#define SPR_UPERF8 (0x318)
+#define SPR_UPERF9 (0x319)
+#define SPR_UPERFA (0x31A)
+#define SPR_UPERFB (0x31B)
+#define SPR_620_MMCR0W (0x31B)
+#define SPR_UPERFC (0x31C)
+#define SPR_UPERFD (0x31D)
+#define SPR_UPERFE (0x31E)
+#define SPR_UPERFF (0x31F)
+#define SPR_RCPU_MI_RA0 (0x320)
+#define SPR_MPC_MI_DBCAM (0x320)
+#define SPR_RCPU_MI_RA1 (0x321)
+#define SPR_MPC_MI_DBRAM0 (0x321)
+#define SPR_RCPU_MI_RA2 (0x322)
+#define SPR_MPC_MI_DBRAM1 (0x322)
+#define SPR_RCPU_MI_RA3 (0x323)
+#define SPR_RCPU_L2U_RA0 (0x328)
+#define SPR_MPC_MD_DBCAM (0x328)
+#define SPR_RCPU_L2U_RA1 (0x329)
+#define SPR_MPC_MD_DBRAM0 (0x329)
+#define SPR_RCPU_L2U_RA2 (0x32A)
+#define SPR_MPC_MD_DBRAM1 (0x32A)
+#define SPR_RCPU_L2U_RA3 (0x32B)
+#define SPR_440_INV0 (0x370)
+#define SPR_440_INV1 (0x371)
+#define SPR_440_INV2 (0x372)
+#define SPR_440_INV3 (0x373)
+#define SPR_440_ITV0 (0x374)
+#define SPR_440_ITV1 (0x375)
+#define SPR_440_ITV2 (0x376)
+#define SPR_440_ITV3 (0x377)
+#define SPR_440_CCR1 (0x378)
+#define SPR_DCRIPR (0x37B)
+#define SPR_PPR (0x380)
+#define SPR_750_GQR0 (0x390)
+#define SPR_440_DNV0 (0x390)
+#define SPR_750_GQR1 (0x391)
+#define SPR_440_DNV1 (0x391)
+#define SPR_750_GQR2 (0x392)
+#define SPR_440_DNV2 (0x392)
+#define SPR_750_GQR3 (0x393)
+#define SPR_440_DNV3 (0x393)
+#define SPR_750_GQR4 (0x394)
+#define SPR_440_DTV0 (0x394)
+#define SPR_750_GQR5 (0x395)
+#define SPR_440_DTV1 (0x395)
+#define SPR_750_GQR6 (0x396)
+#define SPR_440_DTV2 (0x396)
+#define SPR_750_GQR7 (0x397)
+#define SPR_440_DTV3 (0x397)
+#define SPR_750_THRM4 (0x398)
+#define SPR_750CL_HID2 (0x398)
+#define SPR_440_DVLIM (0x398)
+#define SPR_750_WPAR (0x399)
+#define SPR_440_IVLIM (0x399)
+#define SPR_750_DMAU (0x39A)
+#define SPR_750_DMAL (0x39B)
+#define SPR_440_RSTCFG (0x39B)
+#define SPR_BOOKE_DCDBTRL (0x39C)
+#define SPR_BOOKE_DCDBTRH (0x39D)
+#define SPR_BOOKE_ICDBTRL (0x39E)
+#define SPR_BOOKE_ICDBTRH (0x39F)
+#define SPR_UMMCR2 (0x3A0)
+#define SPR_UPMC5 (0x3A1)
+#define SPR_UPMC6 (0x3A2)
+#define SPR_UBAMR (0x3A7)
+#define SPR_UMMCR0 (0x3A8)
+#define SPR_UPMC1 (0x3A9)
+#define SPR_UPMC2 (0x3AA)
+#define SPR_USIAR (0x3AB)
+#define SPR_UMMCR1 (0x3AC)
+#define SPR_UPMC3 (0x3AD)
+#define SPR_UPMC4 (0x3AE)
+#define SPR_USDA (0x3AF)
+#define SPR_40x_ZPR (0x3B0)
+#define SPR_BOOKE_MAS7 (0x3B0)
+#define SPR_620_PMR0 (0x3B0)
+#define SPR_MMCR2 (0x3B0)
+#define SPR_PMC5 (0x3B1)
+#define SPR_40x_PID (0x3B1)
+#define SPR_620_PMR1 (0x3B1)
+#define SPR_PMC6 (0x3B2)
+#define SPR_440_MMUCR (0x3B2)
+#define SPR_620_PMR2 (0x3B2)
+#define SPR_4xx_CCR0 (0x3B3)
+#define SPR_BOOKE_EPLC (0x3B3)
+#define SPR_620_PMR3 (0x3B3)
+#define SPR_405_IAC3 (0x3B4)
+#define SPR_BOOKE_EPSC (0x3B4)
+#define SPR_620_PMR4 (0x3B4)
+#define SPR_405_IAC4 (0x3B5)
+#define SPR_620_PMR5 (0x3B5)
+#define SPR_405_DVC1 (0x3B6)
+#define SPR_620_PMR6 (0x3B6)
+#define SPR_405_DVC2 (0x3B7)
+#define SPR_620_PMR7 (0x3B7)
+#define SPR_BAMR (0x3B7)
+#define SPR_MMCR0 (0x3B8)
+#define SPR_620_PMR8 (0x3B8)
+#define SPR_PMC1 (0x3B9)
+#define SPR_40x_SGR (0x3B9)
+#define SPR_620_PMR9 (0x3B9)
+#define SPR_PMC2 (0x3BA)
+#define SPR_40x_DCWR (0x3BA)
+#define SPR_620_PMRA (0x3BA)
+#define SPR_SIAR (0x3BB)
+#define SPR_405_SLER (0x3BB)
+#define SPR_620_PMRB (0x3BB)
+#define SPR_MMCR1 (0x3BC)
+#define SPR_405_SU0R (0x3BC)
+#define SPR_620_PMRC (0x3BC)
+#define SPR_401_SKR (0x3BC)
+#define SPR_PMC3 (0x3BD)
+#define SPR_405_DBCR1 (0x3BD)
+#define SPR_620_PMRD (0x3BD)
+#define SPR_PMC4 (0x3BE)
+#define SPR_620_PMRE (0x3BE)
+#define SPR_SDA (0x3BF)
+#define SPR_620_PMRF (0x3BF)
+#define SPR_403_VTBL (0x3CC)
+#define SPR_403_VTBU (0x3CD)
+#define SPR_DMISS (0x3D0)
+#define SPR_DCMP (0x3D1)
+#define SPR_HASH1 (0x3D2)
+#define SPR_HASH2 (0x3D3)
+#define SPR_BOOKE_ICDBDR (0x3D3)
+#define SPR_TLBMISS (0x3D4)
+#define SPR_IMISS (0x3D4)
+#define SPR_40x_ESR (0x3D4)
+#define SPR_PTEHI (0x3D5)
+#define SPR_ICMP (0x3D5)
+#define SPR_40x_DEAR (0x3D5)
+#define SPR_PTELO (0x3D6)
+#define SPR_RPA (0x3D6)
+#define SPR_40x_EVPR (0x3D6)
+#define SPR_L3PM (0x3D7)
+#define SPR_403_CDBCR (0x3D7)
+#define SPR_L3ITCR0 (0x3D8)
+#define SPR_TCR (0x3D8)
+#define SPR_40x_TSR (0x3D8)
+#define SPR_IBR (0x3DA)
+#define SPR_40x_TCR (0x3DA)
+#define SPR_ESASRR (0x3DB)
+#define SPR_40x_PIT (0x3DB)
+#define SPR_403_TBL (0x3DC)
+#define SPR_403_TBU (0x3DD)
+#define SPR_SEBR (0x3DE)
+#define SPR_40x_SRR2 (0x3DE)
+#define SPR_SER (0x3DF)
+#define SPR_40x_SRR3 (0x3DF)
+#define SPR_L3OHCR (0x3E8)
+#define SPR_L3ITCR1 (0x3E9)
+#define SPR_L3ITCR2 (0x3EA)
+#define SPR_L3ITCR3 (0x3EB)
+#define SPR_HID0 (0x3F0)
+#define SPR_40x_DBSR (0x3F0)
+#define SPR_HID1 (0x3F1)
+#define SPR_IABR (0x3F2)
+#define SPR_40x_DBCR0 (0x3F2)
+#define SPR_601_HID2 (0x3F2)
+#define SPR_Exxx_L1CSR0 (0x3F2)
+#define SPR_ICTRL (0x3F3)
+#define SPR_HID2 (0x3F3)
+#define SPR_750CL_HID4 (0x3F3)
+#define SPR_Exxx_L1CSR1 (0x3F3)
+#define SPR_440_DBDR (0x3F3)
+#define SPR_LDSTDB (0x3F4)
+#define SPR_750_TDCL (0x3F4)
+#define SPR_40x_IAC1 (0x3F4)
+#define SPR_MMUCSR0 (0x3F4)
+#define SPR_DABR (0x3F5)
+#define DABR_MASK (~(target_ulong)0x7)
+#define SPR_Exxx_BUCSR (0x3F5)
+#define SPR_40x_IAC2 (0x3F5)
+#define SPR_601_HID5 (0x3F5)
+#define SPR_40x_DAC1 (0x3F6)
+#define SPR_MSSCR0 (0x3F6)
+#define SPR_970_HID5 (0x3F6)
+#define SPR_MSSSR0 (0x3F7)
+#define SPR_MSSCR1 (0x3F7)
+#define SPR_DABRX (0x3F7)
+#define SPR_40x_DAC2 (0x3F7)
+#define SPR_MMUCFG (0x3F7)
+#define SPR_LDSTCR (0x3F8)
+#define SPR_L2PMCR (0x3F8)
+#define SPR_750FX_HID2 (0x3F8)
+#define SPR_620_BUSCSR (0x3F8)
+#define SPR_Exxx_L1FINV0 (0x3F8)
+#define SPR_L2CR (0x3F9)
+#define SPR_620_L2CR (0x3F9)
+#define SPR_L3CR (0x3FA)
+#define SPR_750_TDCH (0x3FA)
+#define SPR_IABR2 (0x3FA)
+#define SPR_40x_DCCR (0x3FA)
+#define SPR_620_L2SR (0x3FA)
+#define SPR_ICTC (0x3FB)
+#define SPR_40x_ICCR (0x3FB)
+#define SPR_THRM1 (0x3FC)
+#define SPR_403_PBL1 (0x3FC)
+#define SPR_SP (0x3FD)
+#define SPR_THRM2 (0x3FD)
+#define SPR_403_PBU1 (0x3FD)
+#define SPR_604_HID13 (0x3FD)
+#define SPR_LT (0x3FE)
+#define SPR_THRM3 (0x3FE)
+#define SPR_RCPU_FPECR (0x3FE)
+#define SPR_403_PBL2 (0x3FE)
+#define SPR_PIR (0x3FF)
+#define SPR_403_PBU2 (0x3FF)
+#define SPR_601_HID15 (0x3FF)
+#define SPR_604_HID15 (0x3FF)
+#define SPR_E500_SVR (0x3FF)
+
+/*****************************************************************************/
+/* PowerPC instruction type definitions */
+enum {
+ PPC_NONE = 0x0000000000000000ULL,
+ /* PowerPC base instructions set */
+ PPC_INSNS_BASE = 0x0000000000000001ULL,
+ /* integer operations instructions */
+#define PPC_INTEGER PPC_INSNS_BASE
+ /* flow control instructions */
+#define PPC_FLOW PPC_INSNS_BASE
+ /* virtual memory instructions */
+#define PPC_MEM PPC_INSNS_BASE
+ /* ld/st with reservation instructions */
+#define PPC_RES PPC_INSNS_BASE
+ /* spr/msr access instructions */
+#define PPC_MISC PPC_INSNS_BASE
+ /* Deprecated instruction sets */
+ /* Original POWER instruction set */
+ PPC_POWER = 0x0000000000000002ULL,
+ /* POWER2 instruction set extension */
+ PPC_POWER2 = 0x0000000000000004ULL,
+ /* Power RTC support */
+ PPC_POWER_RTC = 0x0000000000000008ULL,
+ /* Power-to-PowerPC bridge (601) */
+ PPC_POWER_BR = 0x0000000000000010ULL,
+    /* 64-bit PowerPC instruction set */
+ PPC_64B = 0x0000000000000020ULL,
+    /* New 64-bit extensions (PowerPC 2.0x) */
+ PPC_64BX = 0x0000000000000040ULL,
+    /* 64-bit hypervisor extensions */
+ PPC_64H = 0x0000000000000080ULL,
+ /* New wait instruction (PowerPC 2.0x) */
+ PPC_WAIT = 0x0000000000000100ULL,
+ /* Time base mftb instruction */
+ PPC_MFTB = 0x0000000000000200ULL,
+
+ /* Fixed-point unit extensions */
+ /* PowerPC 602 specific */
+ PPC_602_SPEC = 0x0000000000000400ULL,
+ /* isel instruction */
+ PPC_ISEL = 0x0000000000000800ULL,
+ /* popcntb instruction */
+ PPC_POPCNTB = 0x0000000000001000ULL,
+ /* string load / store */
+ PPC_STRING = 0x0000000000002000ULL,
+
+ /* Floating-point unit extensions */
+ /* Optional floating point instructions */
+ PPC_FLOAT = 0x0000000000010000ULL,
+ /* New floating-point extensions (PowerPC 2.0x) */
+ PPC_FLOAT_EXT = 0x0000000000020000ULL,
+ PPC_FLOAT_FSQRT = 0x0000000000040000ULL,
+ PPC_FLOAT_FRES = 0x0000000000080000ULL,
+ PPC_FLOAT_FRSQRTE = 0x0000000000100000ULL,
+ PPC_FLOAT_FRSQRTES = 0x0000000000200000ULL,
+ PPC_FLOAT_FSEL = 0x0000000000400000ULL,
+ PPC_FLOAT_STFIWX = 0x0000000000800000ULL,
+
+ /* Vector/SIMD extensions */
+ /* Altivec support */
+ PPC_ALTIVEC = 0x0000000001000000ULL,
+ /* PowerPC 2.03 SPE extension */
+ PPC_SPE = 0x0000000002000000ULL,
+ /* PowerPC 2.03 SPE single-precision floating-point extension */
+ PPC_SPE_SINGLE = 0x0000000004000000ULL,
+ /* PowerPC 2.03 SPE double-precision floating-point extension */
+ PPC_SPE_DOUBLE = 0x0000000008000000ULL,
+
+ /* Optional memory control instructions */
+ PPC_MEM_TLBIA = 0x0000000010000000ULL,
+ PPC_MEM_TLBIE = 0x0000000020000000ULL,
+ PPC_MEM_TLBSYNC = 0x0000000040000000ULL,
+ /* sync instruction */
+ PPC_MEM_SYNC = 0x0000000080000000ULL,
+ /* eieio instruction */
+ PPC_MEM_EIEIO = 0x0000000100000000ULL,
+
+ /* Cache control instructions */
+ PPC_CACHE = 0x0000000200000000ULL,
+ /* icbi instruction */
+ PPC_CACHE_ICBI = 0x0000000400000000ULL,
+ /* dcbz instruction with fixed cache line size */
+ PPC_CACHE_DCBZ = 0x0000000800000000ULL,
+ /* dcbz instruction with tunable cache line size */
+ PPC_CACHE_DCBZT = 0x0000001000000000ULL,
+ /* dcba instruction */
+ PPC_CACHE_DCBA = 0x0000002000000000ULL,
+ /* Freescale cache locking instructions */
+ PPC_CACHE_LOCK = 0x0000004000000000ULL,
+
+ /* MMU related extensions */
+ /* external control instructions */
+ PPC_EXTERN = 0x0000010000000000ULL,
+ /* segment register access instructions */
+ PPC_SEGMENT = 0x0000020000000000ULL,
+ /* PowerPC 6xx TLB management instructions */
+ PPC_6xx_TLB = 0x0000040000000000ULL,
+ /* PowerPC 74xx TLB management instructions */
+ PPC_74xx_TLB = 0x0000080000000000ULL,
+ /* PowerPC 40x TLB management instructions */
+ PPC_40x_TLB = 0x0000100000000000ULL,
+ /* segment register access instructions for PowerPC 64 "bridge" */
+ PPC_SEGMENT_64B = 0x0000200000000000ULL,
+ /* SLB management */
+ PPC_SLBI = 0x0000400000000000ULL,
+
+ /* Embedded PowerPC dedicated instructions */
+ PPC_WRTEE = 0x0001000000000000ULL,
+ /* PowerPC 40x exception model */
+ PPC_40x_EXCP = 0x0002000000000000ULL,
+    /* PowerPC 405 MAC (multiply-accumulate) instructions */
+ PPC_405_MAC = 0x0004000000000000ULL,
+ /* PowerPC 440 specific instructions */
+ PPC_440_SPEC = 0x0008000000000000ULL,
+ /* BookE (embedded) PowerPC specification */
+ PPC_BOOKE = 0x0010000000000000ULL,
+ /* mfapidi instruction */
+ PPC_MFAPIDI = 0x0020000000000000ULL,
+ /* tlbiva instruction */
+ PPC_TLBIVA = 0x0040000000000000ULL,
+ /* tlbivax instruction */
+ PPC_TLBIVAX = 0x0080000000000000ULL,
+ /* PowerPC 4xx dedicated instructions */
+ PPC_4xx_COMMON = 0x0100000000000000ULL,
+    /* PowerPC 40x icbt instructions */
+ PPC_40x_ICBT = 0x0200000000000000ULL,
+ /* rfmci is not implemented in all BookE PowerPC */
+ PPC_RFMCI = 0x0400000000000000ULL,
+ /* rfdi instruction */
+ PPC_RFDI = 0x0800000000000000ULL,
+ /* DCR accesses */
+ PPC_DCR = 0x1000000000000000ULL,
+    /* DCR extended accesses */
+ PPC_DCRX = 0x2000000000000000ULL,
+ /* user-mode DCR access, implemented in PowerPC 460 */
+ PPC_DCRUX = 0x4000000000000000ULL,
+};
+
+/*****************************************************************************/
+/* Memory access type:
+ * may be needed for precise access rights control and precise exceptions.
+ */
+enum {
+ /* 1 bit to define user level / supervisor access */
+ ACCESS_USER = 0x00,
+ ACCESS_SUPER = 0x01,
+ /* Type of instruction that generated the access */
+ ACCESS_CODE = 0x10, /* Code fetch access */
+ ACCESS_INT = 0x20, /* Integer load/store access */
+ ACCESS_FLOAT = 0x30, /* floating point load/store access */
+ ACCESS_RES = 0x40, /* load/store with reservation */
+ ACCESS_EXT = 0x50, /* external access */
+ ACCESS_CACHE = 0x60, /* Cache manipulation */
+};
+
+/* Hardware interrupt sources:
+ * all of these exceptions can be raised simultaneously
+ */
+/* Input pin definitions */
+enum {
+ /* 6xx bus input pins */
+ PPC6xx_INPUT_HRESET = 0,
+ PPC6xx_INPUT_SRESET = 1,
+ PPC6xx_INPUT_CKSTP_IN = 2,
+ PPC6xx_INPUT_MCP = 3,
+ PPC6xx_INPUT_SMI = 4,
+ PPC6xx_INPUT_INT = 5,
+ PPC6xx_INPUT_TBEN = 6,
+ PPC6xx_INPUT_WAKEUP = 7,
+ PPC6xx_INPUT_NB,
+};
+
+enum {
+ /* Embedded PowerPC input pins */
+ PPCBookE_INPUT_HRESET = 0,
+ PPCBookE_INPUT_SRESET = 1,
+ PPCBookE_INPUT_CKSTP_IN = 2,
+ PPCBookE_INPUT_MCP = 3,
+ PPCBookE_INPUT_SMI = 4,
+ PPCBookE_INPUT_INT = 5,
+ PPCBookE_INPUT_CINT = 6,
+ PPCBookE_INPUT_NB,
+};
+
+enum {
+ /* PowerPC E500 input pins */
+ PPCE500_INPUT_RESET_CORE = 0,
+ PPCE500_INPUT_MCK = 1,
+ PPCE500_INPUT_CINT = 3,
+ PPCE500_INPUT_INT = 4,
+ PPCE500_INPUT_DEBUG = 6,
+ PPCE500_INPUT_NB,
+};
+
+enum {
+ /* PowerPC 40x input pins */
+ PPC40x_INPUT_RESET_CORE = 0,
+ PPC40x_INPUT_RESET_CHIP = 1,
+ PPC40x_INPUT_RESET_SYS = 2,
+ PPC40x_INPUT_CINT = 3,
+ PPC40x_INPUT_INT = 4,
+ PPC40x_INPUT_HALT = 5,
+ PPC40x_INPUT_DEBUG = 6,
+ PPC40x_INPUT_NB,
+};
+
+enum {
+ /* RCPU input pins */
+ PPCRCPU_INPUT_PORESET = 0,
+ PPCRCPU_INPUT_HRESET = 1,
+ PPCRCPU_INPUT_SRESET = 2,
+ PPCRCPU_INPUT_IRQ0 = 3,
+ PPCRCPU_INPUT_IRQ1 = 4,
+ PPCRCPU_INPUT_IRQ2 = 5,
+ PPCRCPU_INPUT_IRQ3 = 6,
+ PPCRCPU_INPUT_IRQ4 = 7,
+ PPCRCPU_INPUT_IRQ5 = 8,
+ PPCRCPU_INPUT_IRQ6 = 9,
+ PPCRCPU_INPUT_IRQ7 = 10,
+ PPCRCPU_INPUT_NB,
+};
+
+#if defined(TARGET_PPC64)
+enum {
+ /* PowerPC 970 input pins */
+ PPC970_INPUT_HRESET = 0,
+ PPC970_INPUT_SRESET = 1,
+ PPC970_INPUT_CKSTP = 2,
+ PPC970_INPUT_TBEN = 3,
+ PPC970_INPUT_MCP = 4,
+ PPC970_INPUT_INT = 5,
+ PPC970_INPUT_THINT = 6,
+ PPC970_INPUT_NB,
+};
+#endif
+
+/* Hardware exceptions definitions */
+enum {
+ /* External hardware exception sources */
+ PPC_INTERRUPT_RESET = 0, /* Reset exception */
+ PPC_INTERRUPT_WAKEUP, /* Wakeup exception */
+ PPC_INTERRUPT_MCK, /* Machine check exception */
+ PPC_INTERRUPT_EXT, /* External interrupt */
+ PPC_INTERRUPT_SMI, /* System management interrupt */
+ PPC_INTERRUPT_CEXT, /* Critical external interrupt */
+ PPC_INTERRUPT_DEBUG, /* External debug exception */
+ PPC_INTERRUPT_THERM, /* Thermal exception */
+ /* Internal hardware exception sources */
+ PPC_INTERRUPT_DECR, /* Decrementer exception */
+ PPC_INTERRUPT_HDECR, /* Hypervisor decrementer exception */
+    PPC_INTERRUPT_PIT,        /* Programmable interval timer interrupt */
+ PPC_INTERRUPT_FIT, /* Fixed interval timer interrupt */
+ PPC_INTERRUPT_WDT, /* Watchdog timer interrupt */
+ PPC_INTERRUPT_CDOORBELL, /* Critical doorbell interrupt */
+ PPC_INTERRUPT_DOORBELL, /* Doorbell interrupt */
+ PPC_INTERRUPT_PERFM, /* Performance monitor interrupt */
+};
+
+/*****************************************************************************/
+
+static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
+ target_ulong *cs_base, int *flags)
+{
+ *pc = env->nip;
+ *cs_base = 0;
+ *flags = env->hflags;
+}
+
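+/* Linux uses r2 as the TLS register on 32-bit PowerPC and r13 on 64-bit,
+ * which is why the two assignments below differ. */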
+static inline void cpu_set_tls(CPUState *env, target_ulong newtls)
+{
+#if defined(TARGET_PPC64)
+ /* The kernel checks TIF_32BIT here; we don't support loading 32-bit
+ binaries on PPC64 yet. */
+ env->gpr[13] = newtls;
+#else
+ env->gpr[2] = newtls;
+#endif
+}
+
+/* Hidden flags (hflags): used internally by qemu to represent additional
+ * CPU state.
+ */
+#define HF_HALTED_SHIFT 1
+
+#define HF_HALTED_MASK (1 << HF_HALTED_SHIFT)
+
+#endif /* !defined (__CPU_PPC_H__) */
diff --git a/target-ppc/exec.h b/target-ppc/exec.h
new file mode 100644
index 0000000..4688ef5
--- /dev/null
+++ b/target-ppc/exec.h
@@ -0,0 +1,57 @@
+/*
+ * PowerPC emulation definitions for qemu.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#if !defined (__PPC_H__)
+#define __PPC_H__
+
+#include "config.h"
+
+#include "dyngen-exec.h"
+
+#include "cpu.h"
+#include "exec-all.h"
+
+register struct CPUPPCState *env asm(AREG0);
+
+#if !defined(CONFIG_USER_ONLY)
+#include "softmmu_exec.h"
+#endif /* !defined(CONFIG_USER_ONLY) */
+
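+/* A halted CPU only has work to do when external interrupts are enabled
+ * (MSR[EE]) and a hard interrupt is actually pending. */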
+static inline int cpu_has_work(CPUState *env)
+{
+ return (msr_ee && (env->interrupt_request & CPU_INTERRUPT_HARD));
+}
+
+
+static inline int cpu_halted(CPUState *env)
+{
+ if (!env->halted)
+ return 0;
+ if (cpu_has_work(env)) {
+ env->halted = 0;
+ return 0;
+ }
+ return EXCP_HALTED;
+}
+
+static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb)
+{
+ env->nip = tb->pc;
+}
+
+#endif /* !defined (__PPC_H__) */
diff --git a/target-ppc/fake-exec.c b/target-ppc/fake-exec.c
new file mode 100644
index 0000000..259e06d
--- /dev/null
+++ b/target-ppc/fake-exec.c
@@ -0,0 +1,104 @@
+/*
+ * fake-exec.c
+ *
+ * This file provides stub functions so that the build still succeeds
+ * when TCG CPU emulation is disabled.
+ *
+ * Copyright 2007 IBM Corporation.
+ * Added by & Authors:
+ * Jerone Young <jyoung5@us.ibm.com>
+ * This work is licensed under the GNU GPL licence version 2 or later.
+ *
+ */
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "cpu.h"
+#include "exec-all.h"
+
+
+struct ppc_def_t {
+ const unsigned char *name;
+ uint32_t pvr;
+ uint32_t svr;
+ uint64_t insns_flags;
+ uint64_t msr_mask;
+ powerpc_mmu_t mmu_model;
+ powerpc_excp_t excp_model;
+ powerpc_input_t bus_model;
+ uint32_t flags;
+ int bfd_mach;
+ void (*init_proc)(CPUPPCState *env);
+ int (*check_pow)(CPUPPCState *env);
+};
+
+int code_copy_enabled = 0;
+
+void cpu_dump_state (CPUState *env, FILE *f,
+ int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
+ int flags)
+{
+}
+
+void ppc_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
+{
+}
+
+void cpu_dump_statistics (CPUState *env, FILE*f,
+ int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
+ int flags)
+{
+}
+
+unsigned long code_gen_max_block_size(void)
+{
+ return 32;
+}
+
+void cpu_gen_init(void)
+{
+}
+
+int cpu_restore_state(TranslationBlock *tb,
+ CPUState *env, unsigned long searched_pc,
+ void *puc)
+{
+ return 0;
+}
+
+int cpu_ppc_gen_code(CPUState *env, TranslationBlock *tb, int *gen_code_size_ptr)
+{
+ return 0;
+}
+
+void init_proc_ppc440ep_kvm(CPUPPCState *env)
+{
+ ppc40x_irq_init(env);
+}
+
+static ppc_def_t ppc440ep_kvm = {
+ .name = "440EP KVM",
+    .mmu_model = POWERPC_MMU_SOFT_4xx, /* XXX: needed for GDB stub */
+ .init_proc = init_proc_ppc440ep_kvm,
+};
+
+const ppc_def_t *cpu_ppc_find_by_name (const unsigned char *name)
+{
+ return &ppc440ep_kvm;
+}
+
+int cpu_ppc_register_internal (CPUPPCState *env, const ppc_def_t *def)
+{
+ env->mmu_model = def->mmu_model;
+ (*def->init_proc)(env);
+ return 0;
+}
+
+void flush_icache_range(unsigned long start, unsigned long stop)
+{
+}
diff --git a/target-ppc/helper.c b/target-ppc/helper.c
new file mode 100644
index 0000000..23e3189
--- /dev/null
+++ b/target-ppc/helper.c
@@ -0,0 +1,2822 @@
+/*
+ * PowerPC emulation helpers for qemu.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <signal.h>
+
+#include "cpu.h"
+#include "exec-all.h"
+#include "helper_regs.h"
+#include "qemu-common.h"
+#include "kvm.h"
+#include "qemu-kvm.h"
+
+//#define DEBUG_MMU
+//#define DEBUG_BATS
+//#define DEBUG_SLB
+//#define DEBUG_SOFTWARE_TLB
+//#define DUMP_PAGE_TABLES
+//#define DEBUG_EXCEPTIONS
+//#define FLUSH_ALL_TLBS
+
+#ifdef DEBUG_MMU
+# define LOG_MMU(...) qemu_log(__VA_ARGS__)
+# define LOG_MMU_STATE(env) log_cpu_state((env), 0)
+#else
+# define LOG_MMU(...) do { } while (0)
+# define LOG_MMU_STATE(...) do { } while (0)
+#endif
+
+
+#ifdef DEBUG_SOFTWARE_TLB
+# define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
+#else
+# define LOG_SWTLB(...) do { } while (0)
+#endif
+
+#ifdef DEBUG_BATS
+# define LOG_BATS(...) qemu_log(__VA_ARGS__)
+#else
+# define LOG_BATS(...) do { } while (0)
+#endif
+
+#ifdef DEBUG_SLB
+# define LOG_SLB(...) qemu_log(__VA_ARGS__)
+#else
+# define LOG_SLB(...) do { } while (0)
+#endif
+
+#ifdef DEBUG_EXCEPTIONS
+# define LOG_EXCP(...) qemu_log(__VA_ARGS__)
+#else
+# define LOG_EXCP(...) do { } while (0)
+#endif
+
+
+/*****************************************************************************/
+/* PowerPC MMU emulation */
+
+#if defined(CONFIG_USER_ONLY)
+int cpu_ppc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
+ int mmu_idx, int is_softmmu)
+{
+ int exception, error_code;
+
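+    /* User-mode faults are always reported as "translation not found"
+     * (0x40000000), with the store bit (0x02000000) added for write
+     * accesses. */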
+ if (rw == 2) {
+ exception = POWERPC_EXCP_ISI;
+ error_code = 0x40000000;
+ } else {
+ exception = POWERPC_EXCP_DSI;
+ error_code = 0x40000000;
+ if (rw)
+ error_code |= 0x02000000;
+ env->spr[SPR_DAR] = address;
+ env->spr[SPR_DSISR] = error_code;
+ }
+ env->exception_index = exception;
+ env->error_code = error_code;
+
+ return 1;
+}
+
+#else
+/* Common routines used by software and hardware TLBs emulation */
+static inline int pte_is_valid(target_ulong pte0)
+{
+ return pte0 & 0x80000000 ? 1 : 0;
+}
+
+static inline void pte_invalidate(target_ulong *pte0)
+{
+ *pte0 &= ~0x80000000;
+}
+
+#if defined(TARGET_PPC64)
+static inline int pte64_is_valid(target_ulong pte0)
+{
+ return pte0 & 0x0000000000000001ULL ? 1 : 0;
+}
+
+static inline void pte64_invalidate(target_ulong *pte0)
+{
+ *pte0 &= ~0x0000000000000001ULL;
+}
+#endif
+
+#define PTE_PTEM_MASK 0x7FFFFFBF
+#define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
+#if defined(TARGET_PPC64)
+#define PTE64_PTEM_MASK 0xFFFFFFFFFFFFFF80ULL
+#define PTE64_CHECK_MASK (TARGET_PAGE_MASK | 0x7F)
+#endif
+
+static inline int pp_check(int key, int pp, int nx)
+{
+ int access;
+
+ /* Compute access rights */
+    /* When pp is 3/7, the result is undefined. Set it to no access. */
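+    /* The resulting rights (before the NX check below) are:
+     *   key == 0: pp 0/1/2 => read/write, pp 3/6 => read-only
+     *   key == 1: pp 0/6 => no access, pp 1/3 => read-only, pp 2 => read/write
+     */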
+ access = 0;
+ if (key == 0) {
+ switch (pp) {
+ case 0x0:
+ case 0x1:
+ case 0x2:
+ access |= PAGE_WRITE;
+ /* No break here */
+ case 0x3:
+ case 0x6:
+ access |= PAGE_READ;
+ break;
+ }
+ } else {
+ switch (pp) {
+ case 0x0:
+ case 0x6:
+ access = 0;
+ break;
+ case 0x1:
+ case 0x3:
+ access = PAGE_READ;
+ break;
+ case 0x2:
+ access = PAGE_READ | PAGE_WRITE;
+ break;
+ }
+ }
+ if (nx == 0)
+ access |= PAGE_EXEC;
+
+ return access;
+}
+
+static inline int check_prot(int prot, int rw, int access_type)
+{
+ int ret;
+
+ if (access_type == ACCESS_CODE) {
+ if (prot & PAGE_EXEC)
+ ret = 0;
+ else
+ ret = -2;
+ } else if (rw) {
+ if (prot & PAGE_WRITE)
+ ret = 0;
+ else
+ ret = -2;
+ } else {
+ if (prot & PAGE_READ)
+ ret = 0;
+ else
+ ret = -2;
+ }
+
+ return ret;
+}
+
+static inline int _pte_check(mmu_ctx_t *ctx, int is_64b, target_ulong pte0,
+ target_ulong pte1, int h, int rw, int type)
+{
+ target_ulong ptem, mmask;
+ int access, ret, pteh, ptev, pp;
+
+ ret = -1;
+ /* Check validity and table match */
+#if defined(TARGET_PPC64)
+ if (is_64b) {
+ ptev = pte64_is_valid(pte0);
+ pteh = (pte0 >> 1) & 1;
+ } else
+#endif
+ {
+ ptev = pte_is_valid(pte0);
+ pteh = (pte0 >> 6) & 1;
+ }
+ if (ptev && h == pteh) {
+ /* Check vsid & api */
+#if defined(TARGET_PPC64)
+ if (is_64b) {
+ ptem = pte0 & PTE64_PTEM_MASK;
+ mmask = PTE64_CHECK_MASK;
+ pp = (pte1 & 0x00000003) | ((pte1 >> 61) & 0x00000004);
+ ctx->nx = (pte1 >> 2) & 1; /* No execute bit */
+ ctx->nx |= (pte1 >> 3) & 1; /* Guarded bit */
+ } else
+#endif
+ {
+ ptem = pte0 & PTE_PTEM_MASK;
+ mmask = PTE_CHECK_MASK;
+ pp = pte1 & 0x00000003;
+ }
+ if (ptem == ctx->ptem) {
+ if (ctx->raddr != (target_phys_addr_t)-1ULL) {
+ /* all matches should have equal RPN, WIMG & PP */
+ if ((ctx->raddr & mmask) != (pte1 & mmask)) {
+ qemu_log("Bad RPN/WIMG/PP\n");
+ return -3;
+ }
+ }
+ /* Compute access rights */
+ access = pp_check(ctx->key, pp, ctx->nx);
+            /* Keep the matching PTE information */
+ ctx->raddr = pte1;
+ ctx->prot = access;
+ ret = check_prot(ctx->prot, rw, type);
+ if (ret == 0) {
+ /* Access granted */
+ LOG_MMU("PTE access granted !\n");
+ } else {
+ /* Access right violation */
+ LOG_MMU("PTE access rejected\n");
+ }
+ }
+ }
+
+ return ret;
+}
+
+static inline int pte32_check(mmu_ctx_t *ctx, target_ulong pte0,
+ target_ulong pte1, int h, int rw, int type)
+{
+ return _pte_check(ctx, 0, pte0, pte1, h, rw, type);
+}
+
+#if defined(TARGET_PPC64)
+static inline int pte64_check(mmu_ctx_t *ctx, target_ulong pte0,
+ target_ulong pte1, int h, int rw, int type)
+{
+ return _pte_check(ctx, 1, pte0, pte1, h, rw, type);
+}
+#endif
+
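+/* Update the Referenced (0x00000100) and Changed (0x00000080) bits of the
+ * PTE; the return value tells the caller whether the modified word must be
+ * written back to memory. */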
+static inline int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
+ int ret, int rw)
+{
+ int store = 0;
+
+ /* Update page flags */
+ if (!(*pte1p & 0x00000100)) {
+ /* Update accessed flag */
+ *pte1p |= 0x00000100;
+ store = 1;
+ }
+ if (!(*pte1p & 0x00000080)) {
+ if (rw == 1 && ret == 0) {
+ /* Update changed flag */
+ *pte1p |= 0x00000080;
+ store = 1;
+ } else {
+ /* Force page fault for first write access */
+ ctx->prot &= ~PAGE_WRITE;
+ }
+ }
+
+ return store;
+}
+
+/* Software driven TLB helpers */
+static inline int ppc6xx_tlb_getnum(CPUState *env, target_ulong eaddr, int way,
+ int is_code)
+{
+ int nr;
+
+ /* Select TLB num in a way from address */
+ nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
+ /* Select TLB way */
+ nr += env->tlb_per_way * way;
+ /* 6xx have separate TLBs for instructions and data */
+ if (is_code && env->id_tlbs == 1)
+ nr += env->nb_tlb;
+
+ return nr;
+}
+
+static inline void ppc6xx_tlb_invalidate_all(CPUState *env)
+{
+ ppc6xx_tlb_t *tlb;
+ int nr, max;
+
+ //LOG_SWTLB("Invalidate all TLBs\n");
+ /* Invalidate all defined software TLB */
+ max = env->nb_tlb;
+ if (env->id_tlbs == 1)
+ max *= 2;
+ for (nr = 0; nr < max; nr++) {
+ tlb = &env->tlb[nr].tlb6;
+ pte_invalidate(&tlb->pte0);
+ }
+ tlb_flush(env, 1);
+}
+
+static inline void __ppc6xx_tlb_invalidate_virt(CPUState *env,
+ target_ulong eaddr,
+ int is_code, int match_epn)
+{
+#if !defined(FLUSH_ALL_TLBS)
+ ppc6xx_tlb_t *tlb;
+ int way, nr;
+
+ /* Invalidate ITLB + DTLB, all ways */
+ for (way = 0; way < env->nb_ways; way++) {
+ nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
+ tlb = &env->tlb[nr].tlb6;
+ if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
+ LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr,
+ env->nb_tlb, eaddr);
+ pte_invalidate(&tlb->pte0);
+ tlb_flush_page(env, tlb->EPN);
+ }
+ }
+#else
+    /* XXX: the PowerPC specification says this is valid as well */
+ ppc6xx_tlb_invalidate_all(env);
+#endif
+}
+
+static inline void ppc6xx_tlb_invalidate_virt(CPUState *env,
+ target_ulong eaddr, int is_code)
+{
+ __ppc6xx_tlb_invalidate_virt(env, eaddr, is_code, 0);
+}
+
+void ppc6xx_tlb_store (CPUState *env, target_ulong EPN, int way, int is_code,
+ target_ulong pte0, target_ulong pte1)
+{
+ ppc6xx_tlb_t *tlb;
+ int nr;
+
+ nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
+ tlb = &env->tlb[nr].tlb6;
+ LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
+ " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1);
+ /* Invalidate any pending reference in Qemu for this virtual address */
+ __ppc6xx_tlb_invalidate_virt(env, EPN, is_code, 1);
+ tlb->pte0 = pte0;
+ tlb->pte1 = pte1;
+ tlb->EPN = EPN;
+ /* Store last way for LRU mechanism */
+ env->last_way = way;
+}
+
+static inline int ppc6xx_tlb_check(CPUState *env, mmu_ctx_t *ctx,
+ target_ulong eaddr, int rw, int access_type)
+{
+ ppc6xx_tlb_t *tlb;
+ int nr, best, way;
+ int ret;
+
+ best = -1;
+ ret = -1; /* No TLB found */
+ for (way = 0; way < env->nb_ways; way++) {
+ nr = ppc6xx_tlb_getnum(env, eaddr, way,
+ access_type == ACCESS_CODE ? 1 : 0);
+ tlb = &env->tlb[nr].tlb6;
+ /* This test "emulates" the PTE index match for hardware TLBs */
+ if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
+ LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx
+ "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb,
+ pte_is_valid(tlb->pte0) ? "valid" : "inval",
+ tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
+ continue;
+ }
+ LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " "
+ TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb,
+ pte_is_valid(tlb->pte0) ? "valid" : "inval",
+ tlb->EPN, eaddr, tlb->pte1,
+ rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D');
+ switch (pte32_check(ctx, tlb->pte0, tlb->pte1, 0, rw, access_type)) {
+ case -3:
+ /* TLB inconsistency */
+ return -1;
+ case -2:
+ /* Access violation */
+ ret = -2;
+ best = nr;
+ break;
+ case -1:
+ default:
+ /* No match */
+ break;
+ case 0:
+ /* access granted */
+            /* XXX: we should keep looping to check the consistency of all
+             * TLBs, but we can speed the whole thing up since the result
+             * would be undefined if the TLBs are not consistent.
+ */
+ ret = 0;
+ best = nr;
+ goto done;
+ }
+ }
+ if (best != -1) {
+ done:
+ LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n",
+ ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
+ /* Update page flags */
+ pte_update_flags(ctx, &env->tlb[best].tlb6.pte1, ret, rw);
+ }
+
+ return ret;
+}
+
+/* Perform BAT hit & translation */
+static inline void bat_size_prot(CPUState *env, target_ulong *blp, int *validp,
+ int *protp, target_ulong *BATu,
+ target_ulong *BATl)
+{
+ target_ulong bl;
+ int pp, valid, prot;
+
+ bl = (*BATu & 0x00001FFC) << 15;
+ valid = 0;
+ prot = 0;
+ if (((msr_pr == 0) && (*BATu & 0x00000002)) ||
+ ((msr_pr != 0) && (*BATu & 0x00000001))) {
+ valid = 1;
+ pp = *BATl & 0x00000003;
+ if (pp != 0) {
+ prot = PAGE_READ | PAGE_EXEC;
+ if (pp == 0x2)
+ prot |= PAGE_WRITE;
+ }
+ }
+ *blp = bl;
+ *validp = valid;
+ *protp = prot;
+}
+
+static inline void bat_601_size_prot(CPUState *env, target_ulong *blp,
+ int *validp, int *protp,
+ target_ulong *BATu, target_ulong *BATl)
+{
+ target_ulong bl;
+ int key, pp, valid, prot;
+
+ bl = (*BATl & 0x0000003F) << 17;
+ LOG_BATS("b %02x ==> bl " TARGET_FMT_lx " msk " TARGET_FMT_lx "\n",
+ (uint8_t)(*BATl & 0x0000003F), bl, ~bl);
+ prot = 0;
+ valid = (*BATl >> 6) & 1;
+ if (valid) {
+ pp = *BATu & 0x00000003;
+ if (msr_pr == 0)
+ key = (*BATu >> 3) & 1;
+ else
+ key = (*BATu >> 2) & 1;
+ prot = pp_check(key, pp, 0);
+ }
+ *blp = bl;
+ *validp = valid;
+ *protp = prot;
+}
+
+static inline int get_bat(CPUState *env, mmu_ctx_t *ctx, target_ulong virtual,
+ int rw, int type)
+{
+ target_ulong *BATlt, *BATut, *BATu, *BATl;
+ target_ulong BEPIl, BEPIu, bl;
+ int i, valid, prot;
+ int ret = -1;
+
+ LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
+ type == ACCESS_CODE ? 'I' : 'D', virtual);
+ switch (type) {
+ case ACCESS_CODE:
+ BATlt = env->IBAT[1];
+ BATut = env->IBAT[0];
+ break;
+ default:
+ BATlt = env->DBAT[1];
+ BATut = env->DBAT[0];
+ break;
+ }
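+    /* Compare the effective address against each BAT's block effective page
+     * index (BEPI); bl is the block length mask taken from the BAT register.
+     */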
+ for (i = 0; i < env->nb_BATs; i++) {
+ BATu = &BATut[i];
+ BATl = &BATlt[i];
+ BEPIu = *BATu & 0xF0000000;
+ BEPIl = *BATu & 0x0FFE0000;
+ if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
+ bat_601_size_prot(env, &bl, &valid, &prot, BATu, BATl);
+ } else {
+ bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
+ }
+ LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
+ " BATl " TARGET_FMT_lx "\n", __func__,
+ type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl);
+ if ((virtual & 0xF0000000) == BEPIu &&
+ ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
+ /* BAT matches */
+ if (valid != 0) {
+ /* Get physical address */
+ ctx->raddr = (*BATl & 0xF0000000) |
+ ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
+ (virtual & 0x0001F000);
+ /* Compute access rights */
+ ctx->prot = prot;
+ ret = check_prot(ctx->prot, rw, type);
+ if (ret == 0)
+ LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n",
+ i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-',
+ ctx->prot & PAGE_WRITE ? 'W' : '-');
+ break;
+ }
+ }
+ }
+ if (ret < 0) {
+#if defined(DEBUG_BATS)
+ if (qemu_log_enabled()) {
+ LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual);
+ for (i = 0; i < 4; i++) {
+ BATu = &BATut[i];
+ BATl = &BATlt[i];
+ BEPIu = *BATu & 0xF0000000;
+ BEPIl = *BATu & 0x0FFE0000;
+ bl = (*BATu & 0x00001FFC) << 15;
+ LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
+ " BATl " TARGET_FMT_lx " \n\t" TARGET_FMT_lx " "
+ TARGET_FMT_lx " " TARGET_FMT_lx "\n",
+ __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual,
+ *BATu, *BATl, BEPIu, BEPIl, bl);
+ }
+ }
+#endif
+ }
+ /* No hit */
+ return ret;
+}
+
+/* PTE table lookup */
+static inline int _find_pte(mmu_ctx_t *ctx, int is_64b, int h, int rw,
+ int type, int target_page_bits)
+{
+ target_ulong base, pte0, pte1;
+ int i, good = -1;
+ int ret, r;
+
+ ret = -1; /* No entry found */
+ base = ctx->pg_addr[h];
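+    /* Each PTE group holds 8 PTEs: 8 bytes each for 32-bit MMUs,
+     * 16 bytes each for the 64-bit hashed page table. */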
+ for (i = 0; i < 8; i++) {
+#if defined(TARGET_PPC64)
+ if (is_64b) {
+ pte0 = ldq_phys(base + (i * 16));
+ pte1 = ldq_phys(base + (i * 16) + 8);
+
+ /* We have a TLB that saves 4K pages, so let's
+             * split a huge page into 4K chunks */
+ if (target_page_bits != TARGET_PAGE_BITS)
+ pte1 |= (ctx->eaddr & (( 1 << target_page_bits ) - 1))
+ & TARGET_PAGE_MASK;
+
+ r = pte64_check(ctx, pte0, pte1, h, rw, type);
+ LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " "
+ TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n",
+ base + (i * 16), pte0, pte1, (int)(pte0 & 1), h,
+ (int)((pte0 >> 1) & 1), ctx->ptem);
+ } else
+#endif
+ {
+ pte0 = ldl_phys(base + (i * 8));
+ pte1 = ldl_phys(base + (i * 8) + 4);
+ r = pte32_check(ctx, pte0, pte1, h, rw, type);
+ LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " "
+ TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n",
+ base + (i * 8), pte0, pte1, (int)(pte0 >> 31), h,
+ (int)((pte0 >> 6) & 1), ctx->ptem);
+ }
+ switch (r) {
+ case -3:
+ /* PTE inconsistency */
+ return -1;
+ case -2:
+ /* Access violation */
+ ret = -2;
+ good = i;
+ break;
+ case -1:
+ default:
+ /* No PTE match */
+ break;
+ case 0:
+ /* access granted */
+            /* XXX: we should keep looping to check the consistency of all
+             * PTEs, but we can speed the whole thing up since the result
+             * would be undefined if the PTEs are not consistent.
+ */
+ ret = 0;
+ good = i;
+ goto done;
+ }
+ }
+ if (good != -1) {
+ done:
+ LOG_MMU("found PTE at addr " TARGET_FMT_lx " prot=%01x ret=%d\n",
+ ctx->raddr, ctx->prot, ret);
+ /* Update page flags */
+ pte1 = ctx->raddr;
+ if (pte_update_flags(ctx, &pte1, ret, rw) == 1) {
+#if defined(TARGET_PPC64)
+ if (is_64b) {
+ stq_phys_notdirty(base + (good * 16) + 8, pte1);
+ } else
+#endif
+ {
+ stl_phys_notdirty(base + (good * 8) + 4, pte1);
+ }
+ }
+ }
+
+ return ret;
+}
+
+static inline int find_pte32(mmu_ctx_t *ctx, int h, int rw, int type,
+ int target_page_bits)
+{
+ return _find_pte(ctx, 0, h, rw, type, target_page_bits);
+}
+
+#if defined(TARGET_PPC64)
+static inline int find_pte64(mmu_ctx_t *ctx, int h, int rw, int type,
+ int target_page_bits)
+{
+ return _find_pte(ctx, 1, h, rw, type, target_page_bits);
+}
+#endif
+
+static inline int find_pte(CPUState *env, mmu_ctx_t *ctx, int h, int rw,
+ int type, int target_page_bits)
+{
+#if defined(TARGET_PPC64)
+ if (env->mmu_model & POWERPC_MMU_64)
+ return find_pte64(ctx, h, rw, type, target_page_bits);
+#endif
+
+ return find_pte32(ctx, h, rw, type, target_page_bits);
+}
+
+#if defined(TARGET_PPC64)
+static ppc_slb_t *slb_get_entry(CPUPPCState *env, int nr)
+{
+ ppc_slb_t *retval = &env->slb[nr];
+
+#if 0 // XXX implement bridge mode?
+ if (env->spr[SPR_ASR] & 1) {
+ target_phys_addr_t sr_base;
+
+ sr_base = env->spr[SPR_ASR] & 0xfffffffffffff000;
+ sr_base += (12 * nr);
+
+ retval->tmp64 = ldq_phys(sr_base);
+ retval->tmp = ldl_phys(sr_base + 8);
+ }
+#endif
+
+ return retval;
+}
+
+static void slb_set_entry(CPUPPCState *env, int nr, ppc_slb_t *slb)
+{
+ ppc_slb_t *entry = &env->slb[nr];
+
+ if (slb == entry)
+ return;
+
+ entry->tmp64 = slb->tmp64;
+ entry->tmp = slb->tmp;
+}
+
+static inline int slb_is_valid(ppc_slb_t *slb)
+{
+ return (int)(slb->tmp64 & 0x0000000008000000ULL);
+}
+
+static inline void slb_invalidate(ppc_slb_t *slb)
+{
+ slb->tmp64 &= ~0x0000000008000000ULL;
+}
+
+static inline int slb_lookup(CPUPPCState *env, target_ulong eaddr,
+ target_ulong *vsid, target_ulong *page_mask,
+ int *attr, int *target_page_bits)
+{
+ target_ulong mask;
+ int n, ret;
+
+ ret = -5;
+ LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);
+ mask = 0x0000000000000000ULL; /* Avoid gcc warning */
+ for (n = 0; n < env->slb_nr; n++) {
+ ppc_slb_t *slb = slb_get_entry(env, n);
+
+ LOG_SLB("%s: seg %d %016" PRIx64 " %08"
+ PRIx32 "\n", __func__, n, slb->tmp64, slb->tmp);
+ if (slb_is_valid(slb)) {
+ /* SLB entry is valid */
+ mask = 0xFFFFFFFFF0000000ULL;
+ if (slb->tmp & 0x8) {
+ /* 16 MB PTEs */
+ if (target_page_bits)
+ *target_page_bits = 24;
+ } else {
+ /* 4 KB PTEs */
+ if (target_page_bits)
+ *target_page_bits = TARGET_PAGE_BITS;
+ }
+ if ((eaddr & mask) == (slb->tmp64 & mask)) {
+ /* SLB match */
+ *vsid = ((slb->tmp64 << 24) | (slb->tmp >> 8)) & 0x0003FFFFFFFFFFFFULL;
+ *page_mask = ~mask;
+ *attr = slb->tmp & 0xFF;
+ ret = n;
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
+void ppc_slb_invalidate_all (CPUPPCState *env)
+{
+ int n, do_invalidate;
+
+ do_invalidate = 0;
+ /* XXX: Warning: slbia never invalidates the first segment */
+ for (n = 1; n < env->slb_nr; n++) {
+ ppc_slb_t *slb = slb_get_entry(env, n);
+
+ if (slb_is_valid(slb)) {
+ slb_invalidate(slb);
+ slb_set_entry(env, n, slb);
+ /* XXX: given the fact that segment size is 256 MB or 1TB,
+ * and we still don't have a tlb_flush_mask(env, n, mask)
+ * in Qemu, we just invalidate all TLBs
+ */
+ do_invalidate = 1;
+ }
+ }
+ if (do_invalidate)
+ tlb_flush(env, 1);
+}
+
+void ppc_slb_invalidate_one (CPUPPCState *env, uint64_t T0)
+{
+ target_ulong vsid, page_mask;
+ int attr;
+ int n;
+
+ n = slb_lookup(env, T0, &vsid, &page_mask, &attr, NULL);
+ if (n >= 0) {
+ ppc_slb_t *slb = slb_get_entry(env, n);
+
+ if (slb_is_valid(slb)) {
+ slb_invalidate(slb);
+ slb_set_entry(env, n, slb);
+ /* XXX: given the fact that segment size is 256 MB or 1TB,
+ * and we still don't have a tlb_flush_mask(env, n, mask)
+ * in Qemu, we just invalidate all TLBs
+ */
+ tlb_flush(env, 1);
+ }
+ }
+}
+
+target_ulong ppc_load_slb (CPUPPCState *env, int slb_nr)
+{
+ target_ulong rt;
+ ppc_slb_t *slb = slb_get_entry(env, slb_nr);
+
+ if (slb_is_valid(slb)) {
+ /* SLB entry is valid */
+ /* Copy SLB bits 62:88 to Rt 37:63 (VSID 23:49) */
+ rt = slb->tmp >> 8; /* 65:88 => 40:63 */
+ rt |= (slb->tmp64 & 0x7) << 24; /* 62:64 => 37:39 */
+ /* Copy SLB bits 89:92 to Rt 33:36 (KsKpNL) */
+ rt |= ((slb->tmp >> 4) & 0xF) << 27;
+ } else {
+ rt = 0;
+ }
+ LOG_SLB("%s: %016" PRIx64 " %08" PRIx32 " => %d "
+ TARGET_FMT_lx "\n", __func__, slb->tmp64, slb->tmp, slb_nr, rt);
+
+ return rt;
+}
+
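+/* rb carries the ESID, the valid bit and the SLB entry index; rs carries the
+ * VSID and the protection/size flags. The fields are repacked into the
+ * cached tmp64/tmp words that slb_lookup() expects. */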
+void ppc_store_slb (CPUPPCState *env, target_ulong rb, target_ulong rs)
+{
+ ppc_slb_t *slb;
+
+ uint64_t vsid;
+ uint64_t esid;
+ int flags, valid, slb_nr;
+
+ vsid = rs >> 12;
+ flags = ((rs >> 8) & 0xf);
+
+ esid = rb >> 28;
+ valid = (rb & (1 << 27));
+ slb_nr = rb & 0xfff;
+
+ slb = slb_get_entry(env, slb_nr);
+ slb->tmp64 = (esid << 28) | valid | (vsid >> 24);
+ slb->tmp = (vsid << 8) | (flags << 3);
+
+ LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
+ " %08" PRIx32 "\n", __func__, slb_nr, rb, rs, slb->tmp64,
+ slb->tmp);
+
+ slb_set_entry(env, slb_nr, slb);
+}
+#endif /* defined(TARGET_PPC64) */
+
+/* Perform segment based translation */
+static inline target_phys_addr_t get_pgaddr(target_phys_addr_t sdr1,
+ int sdr_sh,
+ target_phys_addr_t hash,
+ target_phys_addr_t mask)
+{
+ return (sdr1 & ((target_phys_addr_t)(-1ULL) << sdr_sh)) | (hash & mask);
+}
+
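+/* Hashed page table lookup: the VSID (from the segment register on 32-bit
+ * MMUs, or from the SLB on 64-bit ones) is XORed with the page index to form
+ * the primary hash; the secondary hash is its complement. Both PTE groups
+ * are then searched with find_pte(). */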
+static inline int get_segment(CPUState *env, mmu_ctx_t *ctx,
+ target_ulong eaddr, int rw, int type)
+{
+ target_phys_addr_t sdr, hash, mask, sdr_mask, htab_mask;
+ target_ulong sr, vsid, vsid_mask, pgidx, page_mask;
+#if defined(TARGET_PPC64)
+ int attr;
+#endif
+ int ds, vsid_sh, sdr_sh, pr, target_page_bits;
+ int ret, ret2;
+
+ pr = msr_pr;
+#if defined(TARGET_PPC64)
+ if (env->mmu_model & POWERPC_MMU_64) {
+ LOG_MMU("Check SLBs\n");
+ ret = slb_lookup(env, eaddr, &vsid, &page_mask, &attr,
+ &target_page_bits);
+ if (ret < 0)
+ return ret;
+ ctx->key = ((attr & 0x40) && (pr != 0)) ||
+ ((attr & 0x80) && (pr == 0)) ? 1 : 0;
+ ds = 0;
+ ctx->nx = attr & 0x10 ? 1 : 0;
+ ctx->eaddr = eaddr;
+ vsid_mask = 0x00003FFFFFFFFF80ULL;
+ vsid_sh = 7;
+ sdr_sh = 18;
+ sdr_mask = 0x3FF80;
+ } else
+#endif /* defined(TARGET_PPC64) */
+ {
+ sr = env->sr[eaddr >> 28];
+ page_mask = 0x0FFFFFFF;
+ ctx->key = (((sr & 0x20000000) && (pr != 0)) ||
+ ((sr & 0x40000000) && (pr == 0))) ? 1 : 0;
+ ds = sr & 0x80000000 ? 1 : 0;
+ ctx->nx = sr & 0x10000000 ? 1 : 0;
+ vsid = sr & 0x00FFFFFF;
+ vsid_mask = 0x01FFFFC0;
+ vsid_sh = 6;
+ sdr_sh = 16;
+ sdr_mask = 0xFFC0;
+ target_page_bits = TARGET_PAGE_BITS;
+ LOG_MMU("Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx " nip="
+ TARGET_FMT_lx " lr=" TARGET_FMT_lx
+ " ir=%d dr=%d pr=%d %d t=%d\n",
+ eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
+ (int)msr_dr, pr != 0 ? 1 : 0, rw, type);
+ }
+ LOG_MMU("pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
+ ctx->key, ds, ctx->nx, vsid);
+ ret = -1;
+ if (!ds) {
+ /* Check if instruction fetch is allowed, if needed */
+ if (type != ACCESS_CODE || ctx->nx == 0) {
+ /* Page address translation */
+ /* Primary table address */
+ sdr = env->sdr1;
+ pgidx = (eaddr & page_mask) >> target_page_bits;
+#if defined(TARGET_PPC64)
+ if (env->mmu_model & POWERPC_MMU_64) {
+ htab_mask = 0x0FFFFFFF >> (28 - (sdr & 0x1F));
+ /* XXX: this is false for 1 TB segments */
+ hash = ((vsid ^ pgidx) << vsid_sh) & vsid_mask;
+ } else
+#endif
+ {
+ htab_mask = sdr & 0x000001FF;
+ hash = ((vsid ^ pgidx) << vsid_sh) & vsid_mask;
+ }
+ mask = (htab_mask << sdr_sh) | sdr_mask;
+ LOG_MMU("sdr " TARGET_FMT_plx " sh %d hash " TARGET_FMT_plx
+ " mask " TARGET_FMT_plx " " TARGET_FMT_lx "\n",
+ sdr, sdr_sh, hash, mask, page_mask);
+ ctx->pg_addr[0] = get_pgaddr(sdr, sdr_sh, hash, mask);
+ /* Secondary table address */
+ hash = (~hash) & vsid_mask;
+ LOG_MMU("sdr " TARGET_FMT_plx " sh %d hash " TARGET_FMT_plx
+ " mask " TARGET_FMT_plx "\n", sdr, sdr_sh, hash, mask);
+ ctx->pg_addr[1] = get_pgaddr(sdr, sdr_sh, hash, mask);
+#if defined(TARGET_PPC64)
+ if (env->mmu_model & POWERPC_MMU_64) {
+ /* Only 5 bits of the page index are used in the AVPN */
+ if (target_page_bits > 23) {
+ ctx->ptem = (vsid << 12) |
+ ((pgidx << (target_page_bits - 16)) & 0xF80);
+ } else {
+ ctx->ptem = (vsid << 12) | ((pgidx >> 4) & 0x0F80);
+ }
+ } else
+#endif
+ {
+ ctx->ptem = (vsid << 7) | (pgidx >> 10);
+ }
+ /* Initialize real address with an invalid value */
+ ctx->raddr = (target_phys_addr_t)-1ULL;
+ if (unlikely(env->mmu_model == POWERPC_MMU_SOFT_6xx ||
+ env->mmu_model == POWERPC_MMU_SOFT_74xx)) {
+ /* Software TLB search */
+ ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type);
+ } else {
+ LOG_MMU("0 sdr1=" TARGET_FMT_plx " vsid=" TARGET_FMT_lx " "
+ "api=" TARGET_FMT_lx " hash=" TARGET_FMT_plx
+ " pg_addr=" TARGET_FMT_plx "\n",
+ sdr, vsid, pgidx, hash, ctx->pg_addr[0]);
+ /* Primary table lookup */
+ ret = find_pte(env, ctx, 0, rw, type, target_page_bits);
+ if (ret < 0) {
+ /* Secondary table lookup */
+ if (eaddr != 0xEFFFFFFF)
+ LOG_MMU("1 sdr1=" TARGET_FMT_plx " vsid=" TARGET_FMT_lx " "
+ "api=" TARGET_FMT_lx " hash=" TARGET_FMT_plx
+ " pg_addr=" TARGET_FMT_plx "\n", sdr, vsid,
+ pgidx, hash, ctx->pg_addr[1]);
+ ret2 = find_pte(env, ctx, 1, rw, type,
+ target_page_bits);
+ if (ret2 != -1)
+ ret = ret2;
+ }
+ }
+#if defined (DUMP_PAGE_TABLES)
+ if (qemu_log_enabled()) {
+ target_phys_addr_t curaddr;
+ uint32_t a0, a1, a2, a3;
+ qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx
+ "\n", sdr, mask + 0x80);
+ for (curaddr = sdr; curaddr < (sdr + mask + 0x80);
+ curaddr += 16) {
+ a0 = ldl_phys(curaddr);
+ a1 = ldl_phys(curaddr + 4);
+ a2 = ldl_phys(curaddr + 8);
+ a3 = ldl_phys(curaddr + 12);
+ if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
+ qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n",
+ curaddr, a0, a1, a2, a3);
+ }
+ }
+ }
+#endif
+ } else {
+ LOG_MMU("No access allowed\n");
+ ret = -3;
+ }
+ } else {
+ LOG_MMU("direct store...\n");
+        /* Direct-store segment: absolutely *BUGGY* for now */
+ switch (type) {
+ case ACCESS_INT:
+ /* Integer load/store : only access allowed */
+ break;
+ case ACCESS_CODE:
+ /* No code fetch is allowed in direct-store areas */
+ return -4;
+ case ACCESS_FLOAT:
+ /* Floating point load/store */
+ return -4;
+ case ACCESS_RES:
+            /* lwarx, ldarx or stwcx. */
+ return -4;
+ case ACCESS_CACHE:
+ /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
+            /* These should behave as no-ops.
+             * As they already do, it's quite easy :-)
+ */
+ ctx->raddr = eaddr;
+ return 0;
+ case ACCESS_EXT:
+ /* eciwx or ecowx */
+ return -4;
+ default:
+ qemu_log("ERROR: instruction should not need "
+ "address translation\n");
+ return -4;
+ }
+ if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) {
+ ctx->raddr = eaddr;
+ ret = 2;
+ } else {
+ ret = -2;
+ }
+ }
+
+ return ret;
+}
+
+/* Generic TLB check function for embedded PowerPC implementations */
+static inline int ppcemb_tlb_check(CPUState *env, ppcemb_tlb_t *tlb,
+ target_phys_addr_t *raddrp,
+ target_ulong address, uint32_t pid, int ext,
+ int i)
+{
+ target_ulong mask;
+
+ /* Check valid flag */
+ if (!(tlb->prot & PAGE_VALID)) {
+ return -1;
+ }
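+    /* tlb->size is a power of two, so this yields the address mask covering
+     * the whole entry. */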
+ mask = ~(tlb->size - 1);
+ LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx
+ " " TARGET_FMT_lx " %u\n", __func__, i, address, pid, tlb->EPN,
+ mask, (uint32_t)tlb->PID);
+ /* Check PID */
+ if (tlb->PID != 0 && tlb->PID != pid)
+ return -1;
+ /* Check effective address */
+ if ((address & mask) != tlb->EPN)
+ return -1;
+ *raddrp = (tlb->RPN & mask) | (address & ~mask);
+#if (TARGET_PHYS_ADDR_BITS >= 36)
+ if (ext) {
+ /* Extend the physical address to 36 bits */
+ *raddrp |= (target_phys_addr_t)(tlb->RPN & 0xF) << 32;
+ }
+#endif
+
+ return 0;
+}
+
+/* Generic TLB search function for PowerPC embedded implementations */
+int ppcemb_tlb_search (CPUPPCState *env, target_ulong address, uint32_t pid)
+{
+ ppcemb_tlb_t *tlb;
+ target_phys_addr_t raddr;
+ int i, ret;
+
+ /* Default return value is no match */
+ ret = -1;
+ for (i = 0; i < env->nb_tlb; i++) {
+ tlb = &env->tlb[i].tlbe;
+ if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) {
+ ret = i;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/* Helpers specific to PowerPC 40x implementations */
+static inline void ppc4xx_tlb_invalidate_all(CPUState *env)
+{
+ ppcemb_tlb_t *tlb;
+ int i;
+
+ for (i = 0; i < env->nb_tlb; i++) {
+ tlb = &env->tlb[i].tlbe;
+ tlb->prot &= ~PAGE_VALID;
+ }
+ tlb_flush(env, 1);
+}
+
+static inline void ppc4xx_tlb_invalidate_virt(CPUState *env,
+ target_ulong eaddr, uint32_t pid)
+{
+#if !defined(FLUSH_ALL_TLBS)
+ ppcemb_tlb_t *tlb;
+ target_phys_addr_t raddr;
+ target_ulong page, end;
+ int i;
+
+ for (i = 0; i < env->nb_tlb; i++) {
+ tlb = &env->tlb[i].tlbe;
+ if (ppcemb_tlb_check(env, tlb, &raddr, eaddr, pid, 0, i) == 0) {
+ end = tlb->EPN + tlb->size;
+ for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
+ tlb_flush_page(env, page);
+ tlb->prot &= ~PAGE_VALID;
+ break;
+ }
+ }
+#else
+ ppc4xx_tlb_invalidate_all(env);
+#endif
+}
+
+static int mmu40x_get_physical_address (CPUState *env, mmu_ctx_t *ctx,
+ target_ulong address, int rw, int access_type)
+{
+ ppcemb_tlb_t *tlb;
+ target_phys_addr_t raddr;
+ int i, ret, zsel, zpr, pr;
+
+ ret = -1;
+ raddr = (target_phys_addr_t)-1ULL;
+ pr = msr_pr;
+ for (i = 0; i < env->nb_tlb; i++) {
+ tlb = &env->tlb[i].tlbe;
+ if (ppcemb_tlb_check(env, tlb, &raddr, address,
+ env->spr[SPR_40x_PID], 0, i) < 0)
+ continue;
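+        /* The zone protection register holds sixteen 2-bit fields; the zone
+         * selector from the TLB entry picks the one that applies here. */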
+ zsel = (tlb->attr >> 4) & 0xF;
+ zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
+ LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n",
+ __func__, i, zsel, zpr, rw, tlb->attr);
+ /* Check execute enable bit */
+ switch (zpr) {
+ case 0x2:
+ if (pr != 0)
+ goto check_perms;
+ /* No break here */
+ case 0x3:
+ /* All accesses granted */
+ ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ ret = 0;
+ break;
+ case 0x0:
+ if (pr != 0) {
+ /* Raise Zone protection fault. */
+ env->spr[SPR_40x_ESR] = 1 << 22;
+ ctx->prot = 0;
+ ret = -2;
+ break;
+ }
+ /* No break here */
+ case 0x1:
+ check_perms:
+ /* Check from TLB entry */
+ ctx->prot = tlb->prot;
+ ret = check_prot(ctx->prot, rw, access_type);
+ if (ret == -2)
+ env->spr[SPR_40x_ESR] = 0;
+ break;
+ }
+ if (ret >= 0) {
+ ctx->raddr = raddr;
+ LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
+ " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
+ ret);
+ return 0;
+ }
+ }
+ LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
+ " %d %d\n", __func__, address, raddr, ctx->prot, ret);
+
+ return ret;
+}
+
+void store_40x_sler (CPUPPCState *env, uint32_t val)
+{
+ /* XXX: TO BE FIXED */
+ if (val != 0x00000000) {
+        cpu_abort(env, "Little-endian regions are not supported yet\n");
+ }
+ env->spr[SPR_405_SLER] = val;
+}
+
+static int mmubooke_get_physical_address (CPUState *env, mmu_ctx_t *ctx,
+ target_ulong address, int rw,
+ int access_type)
+{
+ ppcemb_tlb_t *tlb;
+ target_phys_addr_t raddr;
+ int i, prot, ret;
+
+ ret = -1;
+ raddr = (target_phys_addr_t)-1ULL;
+ for (i = 0; i < env->nb_tlb; i++) {
+ tlb = &env->tlb[i].tlbe;
+ if (ppcemb_tlb_check(env, tlb, &raddr, address,
+ env->spr[SPR_BOOKE_PID], 1, i) < 0)
+ continue;
+ if (msr_pr != 0)
+ prot = tlb->prot & 0xF;
+ else
+ prot = (tlb->prot >> 4) & 0xF;
+ /* Check the address space */
+ if (access_type == ACCESS_CODE) {
+ if (msr_ir != (tlb->attr & 1))
+ continue;
+ ctx->prot = prot;
+ if (prot & PAGE_EXEC) {
+ ret = 0;
+ break;
+ }
+ ret = -3;
+ } else {
+ if (msr_dr != (tlb->attr & 1))
+ continue;
+ ctx->prot = prot;
+ if ((!rw && prot & PAGE_READ) || (rw && (prot & PAGE_WRITE))) {
+ ret = 0;
+ break;
+ }
+ ret = -2;
+ }
+ }
+ if (ret >= 0)
+ ctx->raddr = raddr;
+
+ return ret;
+}
+
+static inline int check_physical(CPUState *env, mmu_ctx_t *ctx,
+ target_ulong eaddr, int rw)
+{
+ int in_plb, ret;
+
+ ctx->raddr = eaddr;
+ ctx->prot = PAGE_READ | PAGE_EXEC;
+ ret = 0;
+ switch (env->mmu_model) {
+ case POWERPC_MMU_32B:
+ case POWERPC_MMU_601:
+ case POWERPC_MMU_SOFT_6xx:
+ case POWERPC_MMU_SOFT_74xx:
+ case POWERPC_MMU_SOFT_4xx:
+ case POWERPC_MMU_REAL:
+ case POWERPC_MMU_BOOKE:
+ ctx->prot |= PAGE_WRITE;
+ break;
+#if defined(TARGET_PPC64)
+ case POWERPC_MMU_620:
+ case POWERPC_MMU_64B:
+        /* Real addresses are 60 bits long */
+ ctx->raddr &= 0x0FFFFFFFFFFFFFFFULL;
+ ctx->prot |= PAGE_WRITE;
+ break;
+#endif
+ case POWERPC_MMU_SOFT_4xx_Z:
+ if (unlikely(msr_pe != 0)) {
+            /* The 403 family adds some specific protections,
+             * using the PBL/PBU registers for accesses with no translation.
+ */
+ in_plb =
+ /* Check PLB validity */
+ (env->pb[0] < env->pb[1] &&
+ /* and address in plb area */
+ eaddr >= env->pb[0] && eaddr < env->pb[1]) ||
+ (env->pb[2] < env->pb[3] &&
+ eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0;
+ if (in_plb ^ msr_px) {
+ /* Access in protected area */
+ if (rw == 1) {
+ /* Access is not allowed */
+ ret = -2;
+ }
+ } else {
+ /* Read-write access is allowed */
+ ctx->prot |= PAGE_WRITE;
+ }
+ }
+ break;
+ case POWERPC_MMU_MPC8xx:
+ /* XXX: TODO */
+ cpu_abort(env, "MPC8xx MMU model is not implemented\n");
+ break;
+ case POWERPC_MMU_BOOKE_FSL:
+ /* XXX: TODO */
+ cpu_abort(env, "BookE FSL MMU model not implemented\n");
+ break;
+ default:
+ cpu_abort(env, "Unknown or invalid MMU model\n");
+ return -1;
+ }
+
+ return ret;
+}
+
+int get_physical_address (CPUState *env, mmu_ctx_t *ctx, target_ulong eaddr,
+ int rw, int access_type)
+{
+ int ret;
+
+#if 0
+ qemu_log("%s\n", __func__);
+#endif
+ if ((access_type == ACCESS_CODE && msr_ir == 0) ||
+ (access_type != ACCESS_CODE && msr_dr == 0)) {
+ if (env->mmu_model == POWERPC_MMU_BOOKE) {
+ /* The BookE MMU always performs address translation. The
+ IS and DS bits only affect the address space. */
+ ret = mmubooke_get_physical_address(env, ctx, eaddr,
+ rw, access_type);
+ } else {
+ /* No address translation. */
+ ret = check_physical(env, ctx, eaddr, rw);
+ }
+ } else {
+ ret = -1;
+ switch (env->mmu_model) {
+ case POWERPC_MMU_32B:
+ case POWERPC_MMU_601:
+ case POWERPC_MMU_SOFT_6xx:
+ case POWERPC_MMU_SOFT_74xx:
+ /* Try to find a BAT */
+ if (env->nb_BATs != 0)
+ ret = get_bat(env, ctx, eaddr, rw, access_type);
+#if defined(TARGET_PPC64)
+ case POWERPC_MMU_620:
+ case POWERPC_MMU_64B:
+#endif
+ if (ret < 0) {
+ /* We didn't match any BAT entry or don't have BATs */
+ ret = get_segment(env, ctx, eaddr, rw, access_type);
+ }
+ break;
+ case POWERPC_MMU_SOFT_4xx:
+ case POWERPC_MMU_SOFT_4xx_Z:
+ ret = mmu40x_get_physical_address(env, ctx, eaddr,
+ rw, access_type);
+ break;
+ case POWERPC_MMU_BOOKE:
+ ret = mmubooke_get_physical_address(env, ctx, eaddr,
+ rw, access_type);
+ break;
+ case POWERPC_MMU_MPC8xx:
+ /* XXX: TODO */
+ cpu_abort(env, "MPC8xx MMU model is not implemented\n");
+ break;
+ case POWERPC_MMU_BOOKE_FSL:
+ /* XXX: TODO */
+ cpu_abort(env, "BookE FSL MMU model not implemented\n");
+ return -1;
+ case POWERPC_MMU_REAL:
+        cpu_abort(env, "PowerPC in real mode does not do any translation\n");
+ return -1;
+ default:
+ cpu_abort(env, "Unknown or invalid MMU model\n");
+ return -1;
+ }
+ }
+#if 0
+ qemu_log("%s address " TARGET_FMT_lx " => %d " TARGET_FMT_plx "\n",
+ __func__, eaddr, ret, ctx->raddr);
+#endif
+
+ return ret;
+}
+
+target_phys_addr_t cpu_get_phys_page_debug (CPUState *env, target_ulong addr)
+{
+ mmu_ctx_t ctx;
+
+ if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0))
+ return -1;
+
+ return ctx.raddr & TARGET_PAGE_MASK;
+}
+
+/* Perform address translation */
+int cpu_ppc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
+ int mmu_idx, int is_softmmu)
+{
+ mmu_ctx_t ctx;
+ int access_type;
+ int ret = 0;
+
+ if (rw == 2) {
+ /* code access */
+ rw = 0;
+ access_type = ACCESS_CODE;
+ } else {
+ /* data access */
+ access_type = env->access_type;
+ }
+ ret = get_physical_address(env, &ctx, address, rw, access_type);
+ if (ret == 0) {
+ tlb_set_page(env, address & TARGET_PAGE_MASK,
+ ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ ret = 0;
+ } else if (ret < 0) {
+ LOG_MMU_STATE(env);
+ if (access_type == ACCESS_CODE) {
+ switch (ret) {
+ case -1:
+ /* No matches in page tables or TLB */
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_6xx:
+ env->exception_index = POWERPC_EXCP_IFTLB;
+ env->error_code = 1 << 18;
+ env->spr[SPR_IMISS] = address;
+ env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
+ goto tlb_miss;
+ case POWERPC_MMU_SOFT_74xx:
+ env->exception_index = POWERPC_EXCP_IFTLB;
+ goto tlb_miss_74xx;
+ case POWERPC_MMU_SOFT_4xx:
+ case POWERPC_MMU_SOFT_4xx_Z:
+ env->exception_index = POWERPC_EXCP_ITLB;
+ env->error_code = 0;
+ env->spr[SPR_40x_DEAR] = address;
+ env->spr[SPR_40x_ESR] = 0x00000000;
+ break;
+ case POWERPC_MMU_32B:
+ case POWERPC_MMU_601:
+#if defined(TARGET_PPC64)
+ case POWERPC_MMU_620:
+ case POWERPC_MMU_64B:
+#endif
+ env->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x40000000;
+ break;
+ case POWERPC_MMU_BOOKE:
+ env->exception_index = POWERPC_EXCP_ITLB;
+ env->error_code = 0;
+ env->spr[SPR_BOOKE_DEAR] = address;
+ return -1;
+ case POWERPC_MMU_BOOKE_FSL:
+ /* XXX: TODO */
+ cpu_abort(env, "BookE FSL MMU model is not implemented\n");
+ return -1;
+ case POWERPC_MMU_MPC8xx:
+ /* XXX: TODO */
+ cpu_abort(env, "MPC8xx MMU model is not implemented\n");
+ break;
+ case POWERPC_MMU_REAL:
+ cpu_abort(env, "PowerPC in real mode should never raise "
+ "any MMU exceptions\n");
+ return -1;
+ default:
+ cpu_abort(env, "Unknown or invalid MMU model\n");
+ return -1;
+ }
+ break;
+ case -2:
+ /* Access rights violation */
+ env->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x08000000;
+ break;
+ case -3:
+ /* No execute protection violation */
+ if (env->mmu_model == POWERPC_MMU_BOOKE) {
+ env->spr[SPR_BOOKE_ESR] = 0x00000000;
+ }
+ env->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x10000000;
+ break;
+ case -4:
+ /* Direct store exception */
+ /* No code fetch is allowed in direct-store areas */
+ env->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x10000000;
+ break;
+#if defined(TARGET_PPC64)
+ case -5:
+ /* No match in segment table */
+ if (env->mmu_model == POWERPC_MMU_620) {
+ env->exception_index = POWERPC_EXCP_ISI;
+ /* XXX: this might be incorrect */
+ env->error_code = 0x40000000;
+ } else {
+ env->exception_index = POWERPC_EXCP_ISEG;
+ env->error_code = 0;
+ }
+ break;
+#endif
+ }
+ } else {
+ switch (ret) {
+ case -1:
+ /* No matches in page tables or TLB */
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_6xx:
+ if (rw == 1) {
+ env->exception_index = POWERPC_EXCP_DSTLB;
+ env->error_code = 1 << 16;
+ } else {
+ env->exception_index = POWERPC_EXCP_DLTLB;
+ env->error_code = 0;
+ }
+ env->spr[SPR_DMISS] = address;
+ env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
+ tlb_miss:
+ env->error_code |= ctx.key << 19;
+ env->spr[SPR_HASH1] = ctx.pg_addr[0];
+ env->spr[SPR_HASH2] = ctx.pg_addr[1];
+ break;
+ case POWERPC_MMU_SOFT_74xx:
+ if (rw == 1) {
+ env->exception_index = POWERPC_EXCP_DSTLB;
+ } else {
+ env->exception_index = POWERPC_EXCP_DLTLB;
+ }
+ tlb_miss_74xx:
+ /* Implement LRU algorithm */
+ env->error_code = ctx.key << 19;
+ env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) |
+ ((env->last_way + 1) & (env->nb_ways - 1));
+ env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem;
+ break;
+ case POWERPC_MMU_SOFT_4xx:
+ case POWERPC_MMU_SOFT_4xx_Z:
+ env->exception_index = POWERPC_EXCP_DTLB;
+ env->error_code = 0;
+ env->spr[SPR_40x_DEAR] = address;
+ if (rw)
+ env->spr[SPR_40x_ESR] = 0x00800000;
+ else
+ env->spr[SPR_40x_ESR] = 0x00000000;
+ break;
+ case POWERPC_MMU_32B:
+ case POWERPC_MMU_601:
+#if defined(TARGET_PPC64)
+ case POWERPC_MMU_620:
+ case POWERPC_MMU_64B:
+#endif
+ env->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = address;
+ if (rw == 1)
+ env->spr[SPR_DSISR] = 0x42000000;
+ else
+ env->spr[SPR_DSISR] = 0x40000000;
+ break;
+ case POWERPC_MMU_MPC8xx:
+ /* XXX: TODO */
+ cpu_abort(env, "MPC8xx MMU model is not implemented\n");
+ break;
+ case POWERPC_MMU_BOOKE:
+ env->exception_index = POWERPC_EXCP_DTLB;
+ env->error_code = 0;
+ env->spr[SPR_BOOKE_DEAR] = address;
+ env->spr[SPR_BOOKE_ESR] = rw ? 1 << ESR_ST : 0;
+ return -1;
+ case POWERPC_MMU_BOOKE_FSL:
+ /* XXX: TODO */
+ cpu_abort(env, "BookE FSL MMU model is not implemented\n");
+ return -1;
+ case POWERPC_MMU_REAL:
+ cpu_abort(env, "PowerPC in real mode should never raise "
+ "any MMU exceptions\n");
+ return -1;
+ default:
+ cpu_abort(env, "Unknown or invalid MMU model\n");
+ return -1;
+ }
+ break;
+ case -2:
+ /* Access rights violation */
+ env->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ if (env->mmu_model == POWERPC_MMU_SOFT_4xx
+ || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) {
+ env->spr[SPR_40x_DEAR] = address;
+ if (rw) {
+ env->spr[SPR_40x_ESR] |= 0x00800000;
+ }
+ } else if (env->mmu_model == POWERPC_MMU_BOOKE) {
+ env->spr[SPR_BOOKE_DEAR] = address;
+ env->spr[SPR_BOOKE_ESR] = rw ? 1 << ESR_ST : 0;
+ } else {
+ env->spr[SPR_DAR] = address;
+ if (rw == 1) {
+ env->spr[SPR_DSISR] = 0x0A000000;
+ } else {
+ env->spr[SPR_DSISR] = 0x08000000;
+ }
+ }
+ break;
+ case -4:
+ /* Direct store exception */
+ switch (access_type) {
+ case ACCESS_FLOAT:
+ /* Floating point load/store */
+ env->exception_index = POWERPC_EXCP_ALIGN;
+ env->error_code = POWERPC_EXCP_ALIGN_FP;
+ env->spr[SPR_DAR] = address;
+ break;
+ case ACCESS_RES:
+ /* lwarx, ldarx or stwcx. */
+ env->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = address;
+ if (rw == 1)
+ env->spr[SPR_DSISR] = 0x06000000;
+ else
+ env->spr[SPR_DSISR] = 0x04000000;
+ break;
+ case ACCESS_EXT:
+ /* eciwx or ecowx */
+ env->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = address;
+ if (rw == 1)
+ env->spr[SPR_DSISR] = 0x06100000;
+ else
+ env->spr[SPR_DSISR] = 0x04100000;
+ break;
+ default:
+ printf("DSI: invalid exception (%d)\n", ret);
+ env->exception_index = POWERPC_EXCP_PROGRAM;
+ env->error_code =
+ POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
+ env->spr[SPR_DAR] = address;
+ break;
+ }
+ break;
+#if defined(TARGET_PPC64)
+ case -5:
+ /* No match in segment table */
+ if (env->mmu_model == POWERPC_MMU_620) {
+ env->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = address;
+ /* XXX: this might be incorrect */
+ if (rw == 1)
+ env->spr[SPR_DSISR] = 0x42000000;
+ else
+ env->spr[SPR_DSISR] = 0x40000000;
+ } else {
+ env->exception_index = POWERPC_EXCP_DSEG;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = address;
+ }
+ break;
+#endif
+ }
+ }
+#if 0
+ printf("%s: set exception to %d %02x\n", __func__,
+ env->exception, env->error_code);
+#endif
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/*****************************************************************************/
+/* BATs management */
+#if !defined(FLUSH_ALL_TLBS)
+static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
+ target_ulong mask)
+{
+ target_ulong base, end, page;
+
+ base = BATu & ~0x0001FFFF;
+ end = base + mask + 0x00020000;
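+    /* Note (assuming the usual 32-bit BAT layout): BEPI occupies the upper
+     * 15 bits of BATu, hence the ~0x0001FFFF alignment, and 'mask' comes
+     * from the BL field, so mask + 0x00020000 is the block length
+     * (128 kB up to 256 MB) covered by the flush loop below.
+     */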
+ LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
+ TARGET_FMT_lx ")\n", base, end, mask);
+ for (page = base; page != end; page += TARGET_PAGE_SIZE)
+ tlb_flush_page(env, page);
+ LOG_BATS("Flush done\n");
+}
+#endif
+
+static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
+ target_ulong value)
+{
+ LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID,
+ nr, ul == 0 ? 'u' : 'l', value, env->nip);
+}
+
+void ppc_store_ibatu (CPUPPCState *env, int nr, target_ulong value)
+{
+ target_ulong mask;
+
+ dump_store_bat(env, 'I', 0, nr, value);
+ if (env->IBAT[0][nr] != value) {
+ mask = (value << 15) & 0x0FFE0000UL;
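+        /* Illustration: a BL field of 0x7FF yields mask 0x0FFE0000 (a 256 MB
+         * block), while BL = 0 yields mask 0 (the minimum 128 kB block). */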
+#if !defined(FLUSH_ALL_TLBS)
+ do_invalidate_BAT(env, env->IBAT[0][nr], mask);
+#endif
+ /* When storing valid upper BAT, mask BEPI and BRPN
+ * and invalidate all TLBs covered by this BAT
+ */
+ mask = (value << 15) & 0x0FFE0000UL;
+ env->IBAT[0][nr] = (value & 0x00001FFFUL) |
+ (value & ~0x0001FFFFUL & ~mask);
+ env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
+ (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
+#if !defined(FLUSH_ALL_TLBS)
+ do_invalidate_BAT(env, env->IBAT[0][nr], mask);
+#else
+ tlb_flush(env, 1);
+#endif
+ }
+}
+
+void ppc_store_ibatl (CPUPPCState *env, int nr, target_ulong value)
+{
+ dump_store_bat(env, 'I', 1, nr, value);
+ env->IBAT[1][nr] = value;
+}
+
+void ppc_store_dbatu (CPUPPCState *env, int nr, target_ulong value)
+{
+ target_ulong mask;
+
+ dump_store_bat(env, 'D', 0, nr, value);
+ if (env->DBAT[0][nr] != value) {
+ /* When storing valid upper BAT, mask BEPI and BRPN
+ * and invalidate all TLBs covered by this BAT
+ */
+ mask = (value << 15) & 0x0FFE0000UL;
+#if !defined(FLUSH_ALL_TLBS)
+ do_invalidate_BAT(env, env->DBAT[0][nr], mask);
+#endif
+ mask = (value << 15) & 0x0FFE0000UL;
+ env->DBAT[0][nr] = (value & 0x00001FFFUL) |
+ (value & ~0x0001FFFFUL & ~mask);
+ env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
+ (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
+#if !defined(FLUSH_ALL_TLBS)
+ do_invalidate_BAT(env, env->DBAT[0][nr], mask);
+#else
+ tlb_flush(env, 1);
+#endif
+ }
+}
+
+void ppc_store_dbatl (CPUPPCState *env, int nr, target_ulong value)
+{
+ dump_store_bat(env, 'D', 1, nr, value);
+ env->DBAT[1][nr] = value;
+}
+
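+/* The 601 has a single set of unified BATs rather than separate instruction
+ * and data BATs, which is why the 601-specific helpers below mirror every
+ * IBAT write into the corresponding DBAT entry. */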
+void ppc_store_ibatu_601 (CPUPPCState *env, int nr, target_ulong value)
+{
+ target_ulong mask;
+#if defined(FLUSH_ALL_TLBS)
+ int do_inval;
+#endif
+
+ dump_store_bat(env, 'I', 0, nr, value);
+ if (env->IBAT[0][nr] != value) {
+#if defined(FLUSH_ALL_TLBS)
+ do_inval = 0;
+#endif
+ mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
+ if (env->IBAT[1][nr] & 0x40) {
+ /* Invalidate BAT only if it is valid */
+#if !defined(FLUSH_ALL_TLBS)
+ do_invalidate_BAT(env, env->IBAT[0][nr], mask);
+#else
+ do_inval = 1;
+#endif
+ }
+ /* When storing valid upper BAT, mask BEPI and BRPN
+ * and invalidate all TLBs covered by this BAT
+ */
+ env->IBAT[0][nr] = (value & 0x00001FFFUL) |
+ (value & ~0x0001FFFFUL & ~mask);
+ env->DBAT[0][nr] = env->IBAT[0][nr];
+ if (env->IBAT[1][nr] & 0x40) {
+#if !defined(FLUSH_ALL_TLBS)
+ do_invalidate_BAT(env, env->IBAT[0][nr], mask);
+#else
+ do_inval = 1;
+#endif
+ }
+#if defined(FLUSH_ALL_TLBS)
+ if (do_inval)
+ tlb_flush(env, 1);
+#endif
+ }
+}
+
+void ppc_store_ibatl_601 (CPUPPCState *env, int nr, target_ulong value)
+{
+ target_ulong mask;
+#if defined(FLUSH_ALL_TLBS)
+ int do_inval;
+#endif
+
+ dump_store_bat(env, 'I', 1, nr, value);
+ if (env->IBAT[1][nr] != value) {
+#if defined(FLUSH_ALL_TLBS)
+ do_inval = 0;
+#endif
+ if (env->IBAT[1][nr] & 0x40) {
+#if !defined(FLUSH_ALL_TLBS)
+ mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
+ do_invalidate_BAT(env, env->IBAT[0][nr], mask);
+#else
+ do_inval = 1;
+#endif
+ }
+ if (value & 0x40) {
+#if !defined(FLUSH_ALL_TLBS)
+ mask = (value << 17) & 0x0FFE0000UL;
+ do_invalidate_BAT(env, env->IBAT[0][nr], mask);
+#else
+ do_inval = 1;
+#endif
+ }
+ env->IBAT[1][nr] = value;
+ env->DBAT[1][nr] = value;
+#if defined(FLUSH_ALL_TLBS)
+ if (do_inval)
+ tlb_flush(env, 1);
+#endif
+ }
+}
+
+/*****************************************************************************/
+/* TLB management */
+void ppc_tlb_invalidate_all (CPUPPCState *env)
+{
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_6xx:
+ case POWERPC_MMU_SOFT_74xx:
+ ppc6xx_tlb_invalidate_all(env);
+ break;
+ case POWERPC_MMU_SOFT_4xx:
+ case POWERPC_MMU_SOFT_4xx_Z:
+ ppc4xx_tlb_invalidate_all(env);
+ break;
+ case POWERPC_MMU_REAL:
+ cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n");
+ break;
+ case POWERPC_MMU_MPC8xx:
+ /* XXX: TODO */
+ cpu_abort(env, "MPC8xx MMU model is not implemented\n");
+ break;
+ case POWERPC_MMU_BOOKE:
+ tlb_flush(env, 1);
+ break;
+ case POWERPC_MMU_BOOKE_FSL:
+ /* XXX: TODO */
+ if (!kvm_enabled())
+ cpu_abort(env, "BookE MMU model is not implemented\n");
+ break;
+ case POWERPC_MMU_32B:
+ case POWERPC_MMU_601:
+#if defined(TARGET_PPC64)
+ case POWERPC_MMU_620:
+ case POWERPC_MMU_64B:
+#endif /* defined(TARGET_PPC64) */
+ tlb_flush(env, 1);
+ break;
+ default:
+ /* XXX: TODO */
+ cpu_abort(env, "Unknown MMU model\n");
+ break;
+ }
+}
+
+void ppc_tlb_invalidate_one (CPUPPCState *env, target_ulong addr)
+{
+#if !defined(FLUSH_ALL_TLBS)
+ addr &= TARGET_PAGE_MASK;
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_6xx:
+ case POWERPC_MMU_SOFT_74xx:
+ ppc6xx_tlb_invalidate_virt(env, addr, 0);
+ if (env->id_tlbs == 1)
+ ppc6xx_tlb_invalidate_virt(env, addr, 1);
+ break;
+ case POWERPC_MMU_SOFT_4xx:
+ case POWERPC_MMU_SOFT_4xx_Z:
+ ppc4xx_tlb_invalidate_virt(env, addr, env->spr[SPR_40x_PID]);
+ break;
+ case POWERPC_MMU_REAL:
+ cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n");
+ break;
+ case POWERPC_MMU_MPC8xx:
+ /* XXX: TODO */
+ cpu_abort(env, "MPC8xx MMU model is not implemented\n");
+ break;
+ case POWERPC_MMU_BOOKE:
+ /* XXX: TODO */
+ cpu_abort(env, "BookE MMU model is not implemented\n");
+ break;
+ case POWERPC_MMU_BOOKE_FSL:
+ /* XXX: TODO */
+ cpu_abort(env, "BookE FSL MMU model is not implemented\n");
+ break;
+ case POWERPC_MMU_32B:
+ case POWERPC_MMU_601:
+            /* tlbie invalidates TLBs for all segments */
+ addr &= ~((target_ulong)-1ULL << 28);
+            /* XXX: this case should be optimized by
+ * giving a mask to tlb_flush_page
+ */
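+            /* The unrolled calls below are equivalent to (sketch):
+             *   for (i = 0; i < 16; i++)
+             *       tlb_flush_page(env, addr | (i << 28));
+             * one flush per possible 256 MB segment.
+             */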
+ tlb_flush_page(env, addr | (0x0 << 28));
+ tlb_flush_page(env, addr | (0x1 << 28));
+ tlb_flush_page(env, addr | (0x2 << 28));
+ tlb_flush_page(env, addr | (0x3 << 28));
+ tlb_flush_page(env, addr | (0x4 << 28));
+ tlb_flush_page(env, addr | (0x5 << 28));
+ tlb_flush_page(env, addr | (0x6 << 28));
+ tlb_flush_page(env, addr | (0x7 << 28));
+ tlb_flush_page(env, addr | (0x8 << 28));
+ tlb_flush_page(env, addr | (0x9 << 28));
+ tlb_flush_page(env, addr | (0xA << 28));
+ tlb_flush_page(env, addr | (0xB << 28));
+ tlb_flush_page(env, addr | (0xC << 28));
+ tlb_flush_page(env, addr | (0xD << 28));
+ tlb_flush_page(env, addr | (0xE << 28));
+ tlb_flush_page(env, addr | (0xF << 28));
+ break;
+#if defined(TARGET_PPC64)
+ case POWERPC_MMU_620:
+ case POWERPC_MMU_64B:
+        /* tlbie invalidates TLBs for all segments */
+        /* XXX: since there are too many segments to invalidate individually,
+         * and Qemu still lacks a tlb_flush_mask(env, n, mask) helper,
+ * we just invalidate all TLBs
+ */
+ tlb_flush(env, 1);
+ break;
+#endif /* defined(TARGET_PPC64) */
+ default:
+ /* XXX: TODO */
+ cpu_abort(env, "Unknown MMU model\n");
+ break;
+ }
+#else
+ ppc_tlb_invalidate_all(env);
+#endif
+}
+
+/*****************************************************************************/
+/* Special registers manipulation */
+#if defined(TARGET_PPC64)
+void ppc_store_asr (CPUPPCState *env, target_ulong value)
+{
+ if (env->asr != value) {
+ env->asr = value;
+ tlb_flush(env, 1);
+ }
+}
+#endif
+
+void ppc_store_sdr1 (CPUPPCState *env, target_ulong value)
+{
+ LOG_MMU("%s: " TARGET_FMT_lx "\n", __func__, value);
+ if (env->sdr1 != value) {
+ /* XXX: for PowerPC 64, should check that the HTABSIZE value
+ * is <= 28
+ */
+ env->sdr1 = value;
+ tlb_flush(env, 1);
+ }
+}
+
+#if defined(TARGET_PPC64)
+target_ulong ppc_load_sr (CPUPPCState *env, int slb_nr)
+{
+ // XXX
+ return 0;
+}
+#endif
+
+void ppc_store_sr (CPUPPCState *env, int srnum, target_ulong value)
+{
+ LOG_MMU("%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
+ srnum, value, env->sr[srnum]);
+#if defined(TARGET_PPC64)
+ if (env->mmu_model & POWERPC_MMU_64) {
+ uint64_t rb = 0, rs = 0;
+
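+        /* Sketch of the intent: build an RB/RS pair shaped like the slbmte
+         * operands so that a 32-bit segment register write is emulated
+         * through the 64-bit SLB (see the ppc_store_slb() call below). */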
+ /* ESID = srnum */
+ rb |= ((uint32_t)srnum & 0xf) << 28;
+ /* Set the valid bit */
+ rb |= 1 << 27;
+ /* Index = ESID */
+ rb |= (uint32_t)srnum;
+
+ /* VSID = VSID */
+ rs |= (value & 0xfffffff) << 12;
+ /* flags = flags */
+ rs |= ((value >> 27) & 0xf) << 9;
+
+ ppc_store_slb(env, rb, rs);
+ } else
+#endif
+ if (env->sr[srnum] != value) {
+ env->sr[srnum] = value;
+/* Invalidating 256MB of virtual memory in 4kB pages takes far longer than
+   flushing the whole TLB. */
+#if !defined(FLUSH_ALL_TLBS) && 0
+ {
+ target_ulong page, end;
+ /* Invalidate 256 MB of virtual memory */
+ page = (16 << 20) * srnum;
+ end = page + (16 << 20);
+ for (; page != end; page += TARGET_PAGE_SIZE)
+ tlb_flush_page(env, page);
+ }
+#else
+ tlb_flush(env, 1);
+#endif
+ }
+}
+#endif /* !defined (CONFIG_USER_ONLY) */
+
+/* GDBstub can read and write MSR... */
+void ppc_store_msr (CPUPPCState *env, target_ulong value)
+{
+ hreg_store_msr(env, value, 0);
+}
+
+/*****************************************************************************/
+/* Exception processing */
+#if defined (CONFIG_USER_ONLY)
+void do_interrupt (CPUState *env)
+{
+ env->exception_index = POWERPC_EXCP_NONE;
+ env->error_code = 0;
+}
+
+void ppc_hw_interrupt (CPUState *env)
+{
+ env->exception_index = POWERPC_EXCP_NONE;
+ env->error_code = 0;
+}
+#else /* defined (CONFIG_USER_ONLY) */
+static inline void dump_syscall(CPUState *env)
+{
+ qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 " r3=%016" PRIx64
+ " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
+ " nip=" TARGET_FMT_lx "\n",
+ ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
+ ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
+ ppc_dump_gpr(env, 6), env->nip);
+}
+
+/* Note that this function should be greatly optimized
+ * when called with a constant excp, from ppc_hw_interrupt
+ */
+static inline void powerpc_excp(CPUState *env, int excp_model, int excp)
+{
+ target_ulong msr, new_msr, vector;
+ int srr0, srr1, asrr0, asrr1;
+ int lpes0, lpes1, lev;
+
+ if (0) {
+ /* XXX: find a suitable condition to enable the hypervisor mode */
+ lpes0 = (env->spr[SPR_LPCR] >> 1) & 1;
+ lpes1 = (env->spr[SPR_LPCR] >> 2) & 1;
+ } else {
+ /* Those values ensure we won't enter the hypervisor mode */
+ lpes0 = 0;
+ lpes1 = 1;
+ }
+
+ qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
+ " => %08x (%02x)\n", env->nip, excp, env->error_code);
+
+ /* new srr1 value excluding must-be-zero bits */
+ msr = env->msr & ~0x783f0000ULL;
+
+ /* new interrupt handler msr */
+ new_msr = env->msr & ((target_ulong)1 << MSR_ME);
+
+ /* target registers */
+ srr0 = SPR_SRR0;
+ srr1 = SPR_SRR1;
+ asrr0 = -1;
+ asrr1 = -1;
+
+ switch (excp) {
+ case POWERPC_EXCP_NONE:
+ /* Should never happen */
+ return;
+ case POWERPC_EXCP_CRITICAL: /* Critical input */
+ switch (excp_model) {
+ case POWERPC_EXCP_40x:
+ srr0 = SPR_40x_SRR2;
+ srr1 = SPR_40x_SRR3;
+ break;
+ case POWERPC_EXCP_BOOKE:
+ srr0 = SPR_BOOKE_CSRR0;
+ srr1 = SPR_BOOKE_CSRR1;
+ break;
+ case POWERPC_EXCP_G2:
+ break;
+ default:
+ goto excp_invalid;
+ }
+ goto store_next;
+ case POWERPC_EXCP_MCHECK: /* Machine check exception */
+ if (msr_me == 0) {
+ /* Machine check exception is not enabled.
+ * Enter checkstop state.
+ */
+ if (qemu_log_enabled()) {
+ qemu_log("Machine check while not allowed. "
+ "Entering checkstop state\n");
+ } else {
+ fprintf(stderr, "Machine check while not allowed. "
+ "Entering checkstop state\n");
+ }
+ env->halted = 1;
+ env->interrupt_request |= CPU_INTERRUPT_EXITTB;
+ }
+ if (0) {
+ /* XXX: find a suitable condition to enable the hypervisor mode */
+ new_msr |= (target_ulong)MSR_HVB;
+ }
+
+ /* machine check exceptions don't have ME set */
+ new_msr &= ~((target_ulong)1 << MSR_ME);
+
+ /* XXX: should also have something loaded in DAR / DSISR */
+ switch (excp_model) {
+ case POWERPC_EXCP_40x:
+ srr0 = SPR_40x_SRR2;
+ srr1 = SPR_40x_SRR3;
+ break;
+ case POWERPC_EXCP_BOOKE:
+ srr0 = SPR_BOOKE_MCSRR0;
+ srr1 = SPR_BOOKE_MCSRR1;
+ asrr0 = SPR_BOOKE_CSRR0;
+ asrr1 = SPR_BOOKE_CSRR1;
+ break;
+ default:
+ break;
+ }
+ goto store_next;
+ case POWERPC_EXCP_DSI: /* Data storage exception */
+ LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx
+ "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
+ if (lpes1 == 0)
+ new_msr |= (target_ulong)MSR_HVB;
+ goto store_next;
+ case POWERPC_EXCP_ISI: /* Instruction storage exception */
+ LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
+ "\n", msr, env->nip);
+ if (lpes1 == 0)
+ new_msr |= (target_ulong)MSR_HVB;
+ msr |= env->error_code;
+ goto store_next;
+ case POWERPC_EXCP_EXTERNAL: /* External input */
+ if (lpes0 == 1)
+ new_msr |= (target_ulong)MSR_HVB;
+ goto store_next;
+ case POWERPC_EXCP_ALIGN: /* Alignment exception */
+ if (lpes1 == 0)
+ new_msr |= (target_ulong)MSR_HVB;
+ /* XXX: this is false */
+ /* Get rS/rD and rA from faulting opcode */
+ env->spr[SPR_DSISR] |= (ldl_code((env->nip - 4)) & 0x03FF0000) >> 16;
+ goto store_current;
+ case POWERPC_EXCP_PROGRAM: /* Program exception */
+ switch (env->error_code & ~0xF) {
+ case POWERPC_EXCP_FP:
+ if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
+ LOG_EXCP("Ignore floating point exception\n");
+ env->exception_index = POWERPC_EXCP_NONE;
+ env->error_code = 0;
+ return;
+ }
+ if (lpes1 == 0)
+ new_msr |= (target_ulong)MSR_HVB;
+ msr |= 0x00100000;
+ if (msr_fe0 == msr_fe1)
+ goto store_next;
+ msr |= 0x00010000;
+ break;
+ case POWERPC_EXCP_INVAL:
+ LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
+ if (lpes1 == 0)
+ new_msr |= (target_ulong)MSR_HVB;
+ msr |= 0x00080000;
+ break;
+ case POWERPC_EXCP_PRIV:
+ if (lpes1 == 0)
+ new_msr |= (target_ulong)MSR_HVB;
+ msr |= 0x00040000;
+ break;
+ case POWERPC_EXCP_TRAP:
+ if (lpes1 == 0)
+ new_msr |= (target_ulong)MSR_HVB;
+ msr |= 0x00020000;
+ break;
+ default:
+ /* Should never occur */
+ cpu_abort(env, "Invalid program exception %d. Aborting\n",
+ env->error_code);
+ break;
+ }
+ goto store_current;
+ case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
+ if (lpes1 == 0)
+ new_msr |= (target_ulong)MSR_HVB;
+ goto store_current;
+ case POWERPC_EXCP_SYSCALL: /* System call exception */
+ dump_syscall(env);
+ lev = env->error_code;
+ if (lev == 1 || (lpes0 == 0 && lpes1 == 0))
+ new_msr |= (target_ulong)MSR_HVB;
+ goto store_next;
+ case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
+ goto store_current;
+ case POWERPC_EXCP_DECR: /* Decrementer exception */
+ if (lpes1 == 0)
+ new_msr |= (target_ulong)MSR_HVB;
+ goto store_next;
+ case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
+ /* FIT on 4xx */
+ LOG_EXCP("FIT exception\n");
+ goto store_next;
+ case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
+ LOG_EXCP("WDT exception\n");
+ switch (excp_model) {
+ case POWERPC_EXCP_BOOKE:
+ srr0 = SPR_BOOKE_CSRR0;
+ srr1 = SPR_BOOKE_CSRR1;
+ break;
+ default:
+ break;
+ }
+ goto store_next;
+ case POWERPC_EXCP_DTLB: /* Data TLB error */
+ goto store_next;
+ case POWERPC_EXCP_ITLB: /* Instruction TLB error */
+ goto store_next;
+ case POWERPC_EXCP_DEBUG: /* Debug interrupt */
+ switch (excp_model) {
+ case POWERPC_EXCP_BOOKE:
+ srr0 = SPR_BOOKE_DSRR0;
+ srr1 = SPR_BOOKE_DSRR1;
+ asrr0 = SPR_BOOKE_CSRR0;
+ asrr1 = SPR_BOOKE_CSRR1;
+ break;
+ default:
+ break;
+ }
+ /* XXX: TODO */
+ cpu_abort(env, "Debug exception is not implemented yet !\n");
+ goto store_next;
+ case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable */
+ goto store_current;
+ case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */
+ /* XXX: TODO */
+ cpu_abort(env, "Embedded floating point data exception "
+ "is not implemented yet !\n");
+ goto store_next;
+ case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */
+ /* XXX: TODO */
+ cpu_abort(env, "Embedded floating point round exception "
+ "is not implemented yet !\n");
+ goto store_next;
+ case POWERPC_EXCP_EPERFM: /* Embedded performance monitor interrupt */
+ /* XXX: TODO */
+ cpu_abort(env,
+ "Performance counter exception is not implemented yet !\n");
+ goto store_next;
+ case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
+ /* XXX: TODO */
+ cpu_abort(env,
+ "Embedded doorbell interrupt is not implemented yet !\n");
+ goto store_next;
+ case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
+ switch (excp_model) {
+ case POWERPC_EXCP_BOOKE:
+ srr0 = SPR_BOOKE_CSRR0;
+ srr1 = SPR_BOOKE_CSRR1;
+ break;
+ default:
+ break;
+ }
+ /* XXX: TODO */
+ cpu_abort(env, "Embedded doorbell critical interrupt "
+ "is not implemented yet !\n");
+ goto store_next;
+ case POWERPC_EXCP_RESET: /* System reset exception */
+ if (msr_pow) {
+ /* indicate that we resumed from power save mode */
+ msr |= 0x10000;
+ } else {
+ new_msr &= ~((target_ulong)1 << MSR_ME);
+ }
+
+ if (0) {
+ /* XXX: find a suitable condition to enable the hypervisor mode */
+ new_msr |= (target_ulong)MSR_HVB;
+ }
+ goto store_next;
+ case POWERPC_EXCP_DSEG: /* Data segment exception */
+ if (lpes1 == 0)
+ new_msr |= (target_ulong)MSR_HVB;
+ goto store_next;
+ case POWERPC_EXCP_ISEG: /* Instruction segment exception */
+ if (lpes1 == 0)
+ new_msr |= (target_ulong)MSR_HVB;
+ goto store_next;
+ case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
+ srr0 = SPR_HSRR0;
+ srr1 = SPR_HSRR1;
+ new_msr |= (target_ulong)MSR_HVB;
+ new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
+ goto store_next;
+ case POWERPC_EXCP_TRACE: /* Trace exception */
+ if (lpes1 == 0)
+ new_msr |= (target_ulong)MSR_HVB;
+ goto store_next;
+ case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
+ srr0 = SPR_HSRR0;
+ srr1 = SPR_HSRR1;
+ new_msr |= (target_ulong)MSR_HVB;
+ new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
+ goto store_next;
+ case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */
+ srr0 = SPR_HSRR0;
+ srr1 = SPR_HSRR1;
+ new_msr |= (target_ulong)MSR_HVB;
+ new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
+ goto store_next;
+ case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
+ srr0 = SPR_HSRR0;
+ srr1 = SPR_HSRR1;
+ new_msr |= (target_ulong)MSR_HVB;
+ new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
+ goto store_next;
+ case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */
+ srr0 = SPR_HSRR0;
+ srr1 = SPR_HSRR1;
+ new_msr |= (target_ulong)MSR_HVB;
+ new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
+ goto store_next;
+ case POWERPC_EXCP_VPU: /* Vector unavailable exception */
+ if (lpes1 == 0)
+ new_msr |= (target_ulong)MSR_HVB;
+ goto store_current;
+ case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */
+ LOG_EXCP("PIT exception\n");
+ goto store_next;
+ case POWERPC_EXCP_IO: /* IO error exception */
+ /* XXX: TODO */
+ cpu_abort(env, "601 IO error exception is not implemented yet !\n");
+ goto store_next;
+ case POWERPC_EXCP_RUNM: /* Run mode exception */
+ /* XXX: TODO */
+ cpu_abort(env, "601 run mode exception is not implemented yet !\n");
+ goto store_next;
+ case POWERPC_EXCP_EMUL: /* Emulation trap exception */
+ /* XXX: TODO */
+ cpu_abort(env, "602 emulation trap exception "
+ "is not implemented yet !\n");
+ goto store_next;
+ case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
+ if (lpes1 == 0) /* XXX: check this */
+ new_msr |= (target_ulong)MSR_HVB;
+ switch (excp_model) {
+ case POWERPC_EXCP_602:
+ case POWERPC_EXCP_603:
+ case POWERPC_EXCP_603E:
+ case POWERPC_EXCP_G2:
+ goto tlb_miss_tgpr;
+ case POWERPC_EXCP_7x5:
+ goto tlb_miss;
+ case POWERPC_EXCP_74xx:
+ goto tlb_miss_74xx;
+ default:
+ cpu_abort(env, "Invalid instruction TLB miss exception\n");
+ break;
+ }
+ break;
+ case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
+ if (lpes1 == 0) /* XXX: check this */
+ new_msr |= (target_ulong)MSR_HVB;
+ switch (excp_model) {
+ case POWERPC_EXCP_602:
+ case POWERPC_EXCP_603:
+ case POWERPC_EXCP_603E:
+ case POWERPC_EXCP_G2:
+ goto tlb_miss_tgpr;
+ case POWERPC_EXCP_7x5:
+ goto tlb_miss;
+ case POWERPC_EXCP_74xx:
+ goto tlb_miss_74xx;
+ default:
+ cpu_abort(env, "Invalid data load TLB miss exception\n");
+ break;
+ }
+ break;
+ case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
+ if (lpes1 == 0) /* XXX: check this */
+ new_msr |= (target_ulong)MSR_HVB;
+ switch (excp_model) {
+ case POWERPC_EXCP_602:
+ case POWERPC_EXCP_603:
+ case POWERPC_EXCP_603E:
+ case POWERPC_EXCP_G2:
+ tlb_miss_tgpr:
+ /* Swap temporary saved registers with GPRs */
+ if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
+ new_msr |= (target_ulong)1 << MSR_TGPR;
+ hreg_swap_gpr_tgpr(env);
+ }
+ goto tlb_miss;
+ case POWERPC_EXCP_7x5:
+ tlb_miss:
+#if defined (DEBUG_SOFTWARE_TLB)
+ if (qemu_log_enabled()) {
+ const char *es;
+ target_ulong *miss, *cmp;
+ int en;
+ if (excp == POWERPC_EXCP_IFTLB) {
+ es = "I";
+ en = 'I';
+ miss = &env->spr[SPR_IMISS];
+ cmp = &env->spr[SPR_ICMP];
+ } else {
+ if (excp == POWERPC_EXCP_DLTLB)
+ es = "DL";
+ else
+ es = "DS";
+ en = 'D';
+ miss = &env->spr[SPR_DMISS];
+ cmp = &env->spr[SPR_DCMP];
+ }
+ qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
+ TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
+ TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
+ env->spr[SPR_HASH1], env->spr[SPR_HASH2],
+ env->error_code);
+ }
+#endif
+ msr |= env->crf[0] << 28;
+ msr |= env->error_code; /* key, D/I, S/L bits */
+            /* Set way using an LRU mechanism */
+ msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
+ break;
+ case POWERPC_EXCP_74xx:
+ tlb_miss_74xx:
+#if defined (DEBUG_SOFTWARE_TLB)
+ if (qemu_log_enabled()) {
+ const char *es;
+ target_ulong *miss, *cmp;
+ int en;
+ if (excp == POWERPC_EXCP_IFTLB) {
+ es = "I";
+ en = 'I';
+ miss = &env->spr[SPR_TLBMISS];
+ cmp = &env->spr[SPR_PTEHI];
+ } else {
+ if (excp == POWERPC_EXCP_DLTLB)
+ es = "DL";
+ else
+ es = "DS";
+ en = 'D';
+ miss = &env->spr[SPR_TLBMISS];
+ cmp = &env->spr[SPR_PTEHI];
+ }
+ qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
+ TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
+ env->error_code);
+ }
+#endif
+ msr |= env->error_code; /* key bit */
+ break;
+ default:
+ cpu_abort(env, "Invalid data store TLB miss exception\n");
+ break;
+ }
+ goto store_next;
+ case POWERPC_EXCP_FPA: /* Floating-point assist exception */
+ /* XXX: TODO */
+ cpu_abort(env, "Floating point assist exception "
+ "is not implemented yet !\n");
+ goto store_next;
+ case POWERPC_EXCP_DABR: /* Data address breakpoint */
+ /* XXX: TODO */
+ cpu_abort(env, "DABR exception is not implemented yet !\n");
+ goto store_next;
+ case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
+ /* XXX: TODO */
+ cpu_abort(env, "IABR exception is not implemented yet !\n");
+ goto store_next;
+ case POWERPC_EXCP_SMI: /* System management interrupt */
+ /* XXX: TODO */
+ cpu_abort(env, "SMI exception is not implemented yet !\n");
+ goto store_next;
+ case POWERPC_EXCP_THERM: /* Thermal interrupt */
+ /* XXX: TODO */
+ cpu_abort(env, "Thermal management exception "
+ "is not implemented yet !\n");
+ goto store_next;
+ case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
+ if (lpes1 == 0)
+ new_msr |= (target_ulong)MSR_HVB;
+ /* XXX: TODO */
+ cpu_abort(env,
+ "Performance counter exception is not implemented yet !\n");
+ goto store_next;
+ case POWERPC_EXCP_VPUA: /* Vector assist exception */
+ /* XXX: TODO */
+ cpu_abort(env, "VPU assist exception is not implemented yet !\n");
+ goto store_next;
+ case POWERPC_EXCP_SOFTP: /* Soft patch exception */
+ /* XXX: TODO */
+ cpu_abort(env,
+ "970 soft-patch exception is not implemented yet !\n");
+ goto store_next;
+ case POWERPC_EXCP_MAINT: /* Maintenance exception */
+ /* XXX: TODO */
+ cpu_abort(env,
+ "970 maintenance exception is not implemented yet !\n");
+ goto store_next;
+ case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */
+ /* XXX: TODO */
+ cpu_abort(env, "Maskable external exception "
+ "is not implemented yet !\n");
+ goto store_next;
+ case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */
+ /* XXX: TODO */
+ cpu_abort(env, "Non maskable external exception "
+ "is not implemented yet !\n");
+ goto store_next;
+ default:
+ excp_invalid:
+ cpu_abort(env, "Invalid PowerPC exception %d. Aborting\n", excp);
+ break;
+ store_current:
+ /* save current instruction location */
+ env->spr[srr0] = env->nip - 4;
+ break;
+ store_next:
+ /* save next instruction location */
+ env->spr[srr0] = env->nip;
+ break;
+ }
+ /* Save MSR */
+ env->spr[srr1] = msr;
+    /* If any alternate SRR registers are defined, duplicate the saved values */
+ if (asrr0 != -1)
+ env->spr[asrr0] = env->spr[srr0];
+ if (asrr1 != -1)
+ env->spr[asrr1] = env->spr[srr1];
+    /* If we disabled any translation, flush TLBs */
+ if (new_msr & ((1 << MSR_IR) | (1 << MSR_DR)))
+ tlb_flush(env, 1);
+
+ if (msr_ile) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+
+ /* Jump to handler */
+ vector = env->excp_vectors[excp];
+ if (vector == (target_ulong)-1ULL) {
+ cpu_abort(env, "Raised an exception without defined vector %d\n",
+ excp);
+ }
+ vector |= env->excp_prefix;
+#if defined(TARGET_PPC64)
+ if (excp_model == POWERPC_EXCP_BOOKE) {
+ if (!msr_icm) {
+ vector = (uint32_t)vector;
+ } else {
+ new_msr |= (target_ulong)1 << MSR_CM;
+ }
+ } else {
+ if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) {
+ vector = (uint32_t)vector;
+ } else {
+ new_msr |= (target_ulong)1 << MSR_SF;
+ }
+ }
+#endif
+    /* XXX: we don't use hreg_store_msr here as we have already handled
+ * any special case that could occur. Just store MSR and update hflags
+ */
+ env->msr = new_msr & env->msr_mask;
+ hreg_compute_hflags(env);
+ env->nip = vector;
+ /* Reset exception state */
+ env->exception_index = POWERPC_EXCP_NONE;
+ env->error_code = 0;
+
+ if (env->mmu_model == POWERPC_MMU_BOOKE) {
+        /* XXX: BookE changes address space when switching modes;
+ we should probably implement that as different MMU indexes,
+ but for the moment we do it the slow way and flush all. */
+ tlb_flush(env, 1);
+ }
+}
+
+void do_interrupt (CPUState *env)
+{
+ powerpc_excp(env, env->excp_model, env->exception_index);
+}
+
+void ppc_hw_interrupt (CPUPPCState *env)
+{
+ int hdice;
+
+#if 0
+ qemu_log_mask(CPU_LOG_INT, "%s: %p pending %08x req %08x me %d ee %d\n",
+ __func__, env, env->pending_interrupts,
+ env->interrupt_request, (int)msr_me, (int)msr_ee);
+#endif
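+    /* Pending interrupts are checked in fixed priority order: reset, machine
+     * check, hypervisor decrementer, critical external input, then the
+     * MSR[EE]-gated sources below (watchdog, timers, decrementer, external
+     * input, doorbells, performance monitor, thermal). */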
+ /* External reset */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
+ powerpc_excp(env, env->excp_model, POWERPC_EXCP_RESET);
+ return;
+ }
+ /* Machine check exception */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
+ powerpc_excp(env, env->excp_model, POWERPC_EXCP_MCHECK);
+ return;
+ }
+#if 0 /* TODO */
+ /* External debug exception */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
+ powerpc_excp(env, env->excp_model, POWERPC_EXCP_DEBUG);
+ return;
+ }
+#endif
+ if (0) {
+ /* XXX: find a suitable condition to enable the hypervisor mode */
+ hdice = env->spr[SPR_LPCR] & 1;
+ } else {
+ hdice = 0;
+ }
+ if ((msr_ee != 0 || msr_hv == 0 || msr_pr != 0) && hdice != 0) {
+ /* Hypervisor decrementer exception */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
+ powerpc_excp(env, env->excp_model, POWERPC_EXCP_HDECR);
+ return;
+ }
+ }
+ if (msr_ce != 0) {
+ /* External critical interrupt */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
+ /* Taking a critical external interrupt does not clear the external
+ * critical interrupt status
+ */
+#if 0
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CEXT);
+#endif
+ powerpc_excp(env, env->excp_model, POWERPC_EXCP_CRITICAL);
+ return;
+ }
+ }
+ if (msr_ee != 0) {
+ /* Watchdog timer on embedded PowerPC */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
+ powerpc_excp(env, env->excp_model, POWERPC_EXCP_WDT);
+ return;
+ }
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
+ powerpc_excp(env, env->excp_model, POWERPC_EXCP_DOORCI);
+ return;
+ }
+ /* Fixed interval timer on embedded PowerPC */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
+ powerpc_excp(env, env->excp_model, POWERPC_EXCP_FIT);
+ return;
+ }
+ /* Programmable interval timer on embedded PowerPC */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
+ powerpc_excp(env, env->excp_model, POWERPC_EXCP_PIT);
+ return;
+ }
+ /* Decrementer exception */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
+ powerpc_excp(env, env->excp_model, POWERPC_EXCP_DECR);
+ return;
+ }
+ /* External interrupt */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
+ /* Taking an external interrupt does not clear the external
+ * interrupt status
+ */
+#if 0
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_EXT);
+#endif
+ powerpc_excp(env, env->excp_model, POWERPC_EXCP_EXTERNAL);
+ return;
+ }
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
+ powerpc_excp(env, env->excp_model, POWERPC_EXCP_DOORI);
+ return;
+ }
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
+ powerpc_excp(env, env->excp_model, POWERPC_EXCP_PERFM);
+ return;
+ }
+ /* Thermal interrupt */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
+ powerpc_excp(env, env->excp_model, POWERPC_EXCP_THERM);
+ return;
+ }
+ }
+}
+#endif /* !CONFIG_USER_ONLY */
+
+void cpu_dump_rfi (target_ulong RA, target_ulong msr)
+{
+ qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
+ TARGET_FMT_lx "\n", RA, msr);
+}
+
+void cpu_reset(CPUPPCState *env)
+{
+ target_ulong msr;
+
+ if (qemu_loglevel_mask(CPU_LOG_RESET)) {
+ qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
+ log_cpu_state(env, 0);
+ }
+
+ msr = (target_ulong)0;
+ if (0) {
+ /* XXX: find a suitable condition to enable the hypervisor mode */
+ msr |= (target_ulong)MSR_HVB;
+ }
+ msr |= (target_ulong)0 << MSR_AP; /* TO BE CHECKED */
+ msr |= (target_ulong)0 << MSR_SA; /* TO BE CHECKED */
+ msr |= (target_ulong)1 << MSR_EP;
+#if defined (DO_SINGLE_STEP) && 0
+ /* Single step trace mode */
+ msr |= (target_ulong)1 << MSR_SE;
+ msr |= (target_ulong)1 << MSR_BE;
+#endif
+#if defined(CONFIG_USER_ONLY)
+ msr |= (target_ulong)1 << MSR_FP; /* Allow floating point usage */
+ msr |= (target_ulong)1 << MSR_VR; /* Allow altivec usage */
+ msr |= (target_ulong)1 << MSR_SPE; /* Allow SPE usage */
+ msr |= (target_ulong)1 << MSR_PR;
+#else
+ env->excp_prefix = env->hreset_excp_prefix;
+ env->nip = env->hreset_vector | env->excp_prefix;
+ if (env->mmu_model != POWERPC_MMU_REAL)
+ ppc_tlb_invalidate_all(env);
+#endif
+ env->msr = msr & env->msr_mask;
+#if defined(TARGET_PPC64)
+ if (env->mmu_model & POWERPC_MMU_64)
+ env->msr |= (1ULL << MSR_SF);
+#endif
+ hreg_compute_hflags(env);
+ env->reserve_addr = (target_ulong)-1ULL;
+ /* Be sure no exception or interrupt is pending */
+ env->pending_interrupts = 0;
+ env->exception_index = POWERPC_EXCP_NONE;
+ env->error_code = 0;
+ /* Flush all TLBs */
+ tlb_flush(env, 1);
+}
+
+CPUPPCState *cpu_ppc_init (const char *cpu_model)
+{
+ CPUPPCState *env;
+ const ppc_def_t *def;
+
+ def = cpu_ppc_find_by_name(cpu_model);
+ if (!def)
+ return NULL;
+
+ env = qemu_mallocz(sizeof(CPUPPCState));
+ cpu_exec_init(env);
+ ppc_translate_init();
+ env->cpu_model_str = cpu_model;
+ cpu_ppc_register_internal(env, def);
+
+ qemu_init_vcpu(env);
+
+ return env;
+}
+
+void cpu_ppc_close (CPUPPCState *env)
+{
+ /* Should also remove all opcode tables... */
+ qemu_free(env);
+}
diff --git a/target-ppc/helper.h b/target-ppc/helper.h
new file mode 100644
index 0000000..2bf9283
--- /dev/null
+++ b/target-ppc/helper.h
@@ -0,0 +1,404 @@
+#include "def-helper.h"
+
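+/* Helper declarations: DEF_HELPER_n(name, ret, args...) expands through
+ * def-helper.h into a prototype for helper_<name>(). 'tl' denotes a
+ * target_ulong argument, while i32/i64 are fixed-width integers. */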
+DEF_HELPER_2(raise_exception_err, void, i32, i32)
+DEF_HELPER_1(raise_exception, void, i32)
+DEF_HELPER_3(tw, void, tl, tl, i32)
+#if defined(TARGET_PPC64)
+DEF_HELPER_3(td, void, tl, tl, i32)
+#endif
+#if !defined(CONFIG_USER_ONLY)
+DEF_HELPER_1(store_msr, void, tl)
+DEF_HELPER_0(rfi, void)
+DEF_HELPER_0(rfsvc, void)
+DEF_HELPER_0(40x_rfci, void)
+DEF_HELPER_0(rfci, void)
+DEF_HELPER_0(rfdi, void)
+DEF_HELPER_0(rfmci, void)
+#if defined(TARGET_PPC64)
+DEF_HELPER_0(rfid, void)
+DEF_HELPER_0(hrfid, void)
+#endif
+#endif
+
+DEF_HELPER_2(lmw, void, tl, i32)
+DEF_HELPER_2(stmw, void, tl, i32)
+DEF_HELPER_3(lsw, void, tl, i32, i32)
+DEF_HELPER_4(lswx, void, tl, i32, i32, i32)
+DEF_HELPER_3(stsw, void, tl, i32, i32)
+DEF_HELPER_1(dcbz, void, tl)
+DEF_HELPER_1(dcbz_970, void, tl)
+DEF_HELPER_1(icbi, void, tl)
+DEF_HELPER_4(lscbx, tl, tl, i32, i32, i32)
+
+#if defined(TARGET_PPC64)
+DEF_HELPER_FLAGS_2(mulhd, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(mulhdu, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64)
+DEF_HELPER_2(mulldo, i64, i64, i64)
+#endif
+
+DEF_HELPER_FLAGS_1(cntlzw, TCG_CALL_CONST | TCG_CALL_PURE, tl, tl)
+DEF_HELPER_FLAGS_1(popcntb, TCG_CALL_CONST | TCG_CALL_PURE, tl, tl)
+DEF_HELPER_2(sraw, tl, tl, tl)
+#if defined(TARGET_PPC64)
+DEF_HELPER_FLAGS_1(cntlzd, TCG_CALL_CONST | TCG_CALL_PURE, tl, tl)
+DEF_HELPER_FLAGS_1(popcntb_64, TCG_CALL_CONST | TCG_CALL_PURE, tl, tl)
+DEF_HELPER_2(srad, tl, tl, tl)
+#endif
+
+DEF_HELPER_FLAGS_1(cntlsw32, TCG_CALL_CONST | TCG_CALL_PURE, i32, i32)
+DEF_HELPER_FLAGS_1(cntlzw32, TCG_CALL_CONST | TCG_CALL_PURE, i32, i32)
+DEF_HELPER_FLAGS_2(brinc, TCG_CALL_CONST | TCG_CALL_PURE, tl, tl, tl)
+
+DEF_HELPER_0(float_check_status, void)
+#ifdef CONFIG_SOFTFLOAT
+DEF_HELPER_0(reset_fpstatus, void)
+#endif
+DEF_HELPER_2(compute_fprf, i32, i64, i32)
+DEF_HELPER_2(store_fpscr, void, i64, i32)
+DEF_HELPER_1(fpscr_clrbit, void, i32)
+DEF_HELPER_1(fpscr_setbit, void, i32)
+DEF_HELPER_1(float64_to_float32, i32, i64)
+DEF_HELPER_1(float32_to_float64, i64, i32)
+
+DEF_HELPER_3(fcmpo, void, i64, i64, i32)
+DEF_HELPER_3(fcmpu, void, i64, i64, i32)
+
+DEF_HELPER_1(fctiw, i64, i64)
+DEF_HELPER_1(fctiwz, i64, i64)
+#if defined(TARGET_PPC64)
+DEF_HELPER_1(fcfid, i64, i64)
+DEF_HELPER_1(fctid, i64, i64)
+DEF_HELPER_1(fctidz, i64, i64)
+#endif
+DEF_HELPER_1(frsp, i64, i64)
+DEF_HELPER_1(frin, i64, i64)
+DEF_HELPER_1(friz, i64, i64)
+DEF_HELPER_1(frip, i64, i64)
+DEF_HELPER_1(frim, i64, i64)
+
+DEF_HELPER_2(fadd, i64, i64, i64)
+DEF_HELPER_2(fsub, i64, i64, i64)
+DEF_HELPER_2(fmul, i64, i64, i64)
+DEF_HELPER_2(fdiv, i64, i64, i64)
+DEF_HELPER_3(fmadd, i64, i64, i64, i64)
+DEF_HELPER_3(fmsub, i64, i64, i64, i64)
+DEF_HELPER_3(fnmadd, i64, i64, i64, i64)
+DEF_HELPER_3(fnmsub, i64, i64, i64, i64)
+DEF_HELPER_1(fabs, i64, i64)
+DEF_HELPER_1(fnabs, i64, i64)
+DEF_HELPER_1(fneg, i64, i64)
+DEF_HELPER_1(fsqrt, i64, i64)
+DEF_HELPER_1(fre, i64, i64)
+DEF_HELPER_1(fres, i64, i64)
+DEF_HELPER_1(frsqrte, i64, i64)
+DEF_HELPER_3(fsel, i64, i64, i64, i64)
+
+#define dh_alias_avr ptr
+#define dh_ctype_avr ppc_avr_t *
+#define dh_is_signed_avr dh_is_signed_ptr
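+/* The three defines above register 'avr' as a helper argument type aliased
+ * to a pointer, so AltiVec registers are passed to helpers as ppc_avr_t *. */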
+
+DEF_HELPER_3(vaddubm, void, avr, avr, avr)
+DEF_HELPER_3(vadduhm, void, avr, avr, avr)
+DEF_HELPER_3(vadduwm, void, avr, avr, avr)
+DEF_HELPER_3(vsububm, void, avr, avr, avr)
+DEF_HELPER_3(vsubuhm, void, avr, avr, avr)
+DEF_HELPER_3(vsubuwm, void, avr, avr, avr)
+DEF_HELPER_3(vavgub, void, avr, avr, avr)
+DEF_HELPER_3(vavguh, void, avr, avr, avr)
+DEF_HELPER_3(vavguw, void, avr, avr, avr)
+DEF_HELPER_3(vavgsb, void, avr, avr, avr)
+DEF_HELPER_3(vavgsh, void, avr, avr, avr)
+DEF_HELPER_3(vavgsw, void, avr, avr, avr)
+DEF_HELPER_3(vminsb, void, avr, avr, avr)
+DEF_HELPER_3(vminsh, void, avr, avr, avr)
+DEF_HELPER_3(vminsw, void, avr, avr, avr)
+DEF_HELPER_3(vmaxsb, void, avr, avr, avr)
+DEF_HELPER_3(vmaxsh, void, avr, avr, avr)
+DEF_HELPER_3(vmaxsw, void, avr, avr, avr)
+DEF_HELPER_3(vminub, void, avr, avr, avr)
+DEF_HELPER_3(vminuh, void, avr, avr, avr)
+DEF_HELPER_3(vminuw, void, avr, avr, avr)
+DEF_HELPER_3(vmaxub, void, avr, avr, avr)
+DEF_HELPER_3(vmaxuh, void, avr, avr, avr)
+DEF_HELPER_3(vmaxuw, void, avr, avr, avr)
+DEF_HELPER_3(vcmpequb, void, avr, avr, avr)
+DEF_HELPER_3(vcmpequh, void, avr, avr, avr)
+DEF_HELPER_3(vcmpequw, void, avr, avr, avr)
+DEF_HELPER_3(vcmpgtub, void, avr, avr, avr)
+DEF_HELPER_3(vcmpgtuh, void, avr, avr, avr)
+DEF_HELPER_3(vcmpgtuw, void, avr, avr, avr)
+DEF_HELPER_3(vcmpgtsb, void, avr, avr, avr)
+DEF_HELPER_3(vcmpgtsh, void, avr, avr, avr)
+DEF_HELPER_3(vcmpgtsw, void, avr, avr, avr)
+DEF_HELPER_3(vcmpeqfp, void, avr, avr, avr)
+DEF_HELPER_3(vcmpgefp, void, avr, avr, avr)
+DEF_HELPER_3(vcmpgtfp, void, avr, avr, avr)
+DEF_HELPER_3(vcmpbfp, void, avr, avr, avr)
+DEF_HELPER_3(vcmpequb_dot, void, avr, avr, avr)
+DEF_HELPER_3(vcmpequh_dot, void, avr, avr, avr)
+DEF_HELPER_3(vcmpequw_dot, void, avr, avr, avr)
+DEF_HELPER_3(vcmpgtub_dot, void, avr, avr, avr)
+DEF_HELPER_3(vcmpgtuh_dot, void, avr, avr, avr)
+DEF_HELPER_3(vcmpgtuw_dot, void, avr, avr, avr)
+DEF_HELPER_3(vcmpgtsb_dot, void, avr, avr, avr)
+DEF_HELPER_3(vcmpgtsh_dot, void, avr, avr, avr)
+DEF_HELPER_3(vcmpgtsw_dot, void, avr, avr, avr)
+DEF_HELPER_3(vcmpeqfp_dot, void, avr, avr, avr)
+DEF_HELPER_3(vcmpgefp_dot, void, avr, avr, avr)
+DEF_HELPER_3(vcmpgtfp_dot, void, avr, avr, avr)
+DEF_HELPER_3(vcmpbfp_dot, void, avr, avr, avr)
+DEF_HELPER_3(vmrglb, void, avr, avr, avr)
+DEF_HELPER_3(vmrglh, void, avr, avr, avr)
+DEF_HELPER_3(vmrglw, void, avr, avr, avr)
+DEF_HELPER_3(vmrghb, void, avr, avr, avr)
+DEF_HELPER_3(vmrghh, void, avr, avr, avr)
+DEF_HELPER_3(vmrghw, void, avr, avr, avr)
+DEF_HELPER_3(vmulesb, void, avr, avr, avr)
+DEF_HELPER_3(vmulesh, void, avr, avr, avr)
+DEF_HELPER_3(vmuleub, void, avr, avr, avr)
+DEF_HELPER_3(vmuleuh, void, avr, avr, avr)
+DEF_HELPER_3(vmulosb, void, avr, avr, avr)
+DEF_HELPER_3(vmulosh, void, avr, avr, avr)
+DEF_HELPER_3(vmuloub, void, avr, avr, avr)
+DEF_HELPER_3(vmulouh, void, avr, avr, avr)
+DEF_HELPER_3(vsrab, void, avr, avr, avr)
+DEF_HELPER_3(vsrah, void, avr, avr, avr)
+DEF_HELPER_3(vsraw, void, avr, avr, avr)
+DEF_HELPER_3(vsrb, void, avr, avr, avr)
+DEF_HELPER_3(vsrh, void, avr, avr, avr)
+DEF_HELPER_3(vsrw, void, avr, avr, avr)
+DEF_HELPER_3(vslb, void, avr, avr, avr)
+DEF_HELPER_3(vslh, void, avr, avr, avr)
+DEF_HELPER_3(vslw, void, avr, avr, avr)
+DEF_HELPER_3(vslo, void, avr, avr, avr)
+DEF_HELPER_3(vsro, void, avr, avr, avr)
+DEF_HELPER_3(vaddcuw, void, avr, avr, avr)
+DEF_HELPER_3(vsubcuw, void, avr, avr, avr)
+DEF_HELPER_2(lvsl, void, avr, tl);
+DEF_HELPER_2(lvsr, void, avr, tl);
+DEF_HELPER_3(vaddsbs, void, avr, avr, avr)
+DEF_HELPER_3(vaddshs, void, avr, avr, avr)
+DEF_HELPER_3(vaddsws, void, avr, avr, avr)
+DEF_HELPER_3(vsubsbs, void, avr, avr, avr)
+DEF_HELPER_3(vsubshs, void, avr, avr, avr)
+DEF_HELPER_3(vsubsws, void, avr, avr, avr)
+DEF_HELPER_3(vaddubs, void, avr, avr, avr)
+DEF_HELPER_3(vadduhs, void, avr, avr, avr)
+DEF_HELPER_3(vadduws, void, avr, avr, avr)
+DEF_HELPER_3(vsububs, void, avr, avr, avr)
+DEF_HELPER_3(vsubuhs, void, avr, avr, avr)
+DEF_HELPER_3(vsubuws, void, avr, avr, avr)
+DEF_HELPER_3(vrlb, void, avr, avr, avr)
+DEF_HELPER_3(vrlh, void, avr, avr, avr)
+DEF_HELPER_3(vrlw, void, avr, avr, avr)
+DEF_HELPER_3(vsl, void, avr, avr, avr)
+DEF_HELPER_3(vsr, void, avr, avr, avr)
+DEF_HELPER_4(vsldoi, void, avr, avr, avr, i32)
+DEF_HELPER_2(vspltisb, void, avr, i32)
+DEF_HELPER_2(vspltish, void, avr, i32)
+DEF_HELPER_2(vspltisw, void, avr, i32)
+DEF_HELPER_3(vspltb, void, avr, avr, i32)
+DEF_HELPER_3(vsplth, void, avr, avr, i32)
+DEF_HELPER_3(vspltw, void, avr, avr, i32)
+DEF_HELPER_2(vupkhpx, void, avr, avr)
+DEF_HELPER_2(vupklpx, void, avr, avr)
+DEF_HELPER_2(vupkhsb, void, avr, avr)
+DEF_HELPER_2(vupkhsh, void, avr, avr)
+DEF_HELPER_2(vupklsb, void, avr, avr)
+DEF_HELPER_2(vupklsh, void, avr, avr)
+DEF_HELPER_4(vmsumubm, void, avr, avr, avr, avr)
+DEF_HELPER_4(vmsummbm, void, avr, avr, avr, avr)
+DEF_HELPER_4(vsel, void, avr, avr, avr, avr)
+DEF_HELPER_4(vperm, void, avr, avr, avr, avr)
+DEF_HELPER_3(vpkshss, void, avr, avr, avr)
+DEF_HELPER_3(vpkshus, void, avr, avr, avr)
+DEF_HELPER_3(vpkswss, void, avr, avr, avr)
+DEF_HELPER_3(vpkswus, void, avr, avr, avr)
+DEF_HELPER_3(vpkuhus, void, avr, avr, avr)
+DEF_HELPER_3(vpkuwus, void, avr, avr, avr)
+DEF_HELPER_3(vpkuhum, void, avr, avr, avr)
+DEF_HELPER_3(vpkuwum, void, avr, avr, avr)
+DEF_HELPER_3(vpkpx, void, avr, avr, avr)
+DEF_HELPER_4(vmhaddshs, void, avr, avr, avr, avr)
+DEF_HELPER_4(vmhraddshs, void, avr, avr, avr, avr)
+DEF_HELPER_4(vmsumuhm, void, avr, avr, avr, avr)
+DEF_HELPER_4(vmsumuhs, void, avr, avr, avr, avr)
+DEF_HELPER_4(vmsumshm, void, avr, avr, avr, avr)
+DEF_HELPER_4(vmsumshs, void, avr, avr, avr, avr)
+DEF_HELPER_4(vmladduhm, void, avr, avr, avr, avr)
+DEF_HELPER_1(mtvscr, void, avr);
+DEF_HELPER_2(lvebx, void, avr, tl)
+DEF_HELPER_2(lvehx, void, avr, tl)
+DEF_HELPER_2(lvewx, void, avr, tl)
+DEF_HELPER_2(stvebx, void, avr, tl)
+DEF_HELPER_2(stvehx, void, avr, tl)
+DEF_HELPER_2(stvewx, void, avr, tl)
+DEF_HELPER_3(vsumsws, void, avr, avr, avr)
+DEF_HELPER_3(vsum2sws, void, avr, avr, avr)
+DEF_HELPER_3(vsum4sbs, void, avr, avr, avr)
+DEF_HELPER_3(vsum4shs, void, avr, avr, avr)
+DEF_HELPER_3(vsum4ubs, void, avr, avr, avr)
+DEF_HELPER_3(vaddfp, void, avr, avr, avr)
+DEF_HELPER_3(vsubfp, void, avr, avr, avr)
+DEF_HELPER_3(vmaxfp, void, avr, avr, avr)
+DEF_HELPER_3(vminfp, void, avr, avr, avr)
+DEF_HELPER_2(vrefp, void, avr, avr)
+DEF_HELPER_2(vrsqrtefp, void, avr, avr)
+DEF_HELPER_4(vmaddfp, void, avr, avr, avr, avr)
+DEF_HELPER_4(vnmsubfp, void, avr, avr, avr, avr)
+DEF_HELPER_2(vexptefp, void, avr, avr)
+DEF_HELPER_2(vlogefp, void, avr, avr)
+DEF_HELPER_2(vrfim, void, avr, avr)
+DEF_HELPER_2(vrfin, void, avr, avr)
+DEF_HELPER_2(vrfip, void, avr, avr)
+DEF_HELPER_2(vrfiz, void, avr, avr)
+DEF_HELPER_3(vcfux, void, avr, avr, i32)
+DEF_HELPER_3(vcfsx, void, avr, avr, i32)
+DEF_HELPER_3(vctuxs, void, avr, avr, i32)
+DEF_HELPER_3(vctsxs, void, avr, avr, i32)
+
+DEF_HELPER_1(efscfsi, i32, i32)
+DEF_HELPER_1(efscfui, i32, i32)
+DEF_HELPER_1(efscfuf, i32, i32)
+DEF_HELPER_1(efscfsf, i32, i32)
+DEF_HELPER_1(efsctsi, i32, i32)
+DEF_HELPER_1(efsctui, i32, i32)
+DEF_HELPER_1(efsctsiz, i32, i32)
+DEF_HELPER_1(efsctuiz, i32, i32)
+DEF_HELPER_1(efsctsf, i32, i32)
+DEF_HELPER_1(efsctuf, i32, i32)
+DEF_HELPER_1(evfscfsi, i64, i64)
+DEF_HELPER_1(evfscfui, i64, i64)
+DEF_HELPER_1(evfscfuf, i64, i64)
+DEF_HELPER_1(evfscfsf, i64, i64)
+DEF_HELPER_1(evfsctsi, i64, i64)
+DEF_HELPER_1(evfsctui, i64, i64)
+DEF_HELPER_1(evfsctsiz, i64, i64)
+DEF_HELPER_1(evfsctuiz, i64, i64)
+DEF_HELPER_1(evfsctsf, i64, i64)
+DEF_HELPER_1(evfsctuf, i64, i64)
+DEF_HELPER_2(efsadd, i32, i32, i32)
+DEF_HELPER_2(efssub, i32, i32, i32)
+DEF_HELPER_2(efsmul, i32, i32, i32)
+DEF_HELPER_2(efsdiv, i32, i32, i32)
+DEF_HELPER_2(evfsadd, i64, i64, i64)
+DEF_HELPER_2(evfssub, i64, i64, i64)
+DEF_HELPER_2(evfsmul, i64, i64, i64)
+DEF_HELPER_2(evfsdiv, i64, i64, i64)
+DEF_HELPER_2(efststlt, i32, i32, i32)
+DEF_HELPER_2(efststgt, i32, i32, i32)
+DEF_HELPER_2(efststeq, i32, i32, i32)
+DEF_HELPER_2(efscmplt, i32, i32, i32)
+DEF_HELPER_2(efscmpgt, i32, i32, i32)
+DEF_HELPER_2(efscmpeq, i32, i32, i32)
+DEF_HELPER_2(evfststlt, i32, i64, i64)
+DEF_HELPER_2(evfststgt, i32, i64, i64)
+DEF_HELPER_2(evfststeq, i32, i64, i64)
+DEF_HELPER_2(evfscmplt, i32, i64, i64)
+DEF_HELPER_2(evfscmpgt, i32, i64, i64)
+DEF_HELPER_2(evfscmpeq, i32, i64, i64)
+DEF_HELPER_1(efdcfsi, i64, i32)
+DEF_HELPER_1(efdcfsid, i64, i64)
+DEF_HELPER_1(efdcfui, i64, i32)
+DEF_HELPER_1(efdcfuid, i64, i64)
+DEF_HELPER_1(efdctsi, i32, i64)
+DEF_HELPER_1(efdctui, i32, i64)
+DEF_HELPER_1(efdctsiz, i32, i64)
+DEF_HELPER_1(efdctsidz, i64, i64)
+DEF_HELPER_1(efdctuiz, i32, i64)
+DEF_HELPER_1(efdctuidz, i64, i64)
+DEF_HELPER_1(efdcfsf, i64, i32)
+DEF_HELPER_1(efdcfuf, i64, i32)
+DEF_HELPER_1(efdctsf, i32, i64)
+DEF_HELPER_1(efdctuf, i32, i64)
+DEF_HELPER_1(efscfd, i32, i64)
+DEF_HELPER_1(efdcfs, i64, i32)
+DEF_HELPER_2(efdadd, i64, i64, i64)
+DEF_HELPER_2(efdsub, i64, i64, i64)
+DEF_HELPER_2(efdmul, i64, i64, i64)
+DEF_HELPER_2(efddiv, i64, i64, i64)
+DEF_HELPER_2(efdtstlt, i32, i64, i64)
+DEF_HELPER_2(efdtstgt, i32, i64, i64)
+DEF_HELPER_2(efdtsteq, i32, i64, i64)
+DEF_HELPER_2(efdcmplt, i32, i64, i64)
+DEF_HELPER_2(efdcmpgt, i32, i64, i64)
+DEF_HELPER_2(efdcmpeq, i32, i64, i64)
+
+#if !defined(CONFIG_USER_ONLY)
+DEF_HELPER_1(4xx_tlbre_hi, tl, tl)
+DEF_HELPER_1(4xx_tlbre_lo, tl, tl)
+DEF_HELPER_2(4xx_tlbwe_hi, void, tl, tl)
+DEF_HELPER_2(4xx_tlbwe_lo, void, tl, tl)
+DEF_HELPER_1(4xx_tlbsx, tl, tl)
+DEF_HELPER_2(440_tlbre, tl, i32, tl)
+DEF_HELPER_3(440_tlbwe, void, i32, tl, tl)
+DEF_HELPER_1(440_tlbsx, tl, tl)
+DEF_HELPER_1(6xx_tlbd, void, tl)
+DEF_HELPER_1(6xx_tlbi, void, tl)
+DEF_HELPER_1(74xx_tlbd, void, tl)
+DEF_HELPER_1(74xx_tlbi, void, tl)
+DEF_HELPER_FLAGS_0(tlbia, TCG_CALL_CONST, void)
+DEF_HELPER_FLAGS_1(tlbie, TCG_CALL_CONST, void, tl)
+#if defined(TARGET_PPC64)
+DEF_HELPER_FLAGS_1(load_slb, TCG_CALL_CONST, tl, tl)
+DEF_HELPER_FLAGS_2(store_slb, TCG_CALL_CONST, void, tl, tl)
+DEF_HELPER_FLAGS_0(slbia, TCG_CALL_CONST, void)
+DEF_HELPER_FLAGS_1(slbie, TCG_CALL_CONST, void, tl)
+#endif
+DEF_HELPER_FLAGS_1(load_sr, TCG_CALL_CONST, tl, tl);
+DEF_HELPER_FLAGS_2(store_sr, TCG_CALL_CONST, void, tl, tl)
+
+DEF_HELPER_FLAGS_1(602_mfrom, TCG_CALL_CONST | TCG_CALL_PURE, tl, tl)
+#endif
+
+DEF_HELPER_3(dlmzb, tl, tl, tl, i32)
+DEF_HELPER_FLAGS_1(clcs, TCG_CALL_CONST | TCG_CALL_PURE, tl, i32)
+#if !defined(CONFIG_USER_ONLY)
+DEF_HELPER_1(rac, tl, tl)
+#endif
+DEF_HELPER_2(div, tl, tl, tl)
+DEF_HELPER_2(divo, tl, tl, tl)
+DEF_HELPER_2(divs, tl, tl, tl)
+DEF_HELPER_2(divso, tl, tl, tl)
+
+DEF_HELPER_1(load_dcr, tl, tl);
+DEF_HELPER_2(store_dcr, void, tl, tl)
+
+DEF_HELPER_1(load_dump_spr, void, i32)
+DEF_HELPER_1(store_dump_spr, void, i32)
+DEF_HELPER_0(load_tbl, tl)
+DEF_HELPER_0(load_tbu, tl)
+DEF_HELPER_0(load_atbl, tl)
+DEF_HELPER_0(load_atbu, tl)
+DEF_HELPER_0(load_601_rtcl, tl)
+DEF_HELPER_0(load_601_rtcu, tl)
+#if !defined(CONFIG_USER_ONLY)
+#if defined(TARGET_PPC64)
+DEF_HELPER_1(store_asr, void, tl)
+#endif
+DEF_HELPER_1(store_sdr1, void, tl)
+DEF_HELPER_1(store_tbl, void, tl)
+DEF_HELPER_1(store_tbu, void, tl)
+DEF_HELPER_1(store_atbl, void, tl)
+DEF_HELPER_1(store_atbu, void, tl)
+DEF_HELPER_1(store_601_rtcl, void, tl)
+DEF_HELPER_1(store_601_rtcu, void, tl)
+DEF_HELPER_0(load_decr, tl)
+DEF_HELPER_1(store_decr, void, tl)
+DEF_HELPER_1(store_hid0_601, void, tl)
+DEF_HELPER_2(store_403_pbr, void, i32, tl)
+DEF_HELPER_0(load_40x_pit, tl)
+DEF_HELPER_1(store_40x_pit, void, tl)
+DEF_HELPER_1(store_40x_dbcr0, void, tl)
+DEF_HELPER_1(store_40x_sler, void, tl)
+DEF_HELPER_1(store_booke_tcr, void, tl)
+DEF_HELPER_1(store_booke_tsr, void, tl)
+DEF_HELPER_2(store_ibatl, void, i32, tl)
+DEF_HELPER_2(store_ibatu, void, i32, tl)
+DEF_HELPER_2(store_dbatl, void, i32, tl)
+DEF_HELPER_2(store_dbatu, void, i32, tl)
+DEF_HELPER_2(store_601_batl, void, i32, tl)
+DEF_HELPER_2(store_601_batu, void, i32, tl)
+#endif
+
+#include "def-helper.h"
diff --git a/target-ppc/helper_regs.h b/target-ppc/helper_regs.h
new file mode 100644
index 0000000..3c98850
--- /dev/null
+++ b/target-ppc/helper_regs.h
@@ -0,0 +1,111 @@
+/*
+ * PowerPC emulation special registers manipulation helpers for qemu.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#if !defined(__HELPER_REGS_H__)
+#define __HELPER_REGS_H__
+
+/* Swap temporary saved registers with GPRs */
+static inline void hreg_swap_gpr_tgpr(CPUPPCState *env)
+{
+ target_ulong tmp;
+
+ tmp = env->gpr[0];
+ env->gpr[0] = env->tgpr[0];
+ env->tgpr[0] = tmp;
+ tmp = env->gpr[1];
+ env->gpr[1] = env->tgpr[1];
+ env->tgpr[1] = tmp;
+ tmp = env->gpr[2];
+ env->gpr[2] = env->tgpr[2];
+ env->tgpr[2] = tmp;
+ tmp = env->gpr[3];
+ env->gpr[3] = env->tgpr[3];
+ env->tgpr[3] = tmp;
+}
+
+static inline void hreg_compute_mem_idx(CPUPPCState *env)
+{
+ /* Precompute MMU index */
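+    /* mmu_idx encoding used below: 0 = problem state (MSR[PR] set),
+     * 1 = supervisor, 2 = hypervisor (MSR[HV] set while MSR[PR] is clear) */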
+ if (msr_pr == 0 && msr_hv != 0) {
+ env->mmu_idx = 2;
+ } else {
+ env->mmu_idx = 1 - msr_pr;
+ }
+}
+
+static inline void hreg_compute_hflags(CPUPPCState *env)
+{
+ target_ulong hflags_mask;
+
+ /* We 'forget' FE0 & FE1: we'll never generate imprecise exceptions */
+ hflags_mask = (1 << MSR_VR) | (1 << MSR_AP) | (1 << MSR_SA) |
+ (1 << MSR_PR) | (1 << MSR_FP) | (1 << MSR_SE) | (1 << MSR_BE) |
+ (1 << MSR_LE);
+ hflags_mask |= (1ULL << MSR_CM) | (1ULL << MSR_SF) | MSR_HVB;
+ hreg_compute_mem_idx(env);
+ env->hflags = env->msr & hflags_mask;
+ /* Merge with hflags coming from other registers */
+ env->hflags |= env->hflags_nmsr;
+}
+
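+/* Store a new MSR value, flushing TLBs and swapping TGPRs as needed.
+ * The return value tells the caller whether it must stop the current
+ * translation block: either the translation mode (IR/DR) changed, or
+ * MSR[POW] put the CPU to sleep (EXCP_HALTED). ppc_store_msr() in helper.c
+ * is one caller; it never allows the hypervisor bit to be altered. */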
+static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
+ int alter_hv)
+{
+ int excp;
+
+ excp = 0;
+ value &= env->msr_mask;
+#if !defined (CONFIG_USER_ONLY)
+ if (!alter_hv) {
+ /* mtmsr cannot alter the hypervisor state */
+ value &= ~MSR_HVB;
+ value |= env->msr & MSR_HVB;
+ }
+ if (((value >> MSR_IR) & 1) != msr_ir ||
+ ((value >> MSR_DR) & 1) != msr_dr) {
+ /* Flush all tlb when changing translation mode */
+        /* Flush all TLBs when changing translation mode */
+ excp = POWERPC_EXCP_NONE;
+ env->interrupt_request |= CPU_INTERRUPT_EXITTB;
+ }
+ if (unlikely((env->flags & POWERPC_FLAG_TGPR) &&
+ ((value ^ env->msr) & (1 << MSR_TGPR)))) {
+ /* Swap temporary saved registers with GPRs */
+ hreg_swap_gpr_tgpr(env);
+ }
+ if (unlikely((value >> MSR_EP) & 1) != msr_ep) {
+ /* Change the exception prefix on PowerPC 601 */
+ env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000;
+ }
+#endif
+ env->msr = value;
+ hreg_compute_hflags(env);
+#if !defined (CONFIG_USER_ONLY)
+ if (unlikely(msr_pow == 1)) {
+ if ((*env->check_pow)(env)) {
+ env->halted = 1;
+ excp = EXCP_HALTED;
+ }
+ }
+#endif
+
+ return excp;
+}
+
+#endif /* !defined(__HELPER_REGS_H__) */
diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
new file mode 100644
index 0000000..710eca1
--- /dev/null
+++ b/target-ppc/kvm.c
@@ -0,0 +1,406 @@
+/*
+ * PowerPC implementation of KVM hooks
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors:
+ * Jerone Young <jyoung5@us.ibm.com>
+ * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ * Hollis Blanchard <hollisb@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <linux/kvm.h>
+
+#include "qemu-common.h"
+#include "qemu-timer.h"
+#include "sysemu.h"
+#include "kvm.h"
+#include "kvm_ppc.h"
+#include "cpu.h"
+#include "device_tree.h"
+
+//#define DEBUG_KVM
+
+#ifdef DEBUG_KVM
+#define dprintf(fmt, ...) \
+ do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
+#else
+#define dprintf(fmt, ...) \
+ do { } while (0)
+#endif
+
+const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
+ KVM_CAP_LAST_INFO
+};
+
+static int cap_interrupt_unset = false;
+static int cap_interrupt_level = false;
+
+/* XXX We have a race condition where we actually have a level triggered
+ * interrupt, but the infrastructure can't expose that yet, so the guest
+ * takes but ignores it, goes to sleep and never gets notified that there's
+ * still an interrupt pending.
+ *
+ * As a quick workaround, let's just wake up again 20 ms after we injected
+ * an interrupt. That way we can assure that we're always reinjecting
+ * interrupts in case the guest swallowed them.
+ */
+static QEMUTimer *idle_timer;
+
+static void kvm_kick_env(void *env)
+{
+ qemu_cpu_kick(env);
+}
+
+int kvm_arch_init(KVMState *s)
+{
+#ifdef KVM_CAP_PPC_UNSET_IRQ
+ cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
+#endif
+#ifdef KVM_CAP_PPC_IRQ_LEVEL
+ cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
+#endif
+
+ if (!cap_interrupt_level) {
+ fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
+ "VM to stall at times!\n");
+ }
+
+ return 0;
+}
+
+int kvm_arch_init_vcpu(CPUState *cenv)
+{
+ int ret = 0;
+ struct kvm_sregs sregs;
+
+ sregs.pvr = cenv->spr[SPR_PVR];
+ ret = kvm_vcpu_ioctl(cenv, KVM_SET_SREGS, &sregs);
+
+ idle_timer = qemu_new_timer(vm_clock, kvm_kick_env, cenv);
+
+ return ret;
+}
+
+void kvm_arch_reset_vcpu(CPUState *env)
+{
+}
+
+int kvm_arch_put_registers(CPUState *env, int level)
+{
+ struct kvm_regs regs;
+ int ret;
+ int i;
+
+ ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
+ if (ret < 0)
+ return ret;
+
+ regs.ctr = env->ctr;
+ regs.lr = env->lr;
+ regs.xer = env->xer;
+ regs.msr = env->msr;
+ regs.pc = env->nip;
+
+ regs.srr0 = env->spr[SPR_SRR0];
+ regs.srr1 = env->spr[SPR_SRR1];
+
+ regs.sprg0 = env->spr[SPR_SPRG0];
+ regs.sprg1 = env->spr[SPR_SPRG1];
+ regs.sprg2 = env->spr[SPR_SPRG2];
+ regs.sprg3 = env->spr[SPR_SPRG3];
+ regs.sprg4 = env->spr[SPR_SPRG4];
+ regs.sprg5 = env->spr[SPR_SPRG5];
+ regs.sprg6 = env->spr[SPR_SPRG6];
+ regs.sprg7 = env->spr[SPR_SPRG7];
+
+ for (i = 0;i < 32; i++)
+ regs.gpr[i] = env->gpr[i];
+
+ ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
+ if (ret < 0)
+ return ret;
+
+ return ret;
+}
+
+int kvm_arch_get_registers(CPUState *env)
+{
+ struct kvm_regs regs;
+ struct kvm_sregs sregs;
+ int i, ret;
+
+ ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
+ if (ret < 0)
+ return ret;
+
+ ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
+ if (ret < 0)
+ return ret;
+
+ env->ctr = regs.ctr;
+ env->lr = regs.lr;
+ env->xer = regs.xer;
+ env->msr = regs.msr;
+ env->nip = regs.pc;
+
+ env->spr[SPR_SRR0] = regs.srr0;
+ env->spr[SPR_SRR1] = regs.srr1;
+
+ env->spr[SPR_SPRG0] = regs.sprg0;
+ env->spr[SPR_SPRG1] = regs.sprg1;
+ env->spr[SPR_SPRG2] = regs.sprg2;
+ env->spr[SPR_SPRG3] = regs.sprg3;
+ env->spr[SPR_SPRG4] = regs.sprg4;
+ env->spr[SPR_SPRG5] = regs.sprg5;
+ env->spr[SPR_SPRG6] = regs.sprg6;
+ env->spr[SPR_SPRG7] = regs.sprg7;
+
+ for (i = 0;i < 32; i++)
+ env->gpr[i] = regs.gpr[i];
+
+#ifdef KVM_CAP_PPC_SEGSTATE
+ if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_SEGSTATE)) {
+ env->sdr1 = sregs.u.s.sdr1;
+
+ /* Sync SLB */
+#ifdef TARGET_PPC64
+ for (i = 0; i < 64; i++) {
+ ppc_store_slb(env, sregs.u.s.ppc64.slb[i].slbe,
+ sregs.u.s.ppc64.slb[i].slbv);
+ }
+#endif
+
+ /* Sync SRs */
+ for (i = 0; i < 16; i++) {
+ env->sr[i] = sregs.u.s.ppc32.sr[i];
+ }
+
+ /* Sync BATs */
+ for (i = 0; i < 8; i++) {
+ env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
+ env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
+ env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
+ env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+int kvmppc_set_interrupt(CPUState *env, int irq, int level)
+{
+ unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
+
+ if (irq != PPC_INTERRUPT_EXT) {
+ return 0;
+ }
+
+ if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
+ return 0;
+ }
+
+ kvm_vcpu_ioctl(env, KVM_INTERRUPT, &virq);
+
+ return 0;
+}
+
+#if defined(TARGET_PPCEMB)
+#define PPC_INPUT_INT PPC40x_INPUT_INT
+#elif defined(TARGET_PPC64)
+#define PPC_INPUT_INT PPC970_INPUT_INT
+#else
+#define PPC_INPUT_INT PPC6xx_INPUT_INT
+#endif
+
+int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
+{
+ int r;
+ unsigned irq;
+
+ /* PowerPC QEMU tracks the various core input pins (interrupt, critical
+ * interrupt, reset, etc.) in the PPC-specific env->irq_input_state. */
+ if (!cap_interrupt_level &&
+ run->ready_for_interrupt_injection &&
+ (env->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->irq_input_state & (1<<PPC_INPUT_INT)))
+ {
+ /* For now KVM disregards the 'irq' argument. However, in the
+ * future KVM could cache it in-kernel to avoid a heavyweight exit
+ * when reading the UIC.
+ */
+ irq = KVM_INTERRUPT_SET;
+
+ dprintf("injected interrupt %d\n", irq);
+ r = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &irq);
+ if (r < 0)
+ printf("cpu %d fail inject %x\n", env->cpu_index, irq);
+
+ /* Always wake up soon in case the interrupt was level based */
+ qemu_mod_timer(idle_timer, qemu_get_clock(vm_clock) +
+ (get_ticks_per_sec() / 50));
+ }
+
+ /* We don't know if there are more interrupts pending after this. However,
+ * the guest will return to userspace in the course of handling this one
+ * anyway, so we will get a chance to deliver the rest. */
+ return 0;
+}
+
+int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
+{
+ return 0;
+}
+
+int kvm_arch_process_irqchip_events(CPUState *env)
+{
+ return 0;
+}
+
+static int kvmppc_handle_halt(CPUState *env)
+{
+ if (!(env->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
+ env->halted = 1;
+ env->exception_index = EXCP_HLT;
+ }
+
+ return 1;
+}
+
+/* map dcr access to existing qemu dcr emulation */
+static int kvmppc_handle_dcr_read(CPUState *env, uint32_t dcrn, uint32_t *data)
+{
+ if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0)
+ fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
+
+ return 1;
+}
+
+static int kvmppc_handle_dcr_write(CPUState *env, uint32_t dcrn, uint32_t data)
+{
+ if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0)
+ fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
+
+ return 1;
+}
+
+int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
+{
+ int ret = 0;
+
+ switch (run->exit_reason) {
+ case KVM_EXIT_DCR:
+ if (run->dcr.is_write) {
+ dprintf("handle dcr write\n");
+ ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
+ } else {
+ dprintf("handle dcr read\n");
+ ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
+ }
+ break;
+ case KVM_EXIT_HLT:
+ dprintf("handle halt\n");
+ ret = kvmppc_handle_halt(env);
+ break;
+ default:
+ fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+static int read_cpuinfo(const char *field, char *value, int len)
+{
+ FILE *f;
+ int ret = -1;
+ int field_len = strlen(field);
+ char line[512];
+
+ f = fopen("/proc/cpuinfo", "r");
+ if (!f) {
+ return -1;
+ }
+
+ do {
+ if (!fgets(line, sizeof(line), f)) {
+ break;
+ }
+ if (!strncmp(line, field, field_len)) {
+ strncpy(value, line, len);
+ ret = 0;
+ break;
+ }
+ } while (*line);
+
+ fclose(f);
+
+ return ret;
+}
+
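+/* Host timebase parsing sketch: kvmppc_get_tbfreq() below expects a
+ * /proc/cpuinfo line of the form (the value is only an illustrative
+ * example, not a real measurement):
+ *
+ *   timebase        : 512000000
+ *
+ * read_cpuinfo() copies the whole line, and atoi() on the text after
+ * the ':' yields the timebase frequency in Hz. */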
+uint32_t kvmppc_get_tbfreq(void)
+{
+ char line[512];
+ char *ns;
+ uint32_t retval = get_ticks_per_sec();
+
+ if (read_cpuinfo("timebase", line, sizeof(line))) {
+ return retval;
+ }
+
+ if (!(ns = strchr(line, ':'))) {
+ return retval;
+ }
+
+ ns++;
+
+ retval = atoi(ns);
+ return retval;
+}
+
+int kvmppc_get_hypercall(CPUState *env, uint8_t *buf, int buf_len)
+{
+ uint32_t *hc = (uint32_t*)buf;
+
+#ifdef KVM_CAP_PPC_GET_PVINFO
+ struct kvm_ppc_pvinfo pvinfo;
+
+ if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
+ !kvm_vm_ioctl(env->kvm_state, KVM_PPC_GET_PVINFO, &pvinfo)) {
+ memcpy(buf, pvinfo.hcall, buf_len);
+
+ return 0;
+ }
+#endif
+
+ /*
+ * Fallback to always fail hypercalls:
+ *
+ * li r3, -1
+ * nop
+ * nop
+ * nop
+ */
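+ /* Instruction encodings, for reference: 0x3860ffff is addi r3, 0, -1
+ * (the "li r3, -1" above) and 0x60000000 is ori r0, r0, 0, the
+ * canonical PowerPC nop. */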
+
+ hc[0] = 0x3860ffff;
+ hc[1] = 0x60000000;
+ hc[2] = 0x60000000;
+ hc[3] = 0x60000000;
+
+ return 0;
+}
+
+bool kvm_arch_stop_on_emulation_error(CPUState *env)
+{
+ return true;
+}
diff --git a/target-ppc/kvm_ppc.c b/target-ppc/kvm_ppc.c
new file mode 100644
index 0000000..6f0468c
--- /dev/null
+++ b/target-ppc/kvm_ppc.c
@@ -0,0 +1,105 @@
+/*
+ * PowerPC KVM support
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors:
+ * Hollis Blanchard <hollisb@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu-common.h"
+#include "qemu-timer.h"
+#include "kvm_ppc.h"
+#include "device_tree.h"
+
+#define PROC_DEVTREE_PATH "/proc/device-tree"
+
+static QEMUTimer *kvmppc_timer;
+static unsigned int kvmppc_timer_rate;
+
+#ifdef CONFIG_FDT
+int kvmppc_read_host_property(const char *node_path, const char *prop,
+ void *val, size_t len)
+{
+ char *path;
+ FILE *f;
+ int ret = 0;
+ int pathlen;
+
+ pathlen = snprintf(NULL, 0, "%s/%s/%s", PROC_DEVTREE_PATH, node_path, prop)
+ + 1;
+ path = qemu_malloc(pathlen);
+
+ snprintf(path, pathlen, "%s/%s/%s", PROC_DEVTREE_PATH, node_path, prop);
+
+ f = fopen(path, "rb");
+ if (f == NULL) {
+ ret = -errno;
+ goto free;
+ }
+
+ len = fread(val, len, 1, f);
+ if (len != 1) {
+ ret = -EIO;
+ goto close;
+ }
+
+close:
+ fclose(f);
+free:
+ qemu_free(path);
+ return ret;
+}
+
+static int kvmppc_copy_host_cell(void *fdt, const char *node, const char *prop)
+{
+ uint32_t cell;
+ int ret;
+
+ ret = kvmppc_read_host_property(node, prop, &cell, sizeof(cell));
+ if (ret < 0) {
+ fprintf(stderr, "couldn't read host %s/%s\n", node, prop);
+ goto out;
+ }
+
+ ret = qemu_devtree_setprop_cell(fdt, node, prop, cell);
+ if (ret < 0) {
+ fprintf(stderr, "couldn't set guest %s/%s\n", node, prop);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+void kvmppc_fdt_update(void *fdt)
+{
+ /* Copy data from the host device tree into the guest. Since the guest can
+ * directly access the timebase without host involvement, we must expose
+ * the correct frequencies. */
+ kvmppc_copy_host_cell(fdt, "/cpus/cpu@0", "clock-frequency");
+ kvmppc_copy_host_cell(fdt, "/cpus/cpu@0", "timebase-frequency");
+}
+#endif
+
+static void kvmppc_timer_hack(void *opaque)
+{
+ qemu_service_io();
+ qemu_mod_timer(kvmppc_timer, qemu_get_clock(vm_clock) + kvmppc_timer_rate);
+}
+
+void kvmppc_init(void)
+{
+ /* XXX The only reason KVM yields control back to QEMU is device I/O. Since
+ * an idle guest does no I/O, QEMU's device model will never get a chance to
+ * run. So, until QEMU gains I/O threads, we create this timer to ensure
+ * that the device model gets a chance to run. */
+ kvmppc_timer_rate = get_ticks_per_sec() / 10;
+ kvmppc_timer = qemu_new_timer(vm_clock, &kvmppc_timer_hack, NULL);
+ qemu_mod_timer(kvmppc_timer, qemu_get_clock(vm_clock) + kvmppc_timer_rate);
+}
+
diff --git a/target-ppc/kvm_ppc.h b/target-ppc/kvm_ppc.h
new file mode 100644
index 0000000..911b19e
--- /dev/null
+++ b/target-ppc/kvm_ppc.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2008 IBM Corporation.
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ *
+ * This work is licensed under the GNU GPL license version 2 or later.
+ *
+ */
+
+#ifndef __KVM_PPC_H__
+#define __KVM_PPC_H__
+
+void kvmppc_init(void);
+void kvmppc_fdt_update(void *fdt);
+int kvmppc_read_host_property(const char *node_path, const char *prop,
+ void *val, size_t len);
+
+uint32_t kvmppc_get_tbfreq(void);
+int kvmppc_get_hypercall(CPUState *env, uint8_t *buf, int buf_len);
+int kvmppc_set_interrupt(CPUState *env, int irq, int level);
+
+#ifndef KVM_INTERRUPT_SET
+#define KVM_INTERRUPT_SET -1
+#endif
+
+#ifndef KVM_INTERRUPT_UNSET
+#define KVM_INTERRUPT_UNSET -2
+#endif
+
+#ifndef KVM_INTERRUPT_SET_LEVEL
+#define KVM_INTERRUPT_SET_LEVEL -3
+#endif
+
+#endif /* __KVM_PPC_H__ */
diff --git a/target-ppc/libkvm.c b/target-ppc/libkvm.c
new file mode 100644
index 0000000..da93026
--- /dev/null
+++ b/target-ppc/libkvm.c
@@ -0,0 +1,102 @@
+/*
+ * This file contains the powerpc specific implementation for the
+ * architecture dependent functions defined in kvm-common.h and
+ * libkvm.h
+ *
+ * Copyright (C) 2006 Qumranet, Inc.
+ *
+ * Authors:
+ * Avi Kivity <avi@qumranet.com>
+ * Yaniv Kamay <yaniv@qumranet.com>
+ *
+ * Copyright IBM Corp. 2007,2008
+ * Authors:
+ * Jerone Young <jyoung5@us.ibm.com>
+ * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the GNU LGPL license, version 2.
+ */
+
+#include "libkvm-all.h"
+#include "libkvm.h"
+#include <errno.h>
+#include <stdio.h>
+#include <inttypes.h>
+
+int handle_dcr(kvm_vcpu_context_t vcpu)
+{
+ int ret = 0;
+ struct kvm_run *run = vcpu->run;
+ kvm_context_t kvm = vcpu->kvm;
+
+ if (run->dcr.is_write)
+ ret = kvm->callbacks->powerpc_dcr_write(vcpu,
+ run->dcr.dcrn,
+ run->dcr.data);
+ else
+ ret = kvm->callbacks->powerpc_dcr_read(vcpu,
+ run->dcr.dcrn,
+ &(run->dcr.data));
+
+ return ret;
+}
+
+void kvm_show_code(kvm_vcpu_context_t vcpu)
+{
+ fprintf(stderr, "%s: Operation not supported\n", __FUNCTION__);
+}
+
+void kvm_show_regs(kvm_vcpu_context_t vcpu)
+{
+ struct kvm_regs regs;
+ int i;
+
+ if (kvm_get_regs(vcpu, &regs))
+ return;
+
+ fprintf(stderr,"guest vcpu #%d\n", vcpu);
+ fprintf(stderr,"pc: %016"PRIx64" msr: %016"PRIx64"\n",
+ regs.pc, regs.msr);
+ fprintf(stderr,"lr: %016"PRIx64" ctr: %016"PRIx64"\n",
+ regs.lr, regs.ctr);
+ fprintf(stderr,"srr0: %016"PRIx64" srr1: %016"PRIx64"\n",
+ regs.srr0, regs.srr1);
+ for (i=0; i<32; i+=4)
+ {
+ fprintf(stderr, "gpr%02d: %016"PRIx64" %016"PRIx64" %016"PRIx64
+ " %016"PRIx64"\n", i,
+ regs.gpr[i],
+ regs.gpr[i+1],
+ regs.gpr[i+2],
+ regs.gpr[i+3]);
+ }
+
+ fflush(stdout);
+}
+
+int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
+ void **vm_mem)
+{
+ int r;
+
+ r = kvm_init_coalesced_mmio(kvm);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+int kvm_arch_run(kvm_vcpu_context_t vcpu)
+{
+ int ret = 0;
+
+ switch (vcpu->run->exit_reason){
+ case KVM_EXIT_DCR:
+ ret = handle_dcr(vcpu);
+ break;
+ default:
+ ret = 1;
+ break;
+ }
+ return ret;
+}
diff --git a/target-ppc/libkvm.h b/target-ppc/libkvm.h
new file mode 100644
index 0000000..80b6b06
--- /dev/null
+++ b/target-ppc/libkvm.h
@@ -0,0 +1,36 @@
+/*
+ * This header is for functions & variables that will ONLY be
+ * used inside libkvm for powerpc.
+ * THESE ARE NOT EXPOSED TO THE USER AND ARE ONLY FOR USE
+ * WITHIN LIBKVM.
+ *
+ * Copyright (C) 2006 Qumranet, Inc.
+ *
+ * Authors:
+ * Avi Kivity <avi@qumranet.com>
+ * Yaniv Kamay <yaniv@qumranet.com>
+ *
+ * Copyright 2007 IBM Corporation.
+ * Added by: Jerone Young <jyoung5@us.ibm.com>
+ *
+ * This work is licensed under the GNU LGPL license, version 2.
+ */
+
+#ifndef KVM_POWERPC_H
+#define KVM_POWERPC_H
+
+#include "libkvm-all.h"
+
+extern int kvm_page_size;
+
+#define PAGE_SIZE kvm_page_size
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+static inline void eieio(void)
+{
+ asm volatile("eieio" : : : "memory");
+}
+
+#define smp_wmb() eieio()
+
+#endif
diff --git a/target-ppc/machine.c b/target-ppc/machine.c
new file mode 100644
index 0000000..81d3f72
--- /dev/null
+++ b/target-ppc/machine.c
@@ -0,0 +1,180 @@
+#include "hw/hw.h"
+#include "hw/boards.h"
+#include "kvm.h"
+#include "qemu-kvm.h"
+
+void cpu_save(QEMUFile *f, void *opaque)
+{
+ CPUState *env = (CPUState *)opaque;
+ unsigned int i, j;
+
+ for (i = 0; i < 32; i++)
+ qemu_put_betls(f, &env->gpr[i]);
+#if !defined(TARGET_PPC64)
+ for (i = 0; i < 32; i++)
+ qemu_put_betls(f, &env->gprh[i]);
+#endif
+ qemu_put_betls(f, &env->lr);
+ qemu_put_betls(f, &env->ctr);
+ for (i = 0; i < 8; i++)
+ qemu_put_be32s(f, &env->crf[i]);
+ qemu_put_betls(f, &env->xer);
+ qemu_put_betls(f, &env->reserve_addr);
+ qemu_put_betls(f, &env->msr);
+ for (i = 0; i < 4; i++)
+ qemu_put_betls(f, &env->tgpr[i]);
+ for (i = 0; i < 32; i++) {
+ union {
+ float64 d;
+ uint64_t l;
+ } u;
+ u.d = env->fpr[i];
+ qemu_put_be64(f, u.l);
+ }
+ qemu_put_be32s(f, &env->fpscr);
+ qemu_put_sbe32s(f, &env->access_type);
+#if !defined(CONFIG_USER_ONLY)
+#if defined(TARGET_PPC64)
+ qemu_put_betls(f, &env->asr);
+ qemu_put_sbe32s(f, &env->slb_nr);
+#endif
+ qemu_put_betls(f, &env->sdr1);
+ for (i = 0; i < 32; i++)
+ qemu_put_betls(f, &env->sr[i]);
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 8; j++)
+ qemu_put_betls(f, &env->DBAT[i][j]);
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 8; j++)
+ qemu_put_betls(f, &env->IBAT[i][j]);
+ qemu_put_sbe32s(f, &env->nb_tlb);
+ qemu_put_sbe32s(f, &env->tlb_per_way);
+ qemu_put_sbe32s(f, &env->nb_ways);
+ qemu_put_sbe32s(f, &env->last_way);
+ qemu_put_sbe32s(f, &env->id_tlbs);
+ qemu_put_sbe32s(f, &env->nb_pids);
+ if (env->tlb) {
+ // XXX assumes 6xx
+ for (i = 0; i < env->nb_tlb; i++) {
+ qemu_put_betls(f, &env->tlb[i].tlb6.pte0);
+ qemu_put_betls(f, &env->tlb[i].tlb6.pte1);
+ qemu_put_betls(f, &env->tlb[i].tlb6.EPN);
+ }
+ }
+ for (i = 0; i < 4; i++)
+ qemu_put_betls(f, &env->pb[i]);
+#endif
+ for (i = 0; i < 1024; i++)
+ qemu_put_betls(f, &env->spr[i]);
+ qemu_put_be32s(f, &env->vscr);
+ qemu_put_be64s(f, &env->spe_acc);
+ qemu_put_be32s(f, &env->spe_fscr);
+ qemu_put_betls(f, &env->msr_mask);
+ qemu_put_be32s(f, &env->flags);
+ qemu_put_sbe32s(f, &env->error_code);
+ qemu_put_be32s(f, &env->pending_interrupts);
+#if !defined(CONFIG_USER_ONLY)
+ qemu_put_be32s(f, &env->irq_input_state);
+ for (i = 0; i < POWERPC_EXCP_NB; i++)
+ qemu_put_betls(f, &env->excp_vectors[i]);
+ qemu_put_betls(f, &env->excp_prefix);
+ qemu_put_betls(f, &env->hreset_excp_prefix);
+ qemu_put_betls(f, &env->ivor_mask);
+ qemu_put_betls(f, &env->ivpr_mask);
+ qemu_put_betls(f, &env->hreset_vector);
+#endif
+ qemu_put_betls(f, &env->nip);
+ qemu_put_betls(f, &env->hflags);
+ qemu_put_betls(f, &env->hflags_nmsr);
+ qemu_put_sbe32s(f, &env->mmu_idx);
+ qemu_put_sbe32s(f, &env->power_mode);
+}
+
+int cpu_load(QEMUFile *f, void *opaque, int version_id)
+{
+ CPUState *env = (CPUState *)opaque;
+ unsigned int i, j;
+
+ for (i = 0; i < 32; i++)
+ qemu_get_betls(f, &env->gpr[i]);
+#if !defined(TARGET_PPC64)
+ for (i = 0; i < 32; i++)
+ qemu_get_betls(f, &env->gprh[i]);
+#endif
+ qemu_get_betls(f, &env->lr);
+ qemu_get_betls(f, &env->ctr);
+ for (i = 0; i < 8; i++)
+ qemu_get_be32s(f, &env->crf[i]);
+ qemu_get_betls(f, &env->xer);
+ qemu_get_betls(f, &env->reserve_addr);
+ qemu_get_betls(f, &env->msr);
+ for (i = 0; i < 4; i++)
+ qemu_get_betls(f, &env->tgpr[i]);
+ for (i = 0; i < 32; i++) {
+ union {
+ float64 d;
+ uint64_t l;
+ } u;
+ u.l = qemu_get_be64(f);
+ env->fpr[i] = u.d;
+ }
+ qemu_get_be32s(f, &env->fpscr);
+ qemu_get_sbe32s(f, &env->access_type);
+#if !defined(CONFIG_USER_ONLY)
+#if defined(TARGET_PPC64)
+ qemu_get_betls(f, &env->asr);
+ qemu_get_sbe32s(f, &env->slb_nr);
+#endif
+ qemu_get_betls(f, &env->sdr1);
+ for (i = 0; i < 32; i++)
+ qemu_get_betls(f, &env->sr[i]);
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 8; j++)
+ qemu_get_betls(f, &env->DBAT[i][j]);
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 8; j++)
+ qemu_get_betls(f, &env->IBAT[i][j]);
+ qemu_get_sbe32s(f, &env->nb_tlb);
+ qemu_get_sbe32s(f, &env->tlb_per_way);
+ qemu_get_sbe32s(f, &env->nb_ways);
+ qemu_get_sbe32s(f, &env->last_way);
+ qemu_get_sbe32s(f, &env->id_tlbs);
+ qemu_get_sbe32s(f, &env->nb_pids);
+ if (env->tlb) {
+ // XXX assumes 6xx
+ for (i = 0; i < env->nb_tlb; i++) {
+ qemu_get_betls(f, &env->tlb[i].tlb6.pte0);
+ qemu_get_betls(f, &env->tlb[i].tlb6.pte1);
+ qemu_get_betls(f, &env->tlb[i].tlb6.EPN);
+ }
+ }
+ for (i = 0; i < 4; i++)
+ qemu_get_betls(f, &env->pb[i]);
+#endif
+ for (i = 0; i < 1024; i++)
+ qemu_get_betls(f, &env->spr[i]);
+ qemu_get_be32s(f, &env->vscr);
+ qemu_get_be64s(f, &env->spe_acc);
+ qemu_get_be32s(f, &env->spe_fscr);
+ qemu_get_betls(f, &env->msr_mask);
+ qemu_get_be32s(f, &env->flags);
+ qemu_get_sbe32s(f, &env->error_code);
+ qemu_get_be32s(f, &env->pending_interrupts);
+#if !defined(CONFIG_USER_ONLY)
+ qemu_get_be32s(f, &env->irq_input_state);
+ for (i = 0; i < POWERPC_EXCP_NB; i++)
+ qemu_get_betls(f, &env->excp_vectors[i]);
+ qemu_get_betls(f, &env->excp_prefix);
+ qemu_get_betls(f, &env->hreset_excp_prefix);
+ qemu_get_betls(f, &env->ivor_mask);
+ qemu_get_betls(f, &env->ivpr_mask);
+ qemu_get_betls(f, &env->hreset_vector);
+#endif
+ qemu_get_betls(f, &env->nip);
+ qemu_get_betls(f, &env->hflags);
+ qemu_get_betls(f, &env->hflags_nmsr);
+ qemu_get_sbe32s(f, &env->mmu_idx);
+ qemu_get_sbe32s(f, &env->power_mode);
+
+ return 0;
+}
diff --git a/target-ppc/mfrom_table.c b/target-ppc/mfrom_table.c
new file mode 100644
index 0000000..6a1fa37
--- /dev/null
+++ b/target-ppc/mfrom_table.c
@@ -0,0 +1,79 @@
+static const uint8_t mfrom_ROM_table[602] =
+{
+ 77, 77, 76, 76, 75, 75, 74, 74,
+ 73, 73, 72, 72, 71, 71, 70, 70,
+ 69, 69, 68, 68, 68, 67, 67, 66,
+ 66, 65, 65, 64, 64, 64, 63, 63,
+ 62, 62, 61, 61, 61, 60, 60, 59,
+ 59, 58, 58, 58, 57, 57, 56, 56,
+ 56, 55, 55, 54, 54, 54, 53, 53,
+ 53, 52, 52, 51, 51, 51, 50, 50,
+ 50, 49, 49, 49, 48, 48, 47, 47,
+ 47, 46, 46, 46, 45, 45, 45, 44,
+ 44, 44, 43, 43, 43, 42, 42, 42,
+ 42, 41, 41, 41, 40, 40, 40, 39,
+ 39, 39, 39, 38, 38, 38, 37, 37,
+ 37, 37, 36, 36, 36, 35, 35, 35,
+ 35, 34, 34, 34, 34, 33, 33, 33,
+ 33, 32, 32, 32, 32, 31, 31, 31,
+ 31, 30, 30, 30, 30, 29, 29, 29,
+ 29, 28, 28, 28, 28, 28, 27, 27,
+ 27, 27, 26, 26, 26, 26, 26, 25,
+ 25, 25, 25, 25, 24, 24, 24, 24,
+ 24, 23, 23, 23, 23, 23, 23, 22,
+ 22, 22, 22, 22, 21, 21, 21, 21,
+ 21, 21, 20, 20, 20, 20, 20, 20,
+ 19, 19, 19, 19, 19, 19, 19, 18,
+ 18, 18, 18, 18, 18, 17, 17, 17,
+ 17, 17, 17, 17, 16, 16, 16, 16,
+ 16, 16, 16, 16, 15, 15, 15, 15,
+ 15, 15, 15, 15, 14, 14, 14, 14,
+ 14, 14, 14, 14, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 0,
+};
diff --git a/target-ppc/mfrom_table_gen.c b/target-ppc/mfrom_table_gen.c
new file mode 100644
index 0000000..4c06aa4
--- /dev/null
+++ b/target-ppc/mfrom_table_gen.c
@@ -0,0 +1,33 @@
+#define _GNU_SOURCE
+#include <stdint.h>
+#include <stdio.h>
+#include <math.h>
+
+int main (void)
+{
+ double d;
+ uint8_t n;
+ int i;
+
+ printf("static const uint8_t mfrom_ROM_table[602] =\n{\n ");
+ for (i = 0; i < 602; i++) {
+ /* Extremely decomposed:
+ * T0 = 256 * log10(10^(-T0 / 256) + 1.0) + 0.5
+ * (here T0 is the loop index i)
+ */
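+ /* Illustrative check: for i = 0 this is 256 * log10(2) + 0.5 ~= 77.56,
+ * truncated to 77, which matches mfrom_ROM_table[0]; for i = 601 it
+ * truncates to 0, the last table entry. */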
+ d = -i;
+ d /= 256.0;
+ d = exp10(d);
+ d += 1.0;
+ d = log10(d);
+ d *= 256;
+ d += 0.5;
+ n = d;
+ printf("%3d, ", n);
+ if ((i & 7) == 7)
+ printf("\n ");
+ }
+ printf("\n};\n");
+
+ return 0;
+}
diff --git a/target-ppc/op_helper.c b/target-ppc/op_helper.c
new file mode 100644
index 0000000..17e070a
--- /dev/null
+++ b/target-ppc/op_helper.c
@@ -0,0 +1,4172 @@
+/*
+ * PowerPC emulation helpers for qemu.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <string.h>
+#include "exec.h"
+#include "host-utils.h"
+#include "helper.h"
+
+#include "helper_regs.h"
+
+//#define DEBUG_OP
+//#define DEBUG_EXCEPTIONS
+//#define DEBUG_SOFTWARE_TLB
+
+#ifdef DEBUG_SOFTWARE_TLB
+# define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
+#else
+# define LOG_SWTLB(...) do { } while (0)
+#endif
+
+
+/*****************************************************************************/
+/* Exceptions processing helpers */
+
+void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
+{
+#if 0
+ printf("Raise exception %3x code : %d\n", exception, error_code);
+#endif
+ env->exception_index = exception;
+ env->error_code = error_code;
+ cpu_loop_exit();
+}
+
+void helper_raise_exception (uint32_t exception)
+{
+ helper_raise_exception_err(exception, 0);
+}
+
+/*****************************************************************************/
+/* SPR accesses */
+void helper_load_dump_spr (uint32_t sprn)
+{
+ qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
+ env->spr[sprn]);
+}
+
+void helper_store_dump_spr (uint32_t sprn)
+{
+ qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
+ env->spr[sprn]);
+}
+
+target_ulong helper_load_tbl (void)
+{
+ return (target_ulong)cpu_ppc_load_tbl(env);
+}
+
+target_ulong helper_load_tbu (void)
+{
+ return cpu_ppc_load_tbu(env);
+}
+
+target_ulong helper_load_atbl (void)
+{
+ return (target_ulong)cpu_ppc_load_atbl(env);
+}
+
+target_ulong helper_load_atbu (void)
+{
+ return cpu_ppc_load_atbu(env);
+}
+
+target_ulong helper_load_601_rtcl (void)
+{
+ return cpu_ppc601_load_rtcl(env);
+}
+
+target_ulong helper_load_601_rtcu (void)
+{
+ return cpu_ppc601_load_rtcu(env);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+#if defined (TARGET_PPC64)
+void helper_store_asr (target_ulong val)
+{
+ ppc_store_asr(env, val);
+}
+#endif
+
+void helper_store_sdr1 (target_ulong val)
+{
+ ppc_store_sdr1(env, val);
+}
+
+void helper_store_tbl (target_ulong val)
+{
+ cpu_ppc_store_tbl(env, val);
+}
+
+void helper_store_tbu (target_ulong val)
+{
+ cpu_ppc_store_tbu(env, val);
+}
+
+void helper_store_atbl (target_ulong val)
+{
+ cpu_ppc_store_atbl(env, val);
+}
+
+void helper_store_atbu (target_ulong val)
+{
+ cpu_ppc_store_atbu(env, val);
+}
+
+void helper_store_601_rtcl (target_ulong val)
+{
+ cpu_ppc601_store_rtcl(env, val);
+}
+
+void helper_store_601_rtcu (target_ulong val)
+{
+ cpu_ppc601_store_rtcu(env, val);
+}
+
+target_ulong helper_load_decr (void)
+{
+ return cpu_ppc_load_decr(env);
+}
+
+void helper_store_decr (target_ulong val)
+{
+ cpu_ppc_store_decr(env, val);
+}
+
+void helper_store_hid0_601 (target_ulong val)
+{
+ target_ulong hid0;
+
+ hid0 = env->spr[SPR_HID0];
+ if ((val ^ hid0) & 0x00000008) {
+ /* Change current endianness */
+ env->hflags &= ~(1 << MSR_LE);
+ env->hflags_nmsr &= ~(1 << MSR_LE);
+ env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
+ env->hflags |= env->hflags_nmsr;
+ qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__,
+ val & 0x8 ? 'l' : 'b', env->hflags);
+ }
+ env->spr[SPR_HID0] = (uint32_t)val;
+}
+
+void helper_store_403_pbr (uint32_t num, target_ulong value)
+{
+ if (likely(env->pb[num] != value)) {
+ env->pb[num] = value;
+ /* Should be optimized */
+ tlb_flush(env, 1);
+ }
+}
+
+target_ulong helper_load_40x_pit (void)
+{
+ return load_40x_pit(env);
+}
+
+void helper_store_40x_pit (target_ulong val)
+{
+ store_40x_pit(env, val);
+}
+
+void helper_store_40x_dbcr0 (target_ulong val)
+{
+ store_40x_dbcr0(env, val);
+}
+
+void helper_store_40x_sler (target_ulong val)
+{
+ store_40x_sler(env, val);
+}
+
+void helper_store_booke_tcr (target_ulong val)
+{
+ store_booke_tcr(env, val);
+}
+
+void helper_store_booke_tsr (target_ulong val)
+{
+ store_booke_tsr(env, val);
+}
+
+void helper_store_ibatu (uint32_t nr, target_ulong val)
+{
+ ppc_store_ibatu(env, nr, val);
+}
+
+void helper_store_ibatl (uint32_t nr, target_ulong val)
+{
+ ppc_store_ibatl(env, nr, val);
+}
+
+void helper_store_dbatu (uint32_t nr, target_ulong val)
+{
+ ppc_store_dbatu(env, nr, val);
+}
+
+void helper_store_dbatl (uint32_t nr, target_ulong val)
+{
+ ppc_store_dbatl(env, nr, val);
+}
+
+void helper_store_601_batl (uint32_t nr, target_ulong val)
+{
+ ppc_store_ibatl_601(env, nr, val);
+}
+
+void helper_store_601_batu (uint32_t nr, target_ulong val)
+{
+ ppc_store_ibatu_601(env, nr, val);
+}
+#endif
+
+/*****************************************************************************/
+/* Memory load and stores */
+
+static inline target_ulong addr_add(target_ulong addr, target_long arg)
+{
+#if defined(TARGET_PPC64)
+ if (!msr_sf)
+ return (uint32_t)(addr + arg);
+ else
+#endif
+ return addr + arg;
+}
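+
+/* Example of the wrap-around above: when 64-bit mode is not active
+ * (msr_sf clear), addr_add(0xFFFFFFFFUL, 1) returns 0 instead of
+ * 0x100000000, matching 32-bit effective-address arithmetic. */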
+
+void helper_lmw (target_ulong addr, uint32_t reg)
+{
+ for (; reg < 32; reg++) {
+ if (msr_le)
+ env->gpr[reg] = bswap32(ldl(addr));
+ else
+ env->gpr[reg] = ldl(addr);
+ addr = addr_add(addr, 4);
+ }
+}
+
+void helper_stmw (target_ulong addr, uint32_t reg)
+{
+ for (; reg < 32; reg++) {
+ if (msr_le)
+ stl(addr, bswap32((uint32_t)env->gpr[reg]));
+ else
+ stl(addr, (uint32_t)env->gpr[reg]);
+ addr = addr_add(addr, 4);
+ }
+}
+
+void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
+{
+ int sh;
+ for (; nb > 3; nb -= 4) {
+ env->gpr[reg] = ldl(addr);
+ reg = (reg + 1) % 32;
+ addr = addr_add(addr, 4);
+ }
+ if (unlikely(nb > 0)) {
+ env->gpr[reg] = 0;
+ for (sh = 24; nb > 0; nb--, sh -= 8) {
+ env->gpr[reg] |= ldub(addr) << sh;
+ addr = addr_add(addr, 1);
+ }
+ }
+}
+/* PPC32 specification says we must generate an exception if
+ * rA is in the range of registers to be loaded.
+ * On the other hand, IBM says this is valid, but rA won't be loaded.
+ * For now, I'll follow the spec...
+ */
+void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
+{
+ if (likely(xer_bc != 0)) {
+ if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
+ (reg < rb && (reg + xer_bc) > rb))) {
+ helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_INVAL_LSWX);
+ } else {
+ helper_lsw(addr, xer_bc, reg);
+ }
+ }
+}
+
+void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
+{
+ int sh;
+ for (; nb > 3; nb -= 4) {
+ stl(addr, env->gpr[reg]);
+ reg = (reg + 1) % 32;
+ addr = addr_add(addr, 4);
+ }
+ if (unlikely(nb > 0)) {
+ for (sh = 24; nb > 0; nb--, sh -= 8) {
+ stb(addr, (env->gpr[reg] >> sh) & 0xFF);
+ addr = addr_add(addr, 1);
+ }
+ }
+}
+
+static void do_dcbz(target_ulong addr, int dcache_line_size)
+{
+ addr &= ~(dcache_line_size - 1);
+ int i;
+ for (i = 0 ; i < dcache_line_size ; i += 4) {
+ stl(addr + i , 0);
+ }
+ if (env->reserve_addr == addr)
+ env->reserve_addr = (target_ulong)-1ULL;
+}
+
+void helper_dcbz(target_ulong addr)
+{
+ do_dcbz(addr, env->dcache_line_size);
+}
+
+void helper_dcbz_970(target_ulong addr)
+{
+ if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
+ do_dcbz(addr, 32);
+ else
+ do_dcbz(addr, env->dcache_line_size);
+}
+
+void helper_icbi(target_ulong addr)
+{
+ addr &= ~(env->dcache_line_size - 1);
+ /* Invalidate one cache line:
+ * the PowerPC specification says this is to be treated like a load
+ * (not a fetch) by the MMU. To make sure that is the case,
+ * do the load "by hand".
+ */
+ ldl(addr);
+ tb_invalidate_page_range(addr, addr + env->icache_line_size);
+}
+
+// XXX: to be tested
+target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
+{
+ int i, c, d;
+ d = 24;
+ for (i = 0; i < xer_bc; i++) {
+ c = ldub(addr);
+ addr = addr_add(addr, 1);
+ /* ra (if not 0) and rb are never modified */
+ if (likely(reg != rb && (ra == 0 || reg != ra))) {
+ env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
+ }
+ if (unlikely(c == xer_cmp))
+ break;
+ if (likely(d != 0)) {
+ d -= 8;
+ } else {
+ d = 24;
+ reg++;
+ reg = reg & 0x1F;
+ }
+ }
+ return i;
+}
+
+/*****************************************************************************/
+/* Fixed point operations helpers */
+#if defined(TARGET_PPC64)
+
+/* multiply high word */
+uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
+{
+ uint64_t tl, th;
+
+ muls64(&tl, &th, arg1, arg2);
+ return th;
+}
+
+/* multiply high word unsigned */
+uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
+{
+ uint64_t tl, th;
+
+ mulu64(&tl, &th, arg1, arg2);
+ return th;
+}
+
+uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
+{
+ int64_t th;
+ uint64_t tl;
+
+ muls64(&tl, (uint64_t *)&th, arg1, arg2);
+ /* If th != 0 && th != -1, then we had an overflow */
+ if (likely((uint64_t)(th + 1) <= 1)) {
+ env->xer &= ~(1 << XER_OV);
+ } else {
+ env->xer |= (1 << XER_OV) | (1 << XER_SO);
+ }
+ return (int64_t)tl;
+}
+#endif
+
+target_ulong helper_cntlzw (target_ulong t)
+{
+ return clz32(t);
+}
+
+#if defined(TARGET_PPC64)
+target_ulong helper_cntlzd (target_ulong t)
+{
+ return clz64(t);
+}
+#endif
+
+/* shift right arithmetic helper */
+target_ulong helper_sraw (target_ulong value, target_ulong shift)
+{
+ int32_t ret;
+
+ if (likely(!(shift & 0x20))) {
+ if (likely((uint32_t)shift != 0)) {
+ shift &= 0x1f;
+ ret = (int32_t)value >> shift;
+ if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
+ env->xer &= ~(1 << XER_CA);
+ } else {
+ env->xer |= (1 << XER_CA);
+ }
+ } else {
+ ret = (int32_t)value;
+ env->xer &= ~(1 << XER_CA);
+ }
+ } else {
+ ret = (int32_t)value >> 31;
+ if (ret) {
+ env->xer |= (1 << XER_CA);
+ } else {
+ env->xer &= ~(1 << XER_CA);
+ }
+ }
+ return (target_long)ret;
+}
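+
+/* Worked example for helper_sraw(): value = 0xFFFFFFF5 (-11), shift = 2
+ * gives ret = -3, and since the result is negative and a 1 bit was
+ * shifted out (0xFFFFFFF5 & 3 = 1), XER[CA] is set. */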
+
+#if defined(TARGET_PPC64)
+target_ulong helper_srad (target_ulong value, target_ulong shift)
+{
+ int64_t ret;
+
+ if (likely(!(shift & 0x40))) {
+ if (likely((uint64_t)shift != 0)) {
+ shift &= 0x3f;
+ ret = (int64_t)value >> shift;
+ if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
+ env->xer &= ~(1 << XER_CA);
+ } else {
+ env->xer |= (1 << XER_CA);
+ }
+ } else {
+ ret = (int64_t)value;
+ env->xer &= ~(1 << XER_CA);
+ }
+ } else {
+ ret = (int64_t)value >> 63;
+ if (ret) {
+ env->xer |= (1 << XER_CA);
+ } else {
+ env->xer &= ~(1 << XER_CA);
+ }
+ }
+ return ret;
+}
+#endif
+
+target_ulong helper_popcntb (target_ulong val)
+{
+ val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
+ val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
+ val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
+ return val;
+}
+
+#if defined(TARGET_PPC64)
+target_ulong helper_popcntb_64 (target_ulong val)
+{
+ val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
+ val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
+ val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
+ return val;
+}
+#endif
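+
+/* Illustrative example of the byte-wise population count above:
+ * helper_popcntb(0x12345678) returns 0x02030404, i.e. each byte of the
+ * result holds the number of bits set in the corresponding source byte. */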
+
+/*****************************************************************************/
+/* Floating point operations helpers */
+uint64_t helper_float32_to_float64(uint32_t arg)
+{
+ CPU_FloatU f;
+ CPU_DoubleU d;
+ f.l = arg;
+ d.d = float32_to_float64(f.f, &env->fp_status);
+ return d.ll;
+}
+
+uint32_t helper_float64_to_float32(uint64_t arg)
+{
+ CPU_FloatU f;
+ CPU_DoubleU d;
+ d.ll = arg;
+ f.f = float64_to_float32(d.d, &env->fp_status);
+ return f.l;
+}
+
+static inline int isden(float64 d)
+{
+ CPU_DoubleU u;
+
+ u.d = d;
+
+ return ((u.ll >> 52) & 0x7FF) == 0;
+}
+
+uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
+{
+ CPU_DoubleU farg;
+ int isneg;
+ int ret;
+ farg.ll = arg;
+ isneg = float64_is_neg(farg.d);
+ if (unlikely(float64_is_any_nan(farg.d))) {
+ if (float64_is_signaling_nan(farg.d)) {
+ /* Signaling NaN: flags are undefined */
+ ret = 0x00;
+ } else {
+ /* Quiet NaN */
+ ret = 0x11;
+ }
+ } else if (unlikely(float64_is_infinity(farg.d))) {
+ /* +/- infinity */
+ if (isneg)
+ ret = 0x09;
+ else
+ ret = 0x05;
+ } else {
+ if (float64_is_zero(farg.d)) {
+ /* +/- zero */
+ if (isneg)
+ ret = 0x12;
+ else
+ ret = 0x02;
+ } else {
+ if (isden(farg.d)) {
+ /* Denormalized numbers */
+ ret = 0x10;
+ } else {
+ /* Normalized numbers */
+ ret = 0x00;
+ }
+ if (isneg) {
+ ret |= 0x08;
+ } else {
+ ret |= 0x04;
+ }
+ }
+ }
+ if (set_fprf) {
+ /* We update FPSCR_FPRF */
+ env->fpscr &= ~(0x1F << FPSCR_FPRF);
+ env->fpscr |= ret << FPSCR_FPRF;
+ }
+ /* We just need fpcc to update Rc1 */
+ return ret & 0xF;
+}
+
+/* Floating-point invalid operations exception */
+static inline uint64_t fload_invalid_op_excp(int op)
+{
+ uint64_t ret = 0;
+ int ve;
+
+ ve = fpscr_ve;
+ switch (op) {
+ case POWERPC_EXCP_FP_VXSNAN:
+ env->fpscr |= 1 << FPSCR_VXSNAN;
+ break;
+ case POWERPC_EXCP_FP_VXSOFT:
+ env->fpscr |= 1 << FPSCR_VXSOFT;
+ break;
+ case POWERPC_EXCP_FP_VXISI:
+ /* Magnitude subtraction of infinities */
+ env->fpscr |= 1 << FPSCR_VXISI;
+ goto update_arith;
+ case POWERPC_EXCP_FP_VXIDI:
+ /* Division of infinity by infinity */
+ env->fpscr |= 1 << FPSCR_VXIDI;
+ goto update_arith;
+ case POWERPC_EXCP_FP_VXZDZ:
+ /* Division of zero by zero */
+ env->fpscr |= 1 << FPSCR_VXZDZ;
+ goto update_arith;
+ case POWERPC_EXCP_FP_VXIMZ:
+ /* Multiplication of zero by infinity */
+ env->fpscr |= 1 << FPSCR_VXIMZ;
+ goto update_arith;
+ case POWERPC_EXCP_FP_VXVC:
+ /* Ordered comparison of NaN */
+ env->fpscr |= 1 << FPSCR_VXVC;
+ env->fpscr &= ~(0xF << FPSCR_FPCC);
+ env->fpscr |= 0x11 << FPSCR_FPCC;
+ /* We must update the target FPR before raising the exception */
+ if (ve != 0) {
+ env->exception_index = POWERPC_EXCP_PROGRAM;
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= 1 << FPSCR_FEX;
+ /* Exception is deferred */
+ ve = 0;
+ }
+ break;
+ case POWERPC_EXCP_FP_VXSQRT:
+ /* Square root of a negative number */
+ env->fpscr |= 1 << FPSCR_VXSQRT;
+ update_arith:
+ env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
+ if (ve == 0) {
+ /* Set the result to quiet NaN */
+ ret = 0x7FF8000000000000ULL;
+ env->fpscr &= ~(0xF << FPSCR_FPCC);
+ env->fpscr |= 0x11 << FPSCR_FPCC;
+ }
+ break;
+ case POWERPC_EXCP_FP_VXCVI:
+ /* Invalid conversion */
+ env->fpscr |= 1 << FPSCR_VXCVI;
+ env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
+ if (ve == 0) {
+ /* Set the result to quiet NaN */
+ ret = 0x7FF8000000000000ULL;
+ env->fpscr &= ~(0xF << FPSCR_FPCC);
+ env->fpscr |= 0x11 << FPSCR_FPCC;
+ }
+ break;
+ }
+ /* Update the floating-point invalid operation summary */
+ env->fpscr |= 1 << FPSCR_VX;
+ /* Update the floating-point exception summary */
+ env->fpscr |= 1 << FPSCR_FX;
+ if (ve != 0) {
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= 1 << FPSCR_FEX;
+ if (msr_fe0 != 0 || msr_fe1 != 0)
+ helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
+ }
+ return ret;
+}
+
+static inline void float_zero_divide_excp(void)
+{
+ env->fpscr |= 1 << FPSCR_ZX;
+ env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
+ /* Update the floating-point exception summary */
+ env->fpscr |= 1 << FPSCR_FX;
+ if (fpscr_ze != 0) {
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= 1 << FPSCR_FEX;
+ if (msr_fe0 != 0 || msr_fe1 != 0) {
+ helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
+ }
+ }
+}
+
+static inline void float_overflow_excp(void)
+{
+ env->fpscr |= 1 << FPSCR_OX;
+ /* Update the floating-point exception summary */
+ env->fpscr |= 1 << FPSCR_FX;
+ if (fpscr_oe != 0) {
+ /* XXX: should adjust the result */
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= 1 << FPSCR_FEX;
+ /* We must update the target FPR before raising the exception */
+ env->exception_index = POWERPC_EXCP_PROGRAM;
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
+ } else {
+ env->fpscr |= 1 << FPSCR_XX;
+ env->fpscr |= 1 << FPSCR_FI;
+ }
+}
+
+static inline void float_underflow_excp(void)
+{
+ env->fpscr |= 1 << FPSCR_UX;
+ /* Update the floating-point exception summary */
+ env->fpscr |= 1 << FPSCR_FX;
+ if (fpscr_ue != 0) {
+ /* XXX: should adjust the result */
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= 1 << FPSCR_FEX;
+ /* We must update the target FPR before raising the exception */
+ env->exception_index = POWERPC_EXCP_PROGRAM;
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
+ }
+}
+
+static inline void float_inexact_excp(void)
+{
+ env->fpscr |= 1 << FPSCR_XX;
+ /* Update the floating-point exception summary */
+ env->fpscr |= 1 << FPSCR_FX;
+ if (fpscr_xe != 0) {
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= 1 << FPSCR_FEX;
+ /* We must update the target FPR before raising the exception */
+ env->exception_index = POWERPC_EXCP_PROGRAM;
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
+ }
+}
+
+static inline void fpscr_set_rounding_mode(void)
+{
+ int rnd_type;
+
+ /* Set rounding mode */
+ switch (fpscr_rn) {
+ case 0:
+ /* Best approximation (round to nearest) */
+ rnd_type = float_round_nearest_even;
+ break;
+ case 1:
+ /* Smaller magnitude (round toward zero) */
+ rnd_type = float_round_to_zero;
+ break;
+ case 2:
+ /* Round toward +infinite */
+ rnd_type = float_round_up;
+ break;
+ default:
+ case 3:
+ /* Round toward -infinite */
+ rnd_type = float_round_down;
+ break;
+ }
+ set_float_rounding_mode(rnd_type, &env->fp_status);
+}
+
+void helper_fpscr_clrbit (uint32_t bit)
+{
+ int prev;
+
+ prev = (env->fpscr >> bit) & 1;
+ env->fpscr &= ~(1 << bit);
+ if (prev == 1) {
+ switch (bit) {
+ case FPSCR_RN1:
+ case FPSCR_RN:
+ fpscr_set_rounding_mode();
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void helper_fpscr_setbit (uint32_t bit)
+{
+ int prev;
+
+ prev = (env->fpscr >> bit) & 1;
+ env->fpscr |= 1 << bit;
+ if (prev == 0) {
+ switch (bit) {
+ case FPSCR_VX:
+ env->fpscr |= 1 << FPSCR_FX;
+ if (fpscr_ve)
+ goto raise_ve;
+ break;
+ case FPSCR_OX:
+ env->fpscr |= 1 << FPSCR_FX;
+ if (fpscr_oe)
+ goto raise_oe;
+ break;
+ case FPSCR_UX:
+ env->fpscr |= 1 << FPSCR_FX;
+ if (fpscr_ue)
+ goto raise_ue;
+ break;
+ case FPSCR_ZX:
+ env->fpscr |= 1 << FPSCR_FX;
+ if (fpscr_ze)
+ goto raise_ze;
+ break;
+ case FPSCR_XX:
+ env->fpscr |= 1 << FPSCR_FX;
+ if (fpscr_xe)
+ goto raise_xe;
+ break;
+ case FPSCR_VXSNAN:
+ case FPSCR_VXISI:
+ case FPSCR_VXIDI:
+ case FPSCR_VXZDZ:
+ case FPSCR_VXIMZ:
+ case FPSCR_VXVC:
+ case FPSCR_VXSOFT:
+ case FPSCR_VXSQRT:
+ case FPSCR_VXCVI:
+ env->fpscr |= 1 << FPSCR_VX;
+ env->fpscr |= 1 << FPSCR_FX;
+ if (fpscr_ve != 0)
+ goto raise_ve;
+ break;
+ case FPSCR_VE:
+ if (fpscr_vx != 0) {
+ raise_ve:
+ env->error_code = POWERPC_EXCP_FP;
+ if (fpscr_vxsnan)
+ env->error_code |= POWERPC_EXCP_FP_VXSNAN;
+ if (fpscr_vxisi)
+ env->error_code |= POWERPC_EXCP_FP_VXISI;
+ if (fpscr_vxidi)
+ env->error_code |= POWERPC_EXCP_FP_VXIDI;
+ if (fpscr_vxzdz)
+ env->error_code |= POWERPC_EXCP_FP_VXZDZ;
+ if (fpscr_vximz)
+ env->error_code |= POWERPC_EXCP_FP_VXIMZ;
+ if (fpscr_vxvc)
+ env->error_code |= POWERPC_EXCP_FP_VXVC;
+ if (fpscr_vxsoft)
+ env->error_code |= POWERPC_EXCP_FP_VXSOFT;
+ if (fpscr_vxsqrt)
+ env->error_code |= POWERPC_EXCP_FP_VXSQRT;
+ if (fpscr_vxcvi)
+ env->error_code |= POWERPC_EXCP_FP_VXCVI;
+ goto raise_excp;
+ }
+ break;
+ case FPSCR_OE:
+ if (fpscr_ox != 0) {
+ raise_oe:
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
+ goto raise_excp;
+ }
+ break;
+ case FPSCR_UE:
+ if (fpscr_ux != 0) {
+ raise_ue:
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
+ goto raise_excp;
+ }
+ break;
+ case FPSCR_ZE:
+ if (fpscr_zx != 0) {
+ raise_ze:
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
+ goto raise_excp;
+ }
+ break;
+ case FPSCR_XE:
+ if (fpscr_xx != 0) {
+ raise_xe:
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
+ goto raise_excp;
+ }
+ break;
+ case FPSCR_RN1:
+ case FPSCR_RN:
+ fpscr_set_rounding_mode();
+ break;
+ default:
+ break;
+ raise_excp:
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= 1 << FPSCR_FEX;
+ /* We have to update Rc1 before raising the exception */
+ env->exception_index = POWERPC_EXCP_PROGRAM;
+ break;
+ }
+ }
+}
+
+void helper_store_fpscr (uint64_t arg, uint32_t mask)
+{
+ /*
+ * We only use the 32 least-significant bits of the incoming FPR.
+ */
+ uint32_t prev, new;
+ int i;
+
+ prev = env->fpscr;
+ new = (uint32_t)arg;
+ new &= ~0x60000000;
+ new |= prev & 0x60000000;
+ for (i = 0; i < 8; i++) {
+ if (mask & (1 << i)) {
+ env->fpscr &= ~(0xF << (4 * i));
+ env->fpscr |= new & (0xF << (4 * i));
+ }
+ }
+ /* Update VX and FEX */
+ if (fpscr_ix != 0)
+ env->fpscr |= 1 << FPSCR_VX;
+ else
+ env->fpscr &= ~(1 << FPSCR_VX);
+ if ((fpscr_ex & fpscr_eex) != 0) {
+ env->fpscr |= 1 << FPSCR_FEX;
+ env->exception_index = POWERPC_EXCP_PROGRAM;
+ /* XXX: we should compute it properly */
+ env->error_code = POWERPC_EXCP_FP;
+ }
+ else
+ env->fpscr &= ~(1 << FPSCR_FEX);
+ fpscr_set_rounding_mode();
+}
+
+void helper_float_check_status (void)
+{
+#ifdef CONFIG_SOFTFLOAT
+ if (env->exception_index == POWERPC_EXCP_PROGRAM &&
+ (env->error_code & POWERPC_EXCP_FP)) {
+ /* Deferred floating-point exception after target FPR update */
+ if (msr_fe0 != 0 || msr_fe1 != 0)
+ helper_raise_exception_err(env->exception_index, env->error_code);
+ } else {
+ int status = get_float_exception_flags(&env->fp_status);
+ if (status & float_flag_divbyzero) {
+ float_zero_divide_excp();
+ } else if (status & float_flag_overflow) {
+ float_overflow_excp();
+ } else if (status & float_flag_underflow) {
+ float_underflow_excp();
+ } else if (status & float_flag_inexact) {
+ float_inexact_excp();
+ }
+ }
+#else
+ if (env->exception_index == POWERPC_EXCP_PROGRAM &&
+ (env->error_code & POWERPC_EXCP_FP)) {
+ /* Deferred floating-point exception after target FPR update */
+ if (msr_fe0 != 0 || msr_fe1 != 0)
+ helper_raise_exception_err(env->exception_index, env->error_code);
+ }
+#endif
+}
+
+#ifdef CONFIG_SOFTFLOAT
+void helper_reset_fpstatus (void)
+{
+ set_float_exception_flags(0, &env->fp_status);
+}
+#endif
+
+/* fadd - fadd. */
+uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
+{
+ CPU_DoubleU farg1, farg2;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+
+ if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
+ float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
+ /* Magnitude subtraction of infinities */
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
+ } else {
+ if (unlikely(float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d))) {
+ /* sNaN addition */
+ fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ }
+ farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
+ }
+
+ return farg1.ll;
+}
+
+/* fsub - fsub. */
+uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
+{
+ CPU_DoubleU farg1, farg2;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+
+ if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
+ float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
+ /* Magnitude subtraction of infinities */
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
+ } else {
+ if (unlikely(float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d))) {
+ /* sNaN subtraction */
+ fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ }
+ farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
+ }
+
+ return farg1.ll;
+}
+
+/* fmul - fmul. */
+uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
+{
+ CPU_DoubleU farg1, farg2;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+
+ if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
+ (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
+ /* Multiplication of zero by infinity */
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
+ } else {
+ if (unlikely(float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d))) {
+ /* sNaN multiplication */
+ fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ }
+ farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
+ }
+
+ return farg1.ll;
+}
+
+/* fdiv - fdiv. */
+uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
+{
+ CPU_DoubleU farg1, farg2;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+
+ if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
+ /* Division of infinity by infinity */
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
+ } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
+ /* Division of zero by zero */
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
+ } else {
+ if (unlikely(float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d))) {
+ /* sNaN division */
+ fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ }
+ farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
+ }
+
+ return farg1.ll;
+}
+
+/* fabs */
+uint64_t helper_fabs (uint64_t arg)
+{
+ CPU_DoubleU farg;
+
+ farg.ll = arg;
+ farg.d = float64_abs(farg.d);
+ return farg.ll;
+}
+
+/* fnabs */
+uint64_t helper_fnabs (uint64_t arg)
+{
+ CPU_DoubleU farg;
+
+ farg.ll = arg;
+ farg.d = float64_abs(farg.d);
+ farg.d = float64_chs(farg.d);
+ return farg.ll;
+}
+
+/* fneg */
+uint64_t helper_fneg (uint64_t arg)
+{
+ CPU_DoubleU farg;
+
+ farg.ll = arg;
+ farg.d = float64_chs(farg.d);
+ return farg.ll;
+}
+
+/* fctiw - fctiw. */
+uint64_t helper_fctiw (uint64_t arg)
+{
+ CPU_DoubleU farg;
+ farg.ll = arg;
+
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
+ /* sNaN conversion */
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
+ } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
+ /* qNaN / infinity conversion */
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
+ } else {
+ farg.ll = float64_to_int32(farg.d, &env->fp_status);
+ /* XXX: higher bits are not supposed to be significant.
+ * To make tests easier, return the same value as a real PowerPC 750.
+ */
+ farg.ll |= 0xFFF80000ULL << 32;
+ }
+ return farg.ll;
+}
+
+/* fctiwz - fctiwz. */
+uint64_t helper_fctiwz (uint64_t arg)
+{
+ CPU_DoubleU farg;
+ farg.ll = arg;
+
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
+ /* sNaN conversion */
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
+ } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
+ /* qNaN / infinity conversion */
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
+ } else {
+ farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
+ /* XXX: higher bits are not supposed to be significant.
+ * To make tests easier, return the same value as a real PowerPC 750.
+ */
+ farg.ll |= 0xFFF80000ULL << 32;
+ }
+ return farg.ll;
+}
+
+#if defined(TARGET_PPC64)
+/* fcfid - fcfid. */
+uint64_t helper_fcfid (uint64_t arg)
+{
+ CPU_DoubleU farg;
+ farg.d = int64_to_float64(arg, &env->fp_status);
+ return farg.ll;
+}
+
+/* fctid - fctid. */
+uint64_t helper_fctid (uint64_t arg)
+{
+ CPU_DoubleU farg;
+ farg.ll = arg;
+
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
+ /* sNaN conversion */
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
+ } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
+ /* qNaN / infinity conversion */
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
+ } else {
+ farg.ll = float64_to_int64(farg.d, &env->fp_status);
+ }
+ return farg.ll;
+}
+
+/* fctidz - fctidz. */
+uint64_t helper_fctidz (uint64_t arg)
+{
+ CPU_DoubleU farg;
+ farg.ll = arg;
+
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
+ /* sNaN conversion */
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
+ } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
+ /* qNaN / infinity conversion */
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
+ } else {
+ farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
+ }
+ return farg.ll;
+}
+
+#endif
+
+static inline uint64_t do_fri(uint64_t arg, int rounding_mode)
+{
+ CPU_DoubleU farg;
+ farg.ll = arg;
+
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
+ /* sNaN round */
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
+ } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
+ /* qNaN / infinity round */
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
+ } else {
+ set_float_rounding_mode(rounding_mode, &env->fp_status);
+ farg.ll = float64_round_to_int(farg.d, &env->fp_status);
+ /* Restore rounding mode from FPSCR */
+ fpscr_set_rounding_mode();
+ }
+ return farg.ll;
+}
+
+uint64_t helper_frin (uint64_t arg)
+{
+ return do_fri(arg, float_round_nearest_even);
+}
+
+uint64_t helper_friz (uint64_t arg)
+{
+ return do_fri(arg, float_round_to_zero);
+}
+
+uint64_t helper_frip (uint64_t arg)
+{
+ return do_fri(arg, float_round_up);
+}
+
+uint64_t helper_frim (uint64_t arg)
+{
+ return do_fri(arg, float_round_down);
+}
+
+/* fmadd - fmadd. */
+uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
+{
+ CPU_DoubleU farg1, farg2, farg3;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+ farg3.ll = arg3;
+
+ if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
+ (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
+ /* Multiplication of zero by infinity */
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
+ } else {
+ if (unlikely(float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d) ||
+ float64_is_signaling_nan(farg3.d))) {
+ /* sNaN operation */
+ fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ }
+#ifdef FLOAT128
+ /* This is the way the PowerPC specification defines it */
+ float128 ft0_128, ft1_128;
+
+ ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
+ ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
+ ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
+ if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
+ float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
+ /* Magnitude subtraction of infinities */
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
+ } else {
+ ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
+ ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
+ farg1.d = float128_to_float64(ft0_128, &env->fp_status);
+ }
+#else
+ /* This is OK on x86 hosts */
+ farg1.d = (farg1.d * farg2.d) + farg3.d;
+#endif
+ }
+
+ return farg1.ll;
+}
+
+/* fmsub - fmsub. */
+uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
+{
+ CPU_DoubleU farg1, farg2, farg3;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+ farg3.ll = arg3;
+
+ if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
+ (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
+ /* Multiplication of zero by infinity */
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
+ } else {
+ if (unlikely(float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d) ||
+ float64_is_signaling_nan(farg3.d))) {
+ /* sNaN operation */
+ fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ }
+#ifdef FLOAT128
+ /* This is the way the PowerPC specification defines it */
+ float128 ft0_128, ft1_128;
+
+ ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
+ ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
+ ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
+ if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
+ float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
+ /* Magnitude subtraction of infinities */
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
+ } else {
+ ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
+ ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
+ farg1.d = float128_to_float64(ft0_128, &env->fp_status);
+ }
+#else
+ /* This is OK on x86 hosts */
+ farg1.d = (farg1.d * farg2.d) - farg3.d;
+#endif
+ }
+ return farg1.ll;
+}
+
+/* fnmadd - fnmadd. */
+uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
+{
+ CPU_DoubleU farg1, farg2, farg3;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+ farg3.ll = arg3;
+
+ if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
+ (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
+ /* Multiplication of zero by infinity */
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
+ } else {
+ if (unlikely(float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d) ||
+ float64_is_signaling_nan(farg3.d))) {
+ /* sNaN operation */
+ fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ }
+#ifdef FLOAT128
+ /* This is the way the PowerPC specification defines it */
+ float128 ft0_128, ft1_128;
+
+ ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
+ ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
+ ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
+ if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
+ float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
+ /* Magnitude subtraction of infinities */
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
+ } else {
+ ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
+ ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
+ farg1.d = float128_to_float64(ft0_128, &env->fp_status);
+ }
+#else
+ /* This is OK on x86 hosts */
+ farg1.d = (farg1.d * farg2.d) + farg3.d;
+#endif
+ if (likely(!float64_is_any_nan(farg1.d))) {
+ farg1.d = float64_chs(farg1.d);
+ }
+ }
+ return farg1.ll;
+}
+
+/* fnmsub - fnmsub. */
+uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
+{
+ CPU_DoubleU farg1, farg2, farg3;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+ farg3.ll = arg3;
+
+ if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
+ (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
+ /* Multiplication of zero by infinity */
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
+ } else {
+ if (unlikely(float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d) ||
+ float64_is_signaling_nan(farg3.d))) {
+ /* sNaN operation */
+ fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ }
+#ifdef FLOAT128
+ /* This is the way the PowerPC specification defines it */
+ float128 ft0_128, ft1_128;
+
+ ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
+ ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
+ ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
+ if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
+ float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
+ /* Magnitude subtraction of infinities */
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
+ } else {
+ ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
+ ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
+ farg1.d = float128_to_float64(ft0_128, &env->fp_status);
+ }
+#else
+ /* This is OK on x86 hosts */
+ farg1.d = (farg1.d * farg2.d) - farg3.d;
+#endif
+ if (likely(!float64_is_any_nan(farg1.d))) {
+ farg1.d = float64_chs(farg1.d);
+ }
+ }
+ return farg1.ll;
+}
+
+/* frsp - frsp. */
+uint64_t helper_frsp (uint64_t arg)
+{
+ CPU_DoubleU farg;
+ float32 f32;
+ farg.ll = arg;
+
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
+ /* sNaN rounding to single precision */
+ fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ }
+ f32 = float64_to_float32(farg.d, &env->fp_status);
+ farg.d = float32_to_float64(f32, &env->fp_status);
+
+ return farg.ll;
+}
+
+/* fsqrt - fsqrt. */
+uint64_t helper_fsqrt (uint64_t arg)
+{
+ CPU_DoubleU farg;
+ farg.ll = arg;
+
+ if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
+ /* Square root of a negative nonzero number */
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
+ } else {
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
+ /* sNaN square root */
+ fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ }
+ farg.d = float64_sqrt(farg.d, &env->fp_status);
+ }
+ return farg.ll;
+}
+
+/* fre - fre. */
+uint64_t helper_fre (uint64_t arg)
+{
+ CPU_DoubleU farg;
+ farg.ll = arg;
+
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
+ /* sNaN reciprocal */
+ fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ }
+ farg.d = float64_div(float64_one, farg.d, &env->fp_status);
+    return farg.ll;
+}
+
+/* fres - fres. */
+uint64_t helper_fres (uint64_t arg)
+{
+ CPU_DoubleU farg;
+ float32 f32;
+ farg.ll = arg;
+
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
+ /* sNaN reciprocal */
+ fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ }
+ farg.d = float64_div(float64_one, farg.d, &env->fp_status);
+ f32 = float64_to_float32(farg.d, &env->fp_status);
+ farg.d = float32_to_float64(f32, &env->fp_status);
+
+ return farg.ll;
+}
+
+/* frsqrte - frsqrte. */
+uint64_t helper_frsqrte (uint64_t arg)
+{
+ CPU_DoubleU farg;
+ float32 f32;
+ farg.ll = arg;
+
+ if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
+ /* Reciprocal square root of a negative nonzero number */
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
+ } else {
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
+ /* sNaN reciprocal square root */
+ fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ }
+ farg.d = float64_sqrt(farg.d, &env->fp_status);
+ farg.d = float64_div(float64_one, farg.d, &env->fp_status);
+ f32 = float64_to_float32(farg.d, &env->fp_status);
+ farg.d = float32_to_float64(f32, &env->fp_status);
+ }
+ return farg.ll;
+}
+
+/* fsel - fsel. */
+uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
+{
+ CPU_DoubleU farg1;
+
+ farg1.ll = arg1;
+
+ if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_any_nan(farg1.d)) {
+ return arg2;
+ } else {
+ return arg3;
+ }
+}
+
+void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
+{
+ CPU_DoubleU farg1, farg2;
+ uint32_t ret = 0;
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+
+ if (unlikely(float64_is_any_nan(farg1.d) ||
+ float64_is_any_nan(farg2.d))) {
+ ret = 0x01UL;
+ } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
+ ret = 0x08UL;
+ } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
+ ret = 0x04UL;
+ } else {
+ ret = 0x02UL;
+ }
+
+ env->fpscr &= ~(0x0F << FPSCR_FPRF);
+ env->fpscr |= ret << FPSCR_FPRF;
+ env->crf[crfD] = ret;
+ if (unlikely(ret == 0x01UL
+ && (float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d)))) {
+ /* sNaN comparison */
+ fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ }
+}
+
+void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
+{
+ CPU_DoubleU farg1, farg2;
+ uint32_t ret = 0;
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+
+ if (unlikely(float64_is_any_nan(farg1.d) ||
+ float64_is_any_nan(farg2.d))) {
+ ret = 0x01UL;
+ } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
+ ret = 0x08UL;
+ } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
+ ret = 0x04UL;
+ } else {
+ ret = 0x02UL;
+ }
+
+ env->fpscr &= ~(0x0F << FPSCR_FPRF);
+ env->fpscr |= ret << FPSCR_FPRF;
+ env->crf[crfD] = ret;
+ if (unlikely (ret == 0x01UL)) {
+ if (float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d)) {
+ /* sNaN comparison */
+ fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
+ POWERPC_EXCP_FP_VXVC);
+ } else {
+ /* qNaN comparison */
+ fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
+ }
+ }
+}
+
+#if !defined (CONFIG_USER_ONLY)
+void helper_store_msr (target_ulong val)
+{
+ val = hreg_store_msr(env, val, 0);
+ if (val != 0) {
+ env->interrupt_request |= CPU_INTERRUPT_EXITTB;
+ helper_raise_exception(val);
+ }
+}
+
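+/* Common return-from-interrupt path: the new NIP and MSR come from the
+ * relevant save/restore registers, msrm masks the MSR bits that may be
+ * restored, and keep_msrh preserves the upper 32 MSR bits when returning
+ * to 32-bit mode on a 64-bit CPU.
+ */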
+static inline void do_rfi(target_ulong nip, target_ulong msr,
+ target_ulong msrm, int keep_msrh)
+{
+#if defined(TARGET_PPC64)
+ if (msr & (1ULL << MSR_SF)) {
+ nip = (uint64_t)nip;
+ msr &= (uint64_t)msrm;
+ } else {
+ nip = (uint32_t)nip;
+ msr = (uint32_t)(msr & msrm);
+ if (keep_msrh)
+ msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
+ }
+#else
+ nip = (uint32_t)nip;
+ msr &= (uint32_t)msrm;
+#endif
+ /* XXX: beware: this is false if VLE is supported */
+ env->nip = nip & ~((target_ulong)0x00000003);
+ hreg_store_msr(env, msr, 1);
+#if defined (DEBUG_OP)
+ cpu_dump_rfi(env->nip, env->msr);
+#endif
+ /* No need to raise an exception here,
+ * as rfi is always the last insn of a TB
+ */
+ env->interrupt_request |= CPU_INTERRUPT_EXITTB;
+}
+
+void helper_rfi (void)
+{
+ do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
+ ~((target_ulong)0x783F0000), 1);
+}
+
+#if defined(TARGET_PPC64)
+void helper_rfid (void)
+{
+ do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
+ ~((target_ulong)0x783F0000), 0);
+}
+
+void helper_hrfid (void)
+{
+ do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
+ ~((target_ulong)0x783F0000), 0);
+}
+#endif
+#endif
+
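+/* Trap helpers: the TO field passed in 'flags' selects the conditions,
+ * 0x10 = signed less than, 0x08 = signed greater than, 0x04 = equal,
+ * 0x02 = unsigned less than, 0x01 = unsigned greater than; a trap is
+ * taken if any selected condition holds.
+ */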
+void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
+{
+ if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
+ ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
+ ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
+ ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
+ ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
+ helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
+ }
+}
+
+#if defined(TARGET_PPC64)
+void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
+{
+ if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
+ ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
+ ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
+ ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
+ ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
+ helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
+}
+#endif
+
+/*****************************************************************************/
+/* PowerPC 601 specific instructions (POWER bridge) */
+
+target_ulong helper_clcs (uint32_t arg)
+{
+ switch (arg) {
+ case 0x0CUL:
+ /* Instruction cache line size */
+ return env->icache_line_size;
+ break;
+ case 0x0DUL:
+ /* Data cache line size */
+ return env->dcache_line_size;
+ break;
+ case 0x0EUL:
+ /* Minimum cache line size */
+ return (env->icache_line_size < env->dcache_line_size) ?
+ env->icache_line_size : env->dcache_line_size;
+ break;
+ case 0x0FUL:
+ /* Maximum cache line size */
+ return (env->icache_line_size > env->dcache_line_size) ?
+ env->icache_line_size : env->dcache_line_size;
+ break;
+ default:
+ /* Undefined */
+ return 0;
+ break;
+ }
+}
+
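+/* POWER bridge division uses the MQ register: div/divo divide the 64-bit
+ * quantity (RA || MQ) by RB and leave the remainder in MQ, while
+ * divs/divso divide RA by RB directly. Overflow and divide-by-zero cases
+ * return INT32_MIN with MQ cleared.
+ */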
+target_ulong helper_div (target_ulong arg1, target_ulong arg2)
+{
+ uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
+
+ if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
+ (int32_t)arg2 == 0) {
+ env->spr[SPR_MQ] = 0;
+ return INT32_MIN;
+ } else {
+ env->spr[SPR_MQ] = tmp % arg2;
+ return tmp / (int32_t)arg2;
+ }
+}
+
+target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
+{
+ uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
+
+ if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
+ (int32_t)arg2 == 0) {
+ env->xer |= (1 << XER_OV) | (1 << XER_SO);
+ env->spr[SPR_MQ] = 0;
+ return INT32_MIN;
+ } else {
+ env->spr[SPR_MQ] = tmp % arg2;
+ tmp /= (int32_t)arg2;
+ if ((int32_t)tmp != tmp) {
+ env->xer |= (1 << XER_OV) | (1 << XER_SO);
+ } else {
+ env->xer &= ~(1 << XER_OV);
+ }
+ return tmp;
+ }
+}
+
+target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
+{
+ if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
+ (int32_t)arg2 == 0) {
+ env->spr[SPR_MQ] = 0;
+ return INT32_MIN;
+ } else {
+ env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
+ return (int32_t)arg1 / (int32_t)arg2;
+ }
+}
+
+target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
+{
+ if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
+ (int32_t)arg2 == 0) {
+ env->xer |= (1 << XER_OV) | (1 << XER_SO);
+ env->spr[SPR_MQ] = 0;
+ return INT32_MIN;
+ } else {
+ env->xer &= ~(1 << XER_OV);
+ env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
+ return (int32_t)arg1 / (int32_t)arg2;
+ }
+}
+
+#if !defined (CONFIG_USER_ONLY)
+target_ulong helper_rac (target_ulong addr)
+{
+ mmu_ctx_t ctx;
+ int nb_BATs;
+ target_ulong ret = 0;
+
+ /* We don't have to generate many instances of this instruction,
+ * as rac is supervisor only.
+ */
+ /* XXX: FIX THIS: Pretend we have no BAT */
+ nb_BATs = env->nb_BATs;
+ env->nb_BATs = 0;
+ if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
+ ret = ctx.raddr;
+ env->nb_BATs = nb_BATs;
+ return ret;
+}
+
+void helper_rfsvc (void)
+{
+ do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
+}
+#endif
+
+/*****************************************************************************/
+/* 602 specific instructions */
+/* mfrom is the craziest instruction ever seen, imho ! */
+/* Real implementation uses a ROM table. Do the same */
+/* Extremely decomposed:
+ *   return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
+ */
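+/* A rough sketch of how such a table entry could be generated from the
+ * formula above (illustrative only, not the actual generator):
+ *   mfrom_ROM_table[i] =
+ *       (uint32_t)(256.0 * log10(pow(10.0, -i / 256.0) + 1.0) + 0.5);
+ * for 0 <= i < 602; the helper below just indexes the precomputed table
+ * included from mfrom_table.c.
+ */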
+#if !defined (CONFIG_USER_ONLY)
+target_ulong helper_602_mfrom (target_ulong arg)
+{
+ if (likely(arg < 602)) {
+#include "mfrom_table.c"
+ return mfrom_ROM_table[arg];
+ } else {
+ return 0;
+ }
+}
+#endif
+
+/*****************************************************************************/
+/* Embedded PowerPC specific helpers */
+
+/* XXX: to be improved to check access rights when in user-mode */
+target_ulong helper_load_dcr (target_ulong dcrn)
+{
+ uint32_t val = 0;
+
+ if (unlikely(env->dcr_env == NULL)) {
+ qemu_log("No DCR environment\n");
+ helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
+ } else if (unlikely(ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val) != 0)) {
+ qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
+ helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
+ }
+ return val;
+}
+
+void helper_store_dcr (target_ulong dcrn, target_ulong val)
+{
+ if (unlikely(env->dcr_env == NULL)) {
+ qemu_log("No DCR environment\n");
+ helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
+ } else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val) != 0)) {
+ qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
+ helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
+ }
+}
+
+#if !defined(CONFIG_USER_ONLY)
+void helper_40x_rfci (void)
+{
+ do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
+ ~((target_ulong)0xFFFF0000), 0);
+}
+
+void helper_rfci (void)
+{
+    do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
+ ~((target_ulong)0x3FFF0000), 0);
+}
+
+void helper_rfdi (void)
+{
+    do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
+ ~((target_ulong)0x3FFF0000), 0);
+}
+
+void helper_rfmci (void)
+{
+    do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
+ ~((target_ulong)0x3FFF0000), 0);
+}
+#endif
+
+/* 440 specific */
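+/* dlmzb: determine the length of a zero-terminated byte string spread
+ * across the two source operands, searching the high word first. The byte
+ * count (including the zero byte, or 8 when no zero byte is present) is
+ * returned and written to the low 7 bits of XER; when Rc is set, CR0
+ * records in which half the zero byte was found, if any, together with
+ * the XER SO bit.
+ */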
+target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
+{
+ target_ulong mask;
+ int i;
+
+ i = 1;
+ for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
+ if ((high & mask) == 0) {
+ if (update_Rc) {
+ env->crf[0] = 0x4;
+ }
+ goto done;
+ }
+ i++;
+ }
+ for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
+ if ((low & mask) == 0) {
+ if (update_Rc) {
+ env->crf[0] = 0x8;
+ }
+ goto done;
+ }
+ i++;
+ }
+    i = 8;
+    if (update_Rc) {
+        env->crf[0] = 0x2;
+    }
+ done:
+ env->xer = (env->xer & ~0x7F) | i;
+ if (update_Rc) {
+ env->crf[0] |= xer_so;
+ }
+ return i;
+}
+
+/*****************************************************************************/
+/* Altivec extension helpers */
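+/* HI_IDX/LO_IDX select the most/least significant 64-bit half of a vector
+ * register independently of host byte order, and VECTOR_FOR_INORDER_I
+ * walks the elements in PowerPC (big-endian) element order on any host.
+ */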
+#if defined(HOST_WORDS_BIGENDIAN)
+#define HI_IDX 0
+#define LO_IDX 1
+#else
+#define HI_IDX 1
+#define LO_IDX 0
+#endif
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define VECTOR_FOR_INORDER_I(index, element) \
+ for (index = 0; index < ARRAY_SIZE(r->element); index++)
+#else
+#define VECTOR_FOR_INORDER_I(index, element) \
+ for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
+#endif
+
+/* If X is a NaN, store the corresponding QNaN into RESULT. Otherwise,
+ * execute the following block. */
+#define DO_HANDLE_NAN(result, x) \
+ if (float32_is_any_nan(x)) { \
+ CPU_FloatU __f; \
+ __f.f = x; \
+ __f.l = __f.l | (1 << 22); /* Set QNaN bit. */ \
+ result = __f.f; \
+ } else
+
+#define HANDLE_NAN1(result, x) \
+ DO_HANDLE_NAN(result, x)
+#define HANDLE_NAN2(result, x, y) \
+ DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
+#define HANDLE_NAN3(result, x, y, z) \
+ DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
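+/* For example, HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { r->f[i] = ...; }
+ * chains two if/else tests, so the block only runs when neither input is
+ * a NaN; otherwise the first NaN operand, quieted, becomes the result.
+ */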
+
+/* Saturating arithmetic helpers. */
+#define SATCVT(from, to, from_type, to_type, min, max) \
+ static inline to_type cvt##from##to(from_type x, int *sat) \
+ { \
+ to_type r; \
+ if (x < (from_type)min) { \
+ r = min; \
+ *sat = 1; \
+ } else if (x > (from_type)max) { \
+ r = max; \
+ *sat = 1; \
+ } else { \
+ r = x; \
+ } \
+ return r; \
+ }
+#define SATCVTU(from, to, from_type, to_type, min, max) \
+ static inline to_type cvt##from##to(from_type x, int *sat) \
+ { \
+ to_type r; \
+ if (x > (from_type)max) { \
+ r = max; \
+ *sat = 1; \
+ } else { \
+ r = x; \
+ } \
+ return r; \
+ }
+SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
+SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
+SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)
+
+SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
+SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
+SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
+SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
+SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
+SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
+#undef SATCVT
+#undef SATCVTU
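+/* Example: cvtsdsw(0x123456789LL, &sat) saturates to INT32_MAX and sets
+ * *sat so the caller can raise VSCR[SAT]; in-range values pass through
+ * unchanged.
+ */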
+
+#define LVE(name, access, swap, element) \
+ void helper_##name (ppc_avr_t *r, target_ulong addr) \
+ { \
+ size_t n_elems = ARRAY_SIZE(r->element); \
+ int adjust = HI_IDX*(n_elems-1); \
+ int sh = sizeof(r->element[0]) >> 1; \
+ int index = (addr & 0xf) >> sh; \
+        if (msr_le) {                                           \
+ r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
+ } else { \
+ r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
+ } \
+ }
+#define I(x) (x)
+LVE(lvebx, ldub, I, u8)
+LVE(lvehx, lduw, bswap16, u16)
+LVE(lvewx, ldl, bswap32, u32)
+#undef I
+#undef LVE
+
+void helper_lvsl (ppc_avr_t *r, target_ulong sh)
+{
+ int i, j = (sh & 0xf);
+
+ VECTOR_FOR_INORDER_I (i, u8) {
+ r->u8[i] = j++;
+ }
+}
+
+void helper_lvsr (ppc_avr_t *r, target_ulong sh)
+{
+ int i, j = 0x10 - (sh & 0xf);
+
+ VECTOR_FOR_INORDER_I (i, u8) {
+ r->u8[i] = j++;
+ }
+}
+
+#define STVE(name, access, swap, element) \
+ void helper_##name (ppc_avr_t *r, target_ulong addr) \
+ { \
+ size_t n_elems = ARRAY_SIZE(r->element); \
+ int adjust = HI_IDX*(n_elems-1); \
+ int sh = sizeof(r->element[0]) >> 1; \
+ int index = (addr & 0xf) >> sh; \
+        if (msr_le) {                                           \
+ access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
+ } else { \
+ access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
+ } \
+ }
+#define I(x) (x)
+STVE(stvebx, stb, I, u8)
+STVE(stvehx, stw, bswap16, u16)
+STVE(stvewx, stl, bswap32, u32)
+#undef I
+#undef STVE
+
+void helper_mtvscr (ppc_avr_t *r)
+{
+#if defined(HOST_WORDS_BIGENDIAN)
+ env->vscr = r->u32[3];
+#else
+ env->vscr = r->u32[0];
+#endif
+ set_flush_to_zero(vscr_nj, &env->vec_status);
+}
+
+void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
+ r->u32[i] = ~a->u32[i] < b->u32[i];
+ }
+}
+
+#define VARITH_DO(name, op, element) \
+void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+{ \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ r->element[i] = a->element[i] op b->element[i]; \
+ } \
+}
+#define VARITH(suffix, element) \
+ VARITH_DO(add##suffix, +, element) \
+ VARITH_DO(sub##suffix, -, element)
+VARITH(ubm, u8)
+VARITH(uhm, u16)
+VARITH(uwm, u32)
+#undef VARITH_DO
+#undef VARITH
+
+#define VARITHFP(suffix, func) \
+ void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
+ HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
+ r->f[i] = func(a->f[i], b->f[i], &env->vec_status); \
+ } \
+ } \
+ }
+VARITHFP(addfp, float32_add)
+VARITHFP(subfp, float32_sub)
+#undef VARITHFP
+
+#define VARITHSAT_CASE(type, op, cvt, element) \
+ { \
+ type result = (type)a->element[i] op (type)b->element[i]; \
+ r->element[i] = cvt(result, &sat); \
+ }
+
+#define VARITHSAT_DO(name, op, optype, cvt, element) \
+ void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int sat = 0; \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ switch (sizeof(r->element[0])) { \
+ case 1: VARITHSAT_CASE(optype, op, cvt, element); break; \
+ case 2: VARITHSAT_CASE(optype, op, cvt, element); break; \
+ case 4: VARITHSAT_CASE(optype, op, cvt, element); break; \
+ } \
+ } \
+ if (sat) { \
+ env->vscr |= (1 << VSCR_SAT); \
+ } \
+ }
+#define VARITHSAT_SIGNED(suffix, element, optype, cvt) \
+ VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element) \
+ VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
+#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt) \
+ VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element) \
+ VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
+VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
+VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
+VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
+VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
+VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
+VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
+#undef VARITHSAT_CASE
+#undef VARITHSAT_DO
+#undef VARITHSAT_SIGNED
+#undef VARITHSAT_UNSIGNED
+
+#define VAVG_DO(name, element, etype) \
+ void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
+ r->element[i] = x >> 1; \
+ } \
+ }
+
+#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
+ VAVG_DO(avgs##type, signed_element, signed_type) \
+ VAVG_DO(avgu##type, unsigned_element, unsigned_type)
+VAVG(b, s8, int16_t, u8, uint16_t)
+VAVG(h, s16, int32_t, u16, uint32_t)
+VAVG(w, s32, int64_t, u32, uint64_t)
+#undef VAVG_DO
+#undef VAVG
+
+#define VCF(suffix, cvt, element) \
+ void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
+ { \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
+ float32 t = cvt(b->element[i], &env->vec_status); \
+ r->f[i] = float32_scalbn (t, -uim, &env->vec_status); \
+ } \
+ }
+VCF(ux, uint32_to_float32, u32)
+VCF(sx, int32_to_float32, s32)
+#undef VCF
+
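+/* Integer vector compares: each result element is all ones when the
+ * comparison holds and zero otherwise. The dot forms also set CR6:
+ * bit 3 when every element matched, bit 1 when none did.
+ */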
+#define VCMP_DO(suffix, compare, element, record) \
+ void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ uint32_t ones = (uint32_t)-1; \
+ uint32_t all = ones; \
+ uint32_t none = 0; \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
+ switch (sizeof (a->element[0])) { \
+ case 4: r->u32[i] = result; break; \
+ case 2: r->u16[i] = result; break; \
+ case 1: r->u8[i] = result; break; \
+ } \
+ all &= result; \
+ none |= result; \
+ } \
+ if (record) { \
+ env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
+ } \
+ }
+#define VCMP(suffix, compare, element) \
+ VCMP_DO(suffix, compare, element, 0) \
+ VCMP_DO(suffix##_dot, compare, element, 1)
+VCMP(equb, ==, u8)
+VCMP(equh, ==, u16)
+VCMP(equw, ==, u32)
+VCMP(gtub, >, u8)
+VCMP(gtuh, >, u16)
+VCMP(gtuw, >, u32)
+VCMP(gtsb, >, s8)
+VCMP(gtsh, >, s16)
+VCMP(gtsw, >, s32)
+#undef VCMP_DO
+#undef VCMP
+
+#define VCMPFP_DO(suffix, compare, order, record) \
+ void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ uint32_t ones = (uint32_t)-1; \
+ uint32_t all = ones; \
+ uint32_t none = 0; \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
+ uint32_t result; \
+ int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
+ if (rel == float_relation_unordered) { \
+ result = 0; \
+ } else if (rel compare order) { \
+ result = ones; \
+ } else { \
+ result = 0; \
+ } \
+ r->u32[i] = result; \
+ all &= result; \
+ none |= result; \
+ } \
+ if (record) { \
+ env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
+ } \
+ }
+#define VCMPFP(suffix, compare, order) \
+ VCMPFP_DO(suffix, compare, order, 0) \
+ VCMPFP_DO(suffix##_dot, compare, order, 1)
+VCMPFP(eqfp, ==, float_relation_equal)
+VCMPFP(gefp, !=, float_relation_less)
+VCMPFP(gtfp, ==, float_relation_greater)
+#undef VCMPFP_DO
+#undef VCMPFP
+
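+/* vcmpbfp bounds check against +/-b: per element, bit 31 of the result is
+ * set when a > b and bit 30 when a < -b; unordered inputs yield
+ * 0xc0000000. The dot form sets CR6 bit 1 when all elements were within
+ * bounds.
+ */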
+static inline void vcmpbfp_internal(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
+ int record)
+{
+ int i;
+ int all_in = 0;
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) {
+ int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
+ if (le_rel == float_relation_unordered) {
+ r->u32[i] = 0xc0000000;
+ /* ALL_IN does not need to be updated here. */
+ } else {
+ float32 bneg = float32_chs(b->f[i]);
+ int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
+ int le = le_rel != float_relation_greater;
+ int ge = ge_rel != float_relation_less;
+ r->u32[i] = ((!le) << 31) | ((!ge) << 30);
+ all_in |= (!le | !ge);
+ }
+ }
+ if (record) {
+ env->crf[6] = (all_in == 0) << 1;
+ }
+}
+
+void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ vcmpbfp_internal(r, a, b, 0);
+}
+
+void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ vcmpbfp_internal(r, a, b, 1);
+}
+
+#define VCT(suffix, satcvt, element) \
+ void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
+ { \
+ int i; \
+ int sat = 0; \
+ float_status s = env->vec_status; \
+ set_float_rounding_mode(float_round_to_zero, &s); \
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
+ if (float32_is_any_nan(b->f[i])) { \
+ r->element[i] = 0; \
+ } else { \
+ float64 t = float32_to_float64(b->f[i], &s); \
+ int64_t j; \
+ t = float64_scalbn(t, uim, &s); \
+ j = float64_to_int64(t, &s); \
+ r->element[i] = satcvt(j, &sat); \
+ } \
+ } \
+ if (sat) { \
+ env->vscr |= (1 << VSCR_SAT); \
+ } \
+ }
+VCT(uxs, cvtsduw, u32)
+VCT(sxs, cvtsdsw, s32)
+#undef VCT
+
+void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) {
+ HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
+ /* Need to do the computation in higher precision and round
+ * once at the end. */
+ float64 af, bf, cf, t;
+ af = float32_to_float64(a->f[i], &env->vec_status);
+ bf = float32_to_float64(b->f[i], &env->vec_status);
+ cf = float32_to_float64(c->f[i], &env->vec_status);
+ t = float64_mul(af, cf, &env->vec_status);
+ t = float64_add(t, bf, &env->vec_status);
+ r->f[i] = float64_to_float32(t, &env->vec_status);
+ }
+ }
+}
+
+void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+ int sat = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
+ int32_t prod = a->s16[i] * b->s16[i];
+ int32_t t = (int32_t)c->s16[i] + (prod >> 15);
+ r->s16[i] = cvtswsh (t, &sat);
+ }
+
+ if (sat) {
+ env->vscr |= (1 << VSCR_SAT);
+ }
+}
+
+void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+ int sat = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
+ int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
+ int32_t t = (int32_t)c->s16[i] + (prod >> 15);
+ r->s16[i] = cvtswsh (t, &sat);
+ }
+
+ if (sat) {
+ env->vscr |= (1 << VSCR_SAT);
+ }
+}
+
+#define VMINMAX_DO(name, compare, element) \
+ void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ if (a->element[i] compare b->element[i]) { \
+ r->element[i] = b->element[i]; \
+ } else { \
+ r->element[i] = a->element[i]; \
+ } \
+ } \
+ }
+#define VMINMAX(suffix, element) \
+ VMINMAX_DO(min##suffix, >, element) \
+ VMINMAX_DO(max##suffix, <, element)
+VMINMAX(sb, s8)
+VMINMAX(sh, s16)
+VMINMAX(sw, s32)
+VMINMAX(ub, u8)
+VMINMAX(uh, u16)
+VMINMAX(uw, u32)
+#undef VMINMAX_DO
+#undef VMINMAX
+
+#define VMINMAXFP(suffix, rT, rF) \
+ void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
+ HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
+ if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
+ r->f[i] = rT->f[i]; \
+ } else { \
+ r->f[i] = rF->f[i]; \
+ } \
+ } \
+ } \
+ }
+VMINMAXFP(minfp, a, b)
+VMINMAXFP(maxfp, b, a)
+#undef VMINMAXFP
+
+void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
+ int32_t prod = a->s16[i] * b->s16[i];
+ r->s16[i] = (int16_t) (prod + c->s16[i]);
+ }
+}
+
+#define VMRG_DO(name, element, highp) \
+ void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ ppc_avr_t result; \
+ int i; \
+ size_t n_elems = ARRAY_SIZE(r->element); \
+ for (i = 0; i < n_elems/2; i++) { \
+ if (highp) { \
+ result.element[i*2+HI_IDX] = a->element[i]; \
+ result.element[i*2+LO_IDX] = b->element[i]; \
+ } else { \
+ result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
+ result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
+ } \
+ } \
+ *r = result; \
+ }
+#if defined(HOST_WORDS_BIGENDIAN)
+#define MRGHI 0
+#define MRGLO 1
+#else
+#define MRGHI 1
+#define MRGLO 0
+#endif
+#define VMRG(suffix, element) \
+ VMRG_DO(mrgl##suffix, element, MRGHI) \
+ VMRG_DO(mrgh##suffix, element, MRGLO)
+VMRG(b, u8)
+VMRG(h, u16)
+VMRG(w, u32)
+#undef VMRG_DO
+#undef VMRG
+#undef MRGHI
+#undef MRGLO
+
+void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+ int32_t prod[16];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
+ prod[i] = (int32_t)a->s8[i] * b->u8[i];
+ }
+
+ VECTOR_FOR_INORDER_I(i, s32) {
+ r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
+ }
+}
+
+void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+ int32_t prod[8];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
+ prod[i] = a->s16[i] * b->s16[i];
+ }
+
+ VECTOR_FOR_INORDER_I(i, s32) {
+ r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
+ }
+}
+
+void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+ int32_t prod[8];
+ int i;
+ int sat = 0;
+
+ for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
+ prod[i] = (int32_t)a->s16[i] * b->s16[i];
+ }
+
+ VECTOR_FOR_INORDER_I (i, s32) {
+ int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
+ r->u32[i] = cvtsdsw(t, &sat);
+ }
+
+ if (sat) {
+ env->vscr |= (1 << VSCR_SAT);
+ }
+}
+
+void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+ uint16_t prod[16];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+ prod[i] = a->u8[i] * b->u8[i];
+ }
+
+ VECTOR_FOR_INORDER_I(i, u32) {
+ r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
+ }
+}
+
+void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+ uint32_t prod[8];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
+ prod[i] = a->u16[i] * b->u16[i];
+ }
+
+ VECTOR_FOR_INORDER_I(i, u32) {
+ r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
+ }
+}
+
+void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+ uint32_t prod[8];
+ int i;
+ int sat = 0;
+
+ for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
+ prod[i] = a->u16[i] * b->u16[i];
+ }
+
+ VECTOR_FOR_INORDER_I (i, s32) {
+ uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
+ r->u32[i] = cvtuduw(t, &sat);
+ }
+
+ if (sat) {
+ env->vscr |= (1 << VSCR_SAT);
+ }
+}
+
+#define VMUL_DO(name, mul_element, prod_element, evenp) \
+ void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ VECTOR_FOR_INORDER_I(i, prod_element) { \
+ if (evenp) { \
+ r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
+ } else { \
+ r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
+ } \
+ } \
+ }
+#define VMUL(suffix, mul_element, prod_element) \
+ VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
+ VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
+VMUL(sb, s8, s16)
+VMUL(sh, s16, s32)
+VMUL(ub, u8, u16)
+VMUL(uh, u16, u32)
+#undef VMUL_DO
+#undef VMUL
+
+void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) {
+ HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
+            /* Need to do the computation in higher precision and round
+             * once at the end. */
+ float64 af, bf, cf, t;
+ af = float32_to_float64(a->f[i], &env->vec_status);
+ bf = float32_to_float64(b->f[i], &env->vec_status);
+ cf = float32_to_float64(c->f[i], &env->vec_status);
+ t = float64_mul(af, cf, &env->vec_status);
+ t = float64_sub(t, bf, &env->vec_status);
+ t = float64_chs(t);
+ r->f[i] = float64_to_float32(t, &env->vec_status);
+ }
+ }
+}
+
+void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+ ppc_avr_t result;
+ int i;
+ VECTOR_FOR_INORDER_I (i, u8) {
+ int s = c->u8[i] & 0x1f;
+#if defined(HOST_WORDS_BIGENDIAN)
+ int index = s & 0xf;
+#else
+ int index = 15 - (s & 0xf);
+#endif
+ if (s & 0x10) {
+ result.u8[i] = b->u8[index];
+ } else {
+ result.u8[i] = a->u8[index];
+ }
+ }
+ *r = result;
+}
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define PKBIG 1
+#else
+#define PKBIG 0
+#endif
+void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i, j;
+ ppc_avr_t result;
+#if defined(HOST_WORDS_BIGENDIAN)
+ const ppc_avr_t *x[2] = { a, b };
+#else
+ const ppc_avr_t *x[2] = { b, a };
+#endif
+
+ VECTOR_FOR_INORDER_I (i, u64) {
+ VECTOR_FOR_INORDER_I (j, u32){
+ uint32_t e = x[i]->u32[j];
+ result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
+ ((e >> 6) & 0x3e0) |
+ ((e >> 3) & 0x1f));
+ }
+ }
+ *r = result;
+}
+
+#define VPK(suffix, from, to, cvt, dosat) \
+ void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ int sat = 0; \
+ ppc_avr_t result; \
+ ppc_avr_t *a0 = PKBIG ? a : b; \
+ ppc_avr_t *a1 = PKBIG ? b : a; \
+ VECTOR_FOR_INORDER_I (i, from) { \
+ result.to[i] = cvt(a0->from[i], &sat); \
+ result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat); \
+ } \
+ *r = result; \
+ if (dosat && sat) { \
+ env->vscr |= (1 << VSCR_SAT); \
+ } \
+ }
+#define I(x, y) (x)
+VPK(shss, s16, s8, cvtshsb, 1)
+VPK(shus, s16, u8, cvtshub, 1)
+VPK(swss, s32, s16, cvtswsh, 1)
+VPK(swus, s32, u16, cvtswuh, 1)
+VPK(uhus, u16, u8, cvtuhub, 1)
+VPK(uwus, u32, u16, cvtuwuh, 1)
+VPK(uhum, u16, u8, I, 0)
+VPK(uwum, u32, u16, I, 0)
+#undef I
+#undef VPK
+#undef PKBIG
+
+void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) {
+ HANDLE_NAN1(r->f[i], b->f[i]) {
+ r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
+ }
+ }
+}
+
+#define VRFI(suffix, rounding) \
+ void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b) \
+ { \
+ int i; \
+ float_status s = env->vec_status; \
+ set_float_rounding_mode(rounding, &s); \
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
+ HANDLE_NAN1(r->f[i], b->f[i]) { \
+ r->f[i] = float32_round_to_int (b->f[i], &s); \
+ } \
+ } \
+ }
+VRFI(n, float_round_nearest_even)
+VRFI(m, float_round_down)
+VRFI(p, float_round_up)
+VRFI(z, float_round_to_zero)
+#undef VRFI
+
+#define VROTATE(suffix, element) \
+ void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
+ unsigned int shift = b->element[i] & mask; \
+ r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
+ } \
+ }
+VROTATE(b, u8)
+VROTATE(h, u16)
+VROTATE(w, u32)
+#undef VROTATE
+
+void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) {
+ HANDLE_NAN1(r->f[i], b->f[i]) {
+ float32 t = float32_sqrt(b->f[i], &env->vec_status);
+ r->f[i] = float32_div(float32_one, t, &env->vec_status);
+ }
+ }
+}
+
+void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+ r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
+ r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
+}
+
+void helper_vexptefp (ppc_avr_t *r, ppc_avr_t *b)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) {
+ HANDLE_NAN1(r->f[i], b->f[i]) {
+ r->f[i] = float32_exp2(b->f[i], &env->vec_status);
+ }
+ }
+}
+
+void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) {
+ HANDLE_NAN1(r->f[i], b->f[i]) {
+ r->f[i] = float32_log2(b->f[i], &env->vec_status);
+ }
+ }
+}
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define LEFT 0
+#define RIGHT 1
+#else
+#define LEFT 1
+#define RIGHT 0
+#endif
+/* The specification says that the results are undefined if all of the
+ * shift counts are not identical. We check that they are identical, which
+ * matches what real hardware appears to do. */
+#define VSHIFT(suffix, leftp) \
+ void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int shift = b->u8[LO_IDX*15] & 0x7; \
+ int doit = 1; \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) { \
+ doit = doit && ((b->u8[i] & 0x7) == shift); \
+ } \
+ if (doit) { \
+ if (shift == 0) { \
+ *r = *a; \
+ } else if (leftp) { \
+ uint64_t carry = a->u64[LO_IDX] >> (64 - shift); \
+ r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry; \
+ r->u64[LO_IDX] = a->u64[LO_IDX] << shift; \
+ } else { \
+ uint64_t carry = a->u64[HI_IDX] << (64 - shift); \
+ r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry; \
+ r->u64[HI_IDX] = a->u64[HI_IDX] >> shift; \
+ } \
+ } \
+ }
+VSHIFT(l, LEFT)
+VSHIFT(r, RIGHT)
+#undef VSHIFT
+#undef LEFT
+#undef RIGHT
+
+#define VSL(suffix, element) \
+ void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
+ unsigned int shift = b->element[i] & mask; \
+ r->element[i] = a->element[i] << shift; \
+ } \
+ }
+VSL(b, u8)
+VSL(h, u16)
+VSL(w, u32)
+#undef VSL
+
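+/* vsldoi: concatenate a and b and take 16 consecutive bytes starting
+ * 'shift' bytes into a, i.e. shift the 32-byte pair left by whole bytes.
+ */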
+void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
+{
+ int sh = shift & 0xf;
+ int i;
+ ppc_avr_t result;
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+ int index = sh + i;
+ if (index > 0xf) {
+ result.u8[i] = b->u8[index-0x10];
+ } else {
+ result.u8[i] = a->u8[index];
+ }
+ }
+#else
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+ int index = (16 - sh) + i;
+ if (index > 0xf) {
+ result.u8[i] = a->u8[index-0x10];
+ } else {
+ result.u8[i] = b->u8[index];
+ }
+ }
+#endif
+ *r = result;
+}
+
+void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
+
+#if defined (HOST_WORDS_BIGENDIAN)
+ memmove (&r->u8[0], &a->u8[sh], 16-sh);
+ memset (&r->u8[16-sh], 0, sh);
+#else
+ memmove (&r->u8[sh], &a->u8[0], 16-sh);
+ memset (&r->u8[0], 0, sh);
+#endif
+}
+
+/* Experimental testing shows that hardware masks the immediate. */
+#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
+#if defined(HOST_WORDS_BIGENDIAN)
+#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
+#else
+#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
+#endif
+#define VSPLT(suffix, element) \
+ void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
+ { \
+ uint32_t s = b->element[SPLAT_ELEMENT(element)]; \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ r->element[i] = s; \
+ } \
+ }
+VSPLT(b, u8)
+VSPLT(h, u16)
+VSPLT(w, u32)
+#undef VSPLT
+#undef SPLAT_ELEMENT
+#undef _SPLAT_MASKED
+
+#define VSPLTI(suffix, element, splat_type) \
+ void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat) \
+ { \
+ splat_type x = (int8_t)(splat << 3) >> 3; \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ r->element[i] = x; \
+ } \
+ }
+VSPLTI(b, s8, int8_t)
+VSPLTI(h, s16, int16_t)
+VSPLTI(w, s32, int32_t)
+#undef VSPLTI
+
+#define VSR(suffix, element) \
+ void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
+ unsigned int shift = b->element[i] & mask; \
+ r->element[i] = a->element[i] >> shift; \
+ } \
+ }
+VSR(ab, s8)
+VSR(ah, s16)
+VSR(aw, s32)
+VSR(b, u8)
+VSR(h, u16)
+VSR(w, u32)
+#undef VSR
+
+void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
+
+#if defined (HOST_WORDS_BIGENDIAN)
+ memmove (&r->u8[sh], &a->u8[0], 16-sh);
+ memset (&r->u8[0], 0, sh);
+#else
+ memmove (&r->u8[0], &a->u8[sh], 16-sh);
+ memset (&r->u8[16-sh], 0, sh);
+#endif
+}
+
+void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
+ r->u32[i] = a->u32[i] >= b->u32[i];
+ }
+}
+
+void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int64_t t;
+ int i, upper;
+ ppc_avr_t result;
+ int sat = 0;
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ upper = ARRAY_SIZE(r->s32)-1;
+#else
+ upper = 0;
+#endif
+ t = (int64_t)b->s32[upper];
+ for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
+ t += a->s32[i];
+ result.s32[i] = 0;
+ }
+ result.s32[upper] = cvtsdsw(t, &sat);
+ *r = result;
+
+ if (sat) {
+ env->vscr |= (1 << VSCR_SAT);
+ }
+}
+
+void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i, j, upper;
+ ppc_avr_t result;
+ int sat = 0;
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ upper = 1;
+#else
+ upper = 0;
+#endif
+ for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
+ int64_t t = (int64_t)b->s32[upper+i*2];
+ result.u64[i] = 0;
+ for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
+ t += a->s32[2*i+j];
+ }
+ result.s32[upper+i*2] = cvtsdsw(t, &sat);
+ }
+
+ *r = result;
+ if (sat) {
+ env->vscr |= (1 << VSCR_SAT);
+ }
+}
+
+void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i, j;
+ int sat = 0;
+
+ for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
+ int64_t t = (int64_t)b->s32[i];
+ for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
+ t += a->s8[4*i+j];
+ }
+ r->s32[i] = cvtsdsw(t, &sat);
+ }
+
+ if (sat) {
+ env->vscr |= (1 << VSCR_SAT);
+ }
+}
+
+void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int sat = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
+ int64_t t = (int64_t)b->s32[i];
+ t += a->s16[2*i] + a->s16[2*i+1];
+ r->s32[i] = cvtsdsw(t, &sat);
+ }
+
+ if (sat) {
+ env->vscr |= (1 << VSCR_SAT);
+ }
+}
+
+void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i, j;
+ int sat = 0;
+
+ for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
+ uint64_t t = (uint64_t)b->u32[i];
+ for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
+ t += a->u8[4*i+j];
+ }
+ r->u32[i] = cvtuduw(t, &sat);
+ }
+
+ if (sat) {
+ env->vscr |= (1 << VSCR_SAT);
+ }
+}
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define UPKHI 1
+#define UPKLO 0
+#else
+#define UPKHI 0
+#define UPKLO 1
+#endif
+#define VUPKPX(suffix, hi) \
+ void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
+ { \
+ int i; \
+ ppc_avr_t result; \
+ for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
+ uint16_t e = b->u16[hi ? i : i+4]; \
+ uint8_t a = (e >> 15) ? 0xff : 0; \
+ uint8_t r = (e >> 10) & 0x1f; \
+ uint8_t g = (e >> 5) & 0x1f; \
+ uint8_t b = e & 0x1f; \
+ result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
+ } \
+ *r = result; \
+ }
+VUPKPX(lpx, UPKLO)
+VUPKPX(hpx, UPKHI)
+#undef VUPKPX
+
+#define VUPK(suffix, unpacked, packee, hi) \
+ void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
+ { \
+ int i; \
+ ppc_avr_t result; \
+ if (hi) { \
+ for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \
+ result.unpacked[i] = b->packee[i]; \
+ } \
+ } else { \
+ for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
+ result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
+ } \
+ } \
+ *r = result; \
+ }
+VUPK(hsb, s16, s8, UPKHI)
+VUPK(hsh, s32, s16, UPKHI)
+VUPK(lsb, s16, s8, UPKLO)
+VUPK(lsh, s32, s16, UPKLO)
+#undef VUPK
+#undef UPKHI
+#undef UPKLO
+
+#undef DO_HANDLE_NAN
+#undef HANDLE_NAN1
+#undef HANDLE_NAN2
+#undef HANDLE_NAN3
+#undef VECTOR_FOR_INORDER_I
+#undef HI_IDX
+#undef LO_IDX
+
+/*****************************************************************************/
+/* SPE extension helpers */
+/* Use a table to make this quicker */
+static uint8_t hbrev[16] = {
+ 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
+ 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
+};
+
+static inline uint8_t byte_reverse(uint8_t val)
+{
+ return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
+}
+
+static inline uint32_t word_reverse(uint32_t val)
+{
+ return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
+ (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
+}
+
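+/* brinc: bit-reversed increment, as used for FFT-style bit-reversed
+ * addressing. The masked low bits of arg1 are bit-reversed, incremented,
+ * and reversed back, with arg2 selecting which bits participate.
+ */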
+#define MASKBITS 16 /* Arbitrary value - to be fixed (implementation dependent) */
+target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
+{
+ uint32_t a, b, d, mask;
+
+ mask = UINT32_MAX >> (32 - MASKBITS);
+ a = arg1 & mask;
+ b = arg2 & mask;
+ d = word_reverse(1 + word_reverse(a | ~b));
+ return (arg1 & ~mask) | (d & b);
+}
+
+uint32_t helper_cntlsw32 (uint32_t val)
+{
+ if (val & 0x80000000)
+ return clz32(~val);
+ else
+ return clz32(val);
+}
+
+uint32_t helper_cntlzw32 (uint32_t val)
+{
+ return clz32(val);
+}
+
+/* Single-precision floating-point conversions */
+static inline uint32_t efscfsi(uint32_t val)
+{
+ CPU_FloatU u;
+
+ u.f = int32_to_float32(val, &env->vec_status);
+
+ return u.l;
+}
+
+static inline uint32_t efscfui(uint32_t val)
+{
+ CPU_FloatU u;
+
+ u.f = uint32_to_float32(val, &env->vec_status);
+
+ return u.l;
+}
+
+static inline int32_t efsctsi(uint32_t val)
+{
+ CPU_FloatU u;
+
+ u.l = val;
+    /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float32_is_quiet_nan(u.f)))
+ return 0;
+
+ return float32_to_int32(u.f, &env->vec_status);
+}
+
+static inline uint32_t efsctui(uint32_t val)
+{
+ CPU_FloatU u;
+
+ u.l = val;
+    /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float32_is_quiet_nan(u.f)))
+ return 0;
+
+ return float32_to_uint32(u.f, &env->vec_status);
+}
+
+static inline uint32_t efsctsiz(uint32_t val)
+{
+ CPU_FloatU u;
+
+ u.l = val;
+    /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float32_is_quiet_nan(u.f)))
+ return 0;
+
+ return float32_to_int32_round_to_zero(u.f, &env->vec_status);
+}
+
+static inline uint32_t efsctuiz(uint32_t val)
+{
+ CPU_FloatU u;
+
+ u.l = val;
+    /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float32_is_quiet_nan(u.f)))
+ return 0;
+
+ return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
+}
+
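+/* The *cfsf/*cfuf and *ctsf/*ctuf variants treat the 32-bit operand as a
+ * fixed-point fraction; the code scales by 2^32 (1ULL << 32) when
+ * converting in either direction.
+ */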
+static inline uint32_t efscfsf(uint32_t val)
+{
+ CPU_FloatU u;
+ float32 tmp;
+
+ u.f = int32_to_float32(val, &env->vec_status);
+ tmp = int64_to_float32(1ULL << 32, &env->vec_status);
+ u.f = float32_div(u.f, tmp, &env->vec_status);
+
+ return u.l;
+}
+
+static inline uint32_t efscfuf(uint32_t val)
+{
+ CPU_FloatU u;
+ float32 tmp;
+
+ u.f = uint32_to_float32(val, &env->vec_status);
+ tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
+ u.f = float32_div(u.f, tmp, &env->vec_status);
+
+ return u.l;
+}
+
+static inline uint32_t efsctsf(uint32_t val)
+{
+ CPU_FloatU u;
+ float32 tmp;
+
+ u.l = val;
+    /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float32_is_quiet_nan(u.f)))
+ return 0;
+ tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
+ u.f = float32_mul(u.f, tmp, &env->vec_status);
+
+ return float32_to_int32(u.f, &env->vec_status);
+}
+
+static inline uint32_t efsctuf(uint32_t val)
+{
+ CPU_FloatU u;
+ float32 tmp;
+
+ u.l = val;
+    /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float32_is_quiet_nan(u.f)))
+ return 0;
+ tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
+ u.f = float32_mul(u.f, tmp, &env->vec_status);
+
+ return float32_to_uint32(u.f, &env->vec_status);
+}
+
+#define HELPER_SPE_SINGLE_CONV(name) \
+uint32_t helper_e##name (uint32_t val) \
+{ \
+ return e##name(val); \
+}
+/* efscfsi */
+HELPER_SPE_SINGLE_CONV(fscfsi);
+/* efscfui */
+HELPER_SPE_SINGLE_CONV(fscfui);
+/* efscfuf */
+HELPER_SPE_SINGLE_CONV(fscfuf);
+/* efscfsf */
+HELPER_SPE_SINGLE_CONV(fscfsf);
+/* efsctsi */
+HELPER_SPE_SINGLE_CONV(fsctsi);
+/* efsctui */
+HELPER_SPE_SINGLE_CONV(fsctui);
+/* efsctsiz */
+HELPER_SPE_SINGLE_CONV(fsctsiz);
+/* efsctuiz */
+HELPER_SPE_SINGLE_CONV(fsctuiz);
+/* efsctsf */
+HELPER_SPE_SINGLE_CONV(fsctsf);
+/* efsctuf */
+HELPER_SPE_SINGLE_CONV(fsctuf);
+
+#define HELPER_SPE_VECTOR_CONV(name) \
+uint64_t helper_ev##name (uint64_t val) \
+{ \
+ return ((uint64_t)e##name(val >> 32) << 32) | \
+ (uint64_t)e##name(val); \
+}
+/* evfscfsi */
+HELPER_SPE_VECTOR_CONV(fscfsi);
+/* evfscfui */
+HELPER_SPE_VECTOR_CONV(fscfui);
+/* evfscfuf */
+HELPER_SPE_VECTOR_CONV(fscfuf);
+/* evfscfsf */
+HELPER_SPE_VECTOR_CONV(fscfsf);
+/* evfsctsi */
+HELPER_SPE_VECTOR_CONV(fsctsi);
+/* evfsctui */
+HELPER_SPE_VECTOR_CONV(fsctui);
+/* evfsctsiz */
+HELPER_SPE_VECTOR_CONV(fsctsiz);
+/* evfsctuiz */
+HELPER_SPE_VECTOR_CONV(fsctuiz);
+/* evfsctsf */
+HELPER_SPE_VECTOR_CONV(fsctsf);
+/* evfsctuf */
+HELPER_SPE_VECTOR_CONV(fsctuf);
+
+/* Single-precision floating-point arithmetic */
+static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+ u1.l = op1;
+ u2.l = op2;
+ u1.f = float32_add(u1.f, u2.f, &env->vec_status);
+ return u1.l;
+}
+
+static inline uint32_t efssub(uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+ u1.l = op1;
+ u2.l = op2;
+ u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
+ return u1.l;
+}
+
+static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+ u1.l = op1;
+ u2.l = op2;
+ u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
+ return u1.l;
+}
+
+static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+ u1.l = op1;
+ u2.l = op2;
+ u1.f = float32_div(u1.f, u2.f, &env->vec_status);
+ return u1.l;
+}
+
+#define HELPER_SPE_SINGLE_ARITH(name) \
+uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
+{ \
+ return e##name(op1, op2); \
+}
+/* efsadd */
+HELPER_SPE_SINGLE_ARITH(fsadd);
+/* efssub */
+HELPER_SPE_SINGLE_ARITH(fssub);
+/* efsmul */
+HELPER_SPE_SINGLE_ARITH(fsmul);
+/* efsdiv */
+HELPER_SPE_SINGLE_ARITH(fsdiv);
+
+#define HELPER_SPE_VECTOR_ARITH(name) \
+uint64_t helper_ev##name (uint64_t op1, uint64_t op2) \
+{ \
+ return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \
+ (uint64_t)e##name(op1, op2); \
+}
+/* evfsadd */
+HELPER_SPE_VECTOR_ARITH(fsadd);
+/* evfssub */
+HELPER_SPE_VECTOR_ARITH(fssub);
+/* evfsmul */
+HELPER_SPE_VECTOR_ARITH(fsmul);
+/* evfsdiv */
+HELPER_SPE_VECTOR_ARITH(fsdiv);
+
+/* Single-precision floating-point comparisons */
+static inline uint32_t efststlt(uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+ u1.l = op1;
+ u2.l = op2;
+ return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
+}
+
+static inline uint32_t efststgt(uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+ u1.l = op1;
+ u2.l = op2;
+ return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
+}
+
+static inline uint32_t efststeq(uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+ u1.l = op1;
+ u2.l = op2;
+ return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
+}
+
+static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
+{
+    /* XXX: TODO: test special values (NaN, infinities, ...) */
+ return efststlt(op1, op2);
+}
+
+static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
+{
+    /* XXX: TODO: test special values (NaN, infinities, ...) */
+ return efststgt(op1, op2);
+}
+
+static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
+{
+    /* XXX: TODO: test special values (NaN, infinities, ...) */
+ return efststeq(op1, op2);
+}
+
+#define HELPER_SINGLE_SPE_CMP(name) \
+uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
+{ \
+ return e##name(op1, op2) << 2; \
+}
+/* efststlt */
+HELPER_SINGLE_SPE_CMP(fststlt);
+/* efststgt */
+HELPER_SINGLE_SPE_CMP(fststgt);
+/* efststeq */
+HELPER_SINGLE_SPE_CMP(fststeq);
+/* efscmplt */
+HELPER_SINGLE_SPE_CMP(fscmplt);
+/* efscmpgt */
+HELPER_SINGLE_SPE_CMP(fscmpgt);
+/* efscmpeq */
+HELPER_SINGLE_SPE_CMP(fscmpeq);
+
+static inline uint32_t evcmp_merge(int t0, int t1)
+{
+ return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
+}
+
+#define HELPER_VECTOR_SPE_CMP(name) \
+uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \
+{ \
+ return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
+}
+/* evfststlt */
+HELPER_VECTOR_SPE_CMP(fststlt);
+/* evfststgt */
+HELPER_VECTOR_SPE_CMP(fststgt);
+/* evfststeq */
+HELPER_VECTOR_SPE_CMP(fststeq);
+/* evfscmplt */
+HELPER_VECTOR_SPE_CMP(fscmplt);
+/* evfscmpgt */
+HELPER_VECTOR_SPE_CMP(fscmpgt);
+/* evfscmpeq */
+HELPER_VECTOR_SPE_CMP(fscmpeq);
+
+/* Double-precision floating-point conversions */
+uint64_t helper_efdcfsi (uint32_t val)
+{
+ CPU_DoubleU u;
+
+ u.d = int32_to_float64(val, &env->vec_status);
+
+ return u.ll;
+}
+
+uint64_t helper_efdcfsid (uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.d = int64_to_float64(val, &env->vec_status);
+
+ return u.ll;
+}
+
+uint64_t helper_efdcfui (uint32_t val)
+{
+ CPU_DoubleU u;
+
+ u.d = uint32_to_float64(val, &env->vec_status);
+
+ return u.ll;
+}
+
+uint64_t helper_efdcfuid (uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.d = uint64_to_float64(val, &env->vec_status);
+
+ return u.ll;
+}
+
+uint32_t helper_efdctsi (uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.ll = val;
+    /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+
+ return float64_to_int32(u.d, &env->vec_status);
+}
+
+uint32_t helper_efdctui (uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.ll = val;
+    /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+
+ return float64_to_uint32(u.d, &env->vec_status);
+}
+
+uint32_t helper_efdctsiz (uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.ll = val;
+    /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+
+ return float64_to_int32_round_to_zero(u.d, &env->vec_status);
+}
+
+uint64_t helper_efdctsidz (uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.ll = val;
+    /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+
+ return float64_to_int64_round_to_zero(u.d, &env->vec_status);
+}
+
+uint32_t helper_efdctuiz (uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.ll = val;
+    /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+
+ return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
+}
+
+uint64_t helper_efdctuidz (uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.ll = val;
+    /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+
+ return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
+}
+
+uint64_t helper_efdcfsf (uint32_t val)
+{
+ CPU_DoubleU u;
+ float64 tmp;
+
+ u.d = int32_to_float64(val, &env->vec_status);
+ tmp = int64_to_float64(1ULL << 32, &env->vec_status);
+ u.d = float64_div(u.d, tmp, &env->vec_status);
+
+ return u.ll;
+}
+
+uint64_t helper_efdcfuf (uint32_t val)
+{
+ CPU_DoubleU u;
+ float64 tmp;
+
+ u.d = uint32_to_float64(val, &env->vec_status);
+ tmp = int64_to_float64(1ULL << 32, &env->vec_status);
+ u.d = float64_div(u.d, tmp, &env->vec_status);
+
+ return u.ll;
+}
+
+uint32_t helper_efdctsf (uint64_t val)
+{
+ CPU_DoubleU u;
+ float64 tmp;
+
+ u.ll = val;
+    /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+ tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
+ u.d = float64_mul(u.d, tmp, &env->vec_status);
+
+ return float64_to_int32(u.d, &env->vec_status);
+}
+
+uint32_t helper_efdctuf (uint64_t val)
+{
+ CPU_DoubleU u;
+ float64 tmp;
+
+ u.ll = val;
+    /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+ tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
+ u.d = float64_mul(u.d, tmp, &env->vec_status);
+
+ return float64_to_uint32(u.d, &env->vec_status);
+}
+
+uint32_t helper_efscfd (uint64_t val)
+{
+ CPU_DoubleU u1;
+ CPU_FloatU u2;
+
+ u1.ll = val;
+ u2.f = float64_to_float32(u1.d, &env->vec_status);
+
+ return u2.l;
+}
+
+uint64_t helper_efdcfs (uint32_t val)
+{
+ CPU_DoubleU u2;
+ CPU_FloatU u1;
+
+ u1.l = val;
+ u2.d = float32_to_float64(u1.f, &env->vec_status);
+
+ return u2.ll;
+}
+
+/* Double-precision floating-point arithmetic */
+uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+ u1.ll = op1;
+ u2.ll = op2;
+ u1.d = float64_add(u1.d, u2.d, &env->vec_status);
+ return u1.ll;
+}
+
+uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+ u1.ll = op1;
+ u2.ll = op2;
+ u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
+ return u1.ll;
+}
+
+uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+ u1.ll = op1;
+ u2.ll = op2;
+ u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
+ return u1.ll;
+}
+
+uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+ u1.ll = op1;
+ u2.ll = op2;
+ u1.d = float64_div(u1.d, u2.d, &env->vec_status);
+ return u1.ll;
+}
+
+/* Double-precision floating-point comparisons */
+uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+ u1.ll = op1;
+ u2.ll = op2;
+ return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
+}
+
+uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+ u1.ll = op1;
+ u2.ll = op2;
+ return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
+}
+
+uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+ u1.ll = op1;
+ u2.ll = op2;
+ return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
+}
+
+uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
+{
+    /* XXX: TODO: test special values (NaN, infinities, ...) */
+ return helper_efdtstlt(op1, op2);
+}
+
+uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
+{
+    /* XXX: TODO: test special values (NaN, infinities, ...) */
+ return helper_efdtstgt(op1, op2);
+}
+
+uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
+{
+    /* XXX: TODO: test special values (NaN, infinities, ...) */
+ return helper_efdtsteq(op1, op2);
+}
+
+/*****************************************************************************/
+/* Softmmu support */
+#if !defined (CONFIG_USER_ONLY)
+
+#define MMUSUFFIX _mmu
+
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+
+/* Try to fill the TLB and raise an exception on error. If retaddr is
+   NULL, the function was called from C code (i.e. not from generated
+   code or from helper.c). */
+/* XXX: fix it to restore all registers */
+void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
+{
+ TranslationBlock *tb;
+ CPUState *saved_env;
+ unsigned long pc;
+ int ret;
+
+ /* XXX: hack to restore env in all cases, even if not called from
+ generated code */
+ saved_env = env;
+ env = cpu_single_env;
+ ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
+ if (unlikely(ret != 0)) {
+ if (likely(retaddr)) {
+ /* now we have a real cpu fault */
+ pc = (unsigned long)retaddr;
+ tb = tb_find_pc(pc);
+ if (likely(tb)) {
+ /* the PC is inside the translated code. It means that we have
+ a virtual CPU fault */
+ cpu_restore_state(tb, env, pc, NULL);
+ }
+ }
+ helper_raise_exception_err(env->exception_index, env->error_code);
+ }
+ env = saved_env;
+}
+
+/* Segment registers load and store */
+target_ulong helper_load_sr (target_ulong sr_num)
+{
+#if defined(TARGET_PPC64)
+ if (env->mmu_model & POWERPC_MMU_64)
+ return ppc_load_sr(env, sr_num);
+#endif
+ return env->sr[sr_num];
+}
+
+void helper_store_sr (target_ulong sr_num, target_ulong val)
+{
+ ppc_store_sr(env, sr_num, val);
+}
+
+/* SLB management */
+#if defined(TARGET_PPC64)
+target_ulong helper_load_slb (target_ulong slb_nr)
+{
+ return ppc_load_slb(env, slb_nr);
+}
+
+void helper_store_slb (target_ulong rb, target_ulong rs)
+{
+ ppc_store_slb(env, rb, rs);
+}
+
+void helper_slbia (void)
+{
+ ppc_slb_invalidate_all(env);
+}
+
+void helper_slbie (target_ulong addr)
+{
+ ppc_slb_invalidate_one(env, addr);
+}
+
+#endif /* defined(TARGET_PPC64) */
+
+/* TLB management */
+void helper_tlbia (void)
+{
+ ppc_tlb_invalidate_all(env);
+}
+
+void helper_tlbie (target_ulong addr)
+{
+ ppc_tlb_invalidate_one(env, addr);
+}
+
+/* Software-driven TLB management */
+/* PowerPC 602/603 software TLB load instruction helpers */
+static void do_6xx_tlb (target_ulong new_EPN, int is_code)
+{
+ target_ulong RPN, CMP, EPN;
+ int way;
+
+ RPN = env->spr[SPR_RPA];
+ if (is_code) {
+ CMP = env->spr[SPR_ICMP];
+ EPN = env->spr[SPR_IMISS];
+ } else {
+ CMP = env->spr[SPR_DCMP];
+ EPN = env->spr[SPR_DMISS];
+ }
+ way = (env->spr[SPR_SRR1] >> 17) & 1;
+ (void)EPN; /* avoid a compiler warning */
+ LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
+ " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
+ RPN, way);
+ /* Store this TLB */
+ ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
+ way, is_code, CMP, RPN);
+}
+
+void helper_6xx_tlbd (target_ulong EPN)
+{
+ do_6xx_tlb(EPN, 0);
+}
+
+void helper_6xx_tlbi (target_ulong EPN)
+{
+ do_6xx_tlb(EPN, 1);
+}
+
+/* PowerPC 74xx software TLB load instructions helpers */
+static void do_74xx_tlb (target_ulong new_EPN, int is_code)
+{
+ target_ulong RPN, CMP, EPN;
+ int way;
+
+ RPN = env->spr[SPR_PTELO];
+ CMP = env->spr[SPR_PTEHI];
+ EPN = env->spr[SPR_TLBMISS] & ~0x3;
+ way = env->spr[SPR_TLBMISS] & 0x3;
+ (void)EPN; /* avoid a compiler warning */
+ LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
+ " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
+ RPN, way);
+ /* Store this TLB */
+ ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
+ way, is_code, CMP, RPN);
+}
+
+void helper_74xx_tlbd (target_ulong EPN)
+{
+ do_74xx_tlb(EPN, 0);
+}
+
+void helper_74xx_tlbi (target_ulong EPN)
+{
+ do_74xx_tlb(EPN, 1);
+}
+
+static inline target_ulong booke_tlb_to_page_size(int size)
+{
+ return 1024 << (2 * size);
+}
+
+static inline int booke_page_size_to_tlb(target_ulong page_size)
+{
+ int size;
+
+ switch (page_size) {
+ case 0x00000400UL:
+ size = 0x0;
+ break;
+ case 0x00001000UL:
+ size = 0x1;
+ break;
+ case 0x00004000UL:
+ size = 0x2;
+ break;
+ case 0x00010000UL:
+ size = 0x3;
+ break;
+ case 0x00040000UL:
+ size = 0x4;
+ break;
+ case 0x00100000UL:
+ size = 0x5;
+ break;
+ case 0x00400000UL:
+ size = 0x6;
+ break;
+ case 0x01000000UL:
+ size = 0x7;
+ break;
+ case 0x04000000UL:
+ size = 0x8;
+ break;
+ case 0x10000000UL:
+ size = 0x9;
+ break;
+ case 0x40000000UL:
+ size = 0xA;
+ break;
+#if defined (TARGET_PPC64)
+ case 0x000100000000ULL:
+ size = 0xB;
+ break;
+ case 0x000400000000ULL:
+ size = 0xC;
+ break;
+ case 0x001000000000ULL:
+ size = 0xD;
+ break;
+ case 0x004000000000ULL:
+ size = 0xE;
+ break;
+ case 0x010000000000ULL:
+ size = 0xF;
+ break;
+#endif
+ default:
+ size = -1;
+ break;
+ }
+
+ return size;
+}
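+
+/* For reference, the BookE size field grows by a factor of four per step
+ * (1 KiB << (2 * size)), so the two conversions above are exact inverses
+ * for power-of-four page sizes:
+ *   size 0 -> 1 KiB     size 3 -> 64 KiB     size 6 -> 4 MiB
+ *   size 1 -> 4 KiB     size 4 -> 256 KiB    size 7 -> 16 MiB
+ *   size 2 -> 16 KiB    size 5 -> 1 MiB      ...  size 9 -> 256 MiB
+ * booke_page_size_to_tlb() returns -1 for any other page size.
+ */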
+
+/* Helpers for 4xx TLB management */
+#define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */
+
+#define PPC4XX_TLBHI_V 0x00000040
+#define PPC4XX_TLBHI_E 0x00000020
+#define PPC4XX_TLBHI_SIZE_MIN 0
+#define PPC4XX_TLBHI_SIZE_MAX 7
+#define PPC4XX_TLBHI_SIZE_DEFAULT 1
+#define PPC4XX_TLBHI_SIZE_SHIFT 7
+#define PPC4XX_TLBHI_SIZE_MASK 0x00000007
+
+#define PPC4XX_TLBLO_EX 0x00000200
+#define PPC4XX_TLBLO_WR 0x00000100
+#define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
+#define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
+
+target_ulong helper_4xx_tlbre_hi (target_ulong entry)
+{
+ ppcemb_tlb_t *tlb;
+ target_ulong ret;
+ int size;
+
+ entry &= PPC4XX_TLB_ENTRY_MASK;
+ tlb = &env->tlb[entry].tlbe;
+ ret = tlb->EPN;
+ if (tlb->prot & PAGE_VALID) {
+ ret |= PPC4XX_TLBHI_V;
+ }
+ size = booke_page_size_to_tlb(tlb->size);
+ if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
+ size = PPC4XX_TLBHI_SIZE_DEFAULT;
+ }
+ ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
+ env->spr[SPR_40x_PID] = tlb->PID;
+ return ret;
+}
+
+target_ulong helper_4xx_tlbre_lo (target_ulong entry)
+{
+ ppcemb_tlb_t *tlb;
+ target_ulong ret;
+
+ entry &= PPC4XX_TLB_ENTRY_MASK;
+ tlb = &env->tlb[entry].tlbe;
+ ret = tlb->RPN;
+ if (tlb->prot & PAGE_EXEC) {
+ ret |= PPC4XX_TLBLO_EX;
+ }
+ if (tlb->prot & PAGE_WRITE) {
+ ret |= PPC4XX_TLBLO_WR;
+ }
+ return ret;
+}
+
+void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
+{
+ ppcemb_tlb_t *tlb;
+ target_ulong page, end;
+
+ LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
+ val);
+ entry &= PPC4XX_TLB_ENTRY_MASK;
+ tlb = &env->tlb[entry].tlbe;
+ /* Invalidate previous TLB (if it's valid) */
+ if (tlb->prot & PAGE_VALID) {
+ end = tlb->EPN + tlb->size;
+ LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
+ TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
+ for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
+ tlb_flush_page(env, page);
+ }
+ }
+ tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
+ & PPC4XX_TLBHI_SIZE_MASK);
+ /* We cannot handle TLB size < TARGET_PAGE_SIZE.
+ * If this ever occurs, one should use the ppcemb target instead
+ * of the ppc or ppc64 one
+ */
+ if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
+ cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
+ "are not supported (%d)\n",
+ tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
+ }
+ tlb->EPN = val & ~(tlb->size - 1);
+ if (val & PPC4XX_TLBHI_V) {
+ tlb->prot |= PAGE_VALID;
+ if (val & PPC4XX_TLBHI_E) {
+ /* XXX: TO BE FIXED */
+ cpu_abort(env,
+ "Little-endian TLB entries are not supported by now\n");
+ }
+ } else {
+ tlb->prot &= ~PAGE_VALID;
+ }
+ tlb->PID = env->spr[SPR_40x_PID]; /* PID */
+ LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
+ " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
+ (int)entry, tlb->RPN, tlb->EPN, tlb->size,
+ tlb->prot & PAGE_READ ? 'r' : '-',
+ tlb->prot & PAGE_WRITE ? 'w' : '-',
+ tlb->prot & PAGE_EXEC ? 'x' : '-',
+ tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
+ /* Invalidate new TLB (if valid) */
+ if (tlb->prot & PAGE_VALID) {
+ end = tlb->EPN + tlb->size;
+ LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
+ TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
+ for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
+ tlb_flush_page(env, page);
+ }
+ }
+}
+
+void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
+{
+ ppcemb_tlb_t *tlb;
+
+ LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
+ val);
+ entry &= PPC4XX_TLB_ENTRY_MASK;
+ tlb = &env->tlb[entry].tlbe;
+ tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
+ tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
+ tlb->prot = PAGE_READ;
+ if (val & PPC4XX_TLBLO_EX) {
+ tlb->prot |= PAGE_EXEC;
+ }
+ if (val & PPC4XX_TLBLO_WR) {
+ tlb->prot |= PAGE_WRITE;
+ }
+ LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
+ " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
+ (int)entry, tlb->RPN, tlb->EPN, tlb->size,
+ tlb->prot & PAGE_READ ? 'r' : '-',
+ tlb->prot & PAGE_WRITE ? 'w' : '-',
+ tlb->prot & PAGE_EXEC ? 'x' : '-',
+ tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
+}
+
+target_ulong helper_4xx_tlbsx (target_ulong address)
+{
+ return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
+}
+
+/* PowerPC 440 TLB management */
+void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
+{
+ ppcemb_tlb_t *tlb;
+ target_ulong EPN, RPN, size;
+ int do_flush_tlbs;
+
+ LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
+ __func__, word, (int)entry, value);
+ do_flush_tlbs = 0;
+ entry &= 0x3F;
+ tlb = &env->tlb[entry].tlbe;
+ switch (word) {
+ default:
+ /* Just here to please gcc */
+ case 0:
+ EPN = value & 0xFFFFFC00;
+ if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
+ do_flush_tlbs = 1;
+ tlb->EPN = EPN;
+ size = booke_tlb_to_page_size((value >> 4) & 0xF);
+ if ((tlb->prot & PAGE_VALID) && tlb->size < size)
+ do_flush_tlbs = 1;
+ tlb->size = size;
+ tlb->attr &= ~0x1;
+ tlb->attr |= (value >> 8) & 1;
+ if (value & 0x200) {
+ tlb->prot |= PAGE_VALID;
+ } else {
+ if (tlb->prot & PAGE_VALID) {
+ tlb->prot &= ~PAGE_VALID;
+ do_flush_tlbs = 1;
+ }
+ }
+ tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
+ if (do_flush_tlbs)
+ tlb_flush(env, 1);
+ break;
+ case 1:
+ RPN = value & 0xFFFFFC0F;
+ if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
+ tlb_flush(env, 1);
+ tlb->RPN = RPN;
+ break;
+ case 2:
+ tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
+ tlb->prot = tlb->prot & PAGE_VALID;
+ if (value & 0x1)
+ tlb->prot |= PAGE_READ << 4;
+ if (value & 0x2)
+ tlb->prot |= PAGE_WRITE << 4;
+ if (value & 0x4)
+ tlb->prot |= PAGE_EXEC << 4;
+ if (value & 0x8)
+ tlb->prot |= PAGE_READ;
+ if (value & 0x10)
+ tlb->prot |= PAGE_WRITE;
+ if (value & 0x20)
+ tlb->prot |= PAGE_EXEC;
+ break;
+ }
+}
+
+target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
+{
+ ppcemb_tlb_t *tlb;
+ target_ulong ret;
+ int size;
+
+ entry &= 0x3F;
+ tlb = &env->tlb[entry].tlbe;
+ switch (word) {
+ default:
+ /* Just here to please gcc */
+ case 0:
+ ret = tlb->EPN;
+ size = booke_page_size_to_tlb(tlb->size);
+ if (size < 0 || size > 0xF)
+ size = 1;
+ ret |= size << 4;
+ if (tlb->attr & 0x1)
+ ret |= 0x100;
+ if (tlb->prot & PAGE_VALID)
+ ret |= 0x200;
+ env->spr[SPR_440_MMUCR] &= ~0x000000FF;
+ env->spr[SPR_440_MMUCR] |= tlb->PID;
+ break;
+ case 1:
+ ret = tlb->RPN;
+ break;
+ case 2:
+ ret = tlb->attr & ~0x1;
+ if (tlb->prot & (PAGE_READ << 4))
+ ret |= 0x1;
+ if (tlb->prot & (PAGE_WRITE << 4))
+ ret |= 0x2;
+ if (tlb->prot & (PAGE_EXEC << 4))
+ ret |= 0x4;
+ if (tlb->prot & PAGE_READ)
+ ret |= 0x8;
+ if (tlb->prot & PAGE_WRITE)
+ ret |= 0x10;
+ if (tlb->prot & PAGE_EXEC)
+ ret |= 0x20;
+ break;
+ }
+ return ret;
+}
+
+target_ulong helper_440_tlbsx (target_ulong address)
+{
+ return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
+}
+
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
new file mode 100644
index 0000000..89413c5
--- /dev/null
+++ b/target-ppc/translate.c
@@ -0,0 +1,9141 @@
+/*
+ * PowerPC emulation for qemu: main translation routines.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "cpu.h"
+#include "exec-all.h"
+#include "disas.h"
+#include "tcg-op.h"
+#include "qemu-common.h"
+#include "host-utils.h"
+
+#include "helper.h"
+#define GEN_HELPER 1
+#include "helper.h"
+
+#define CPU_SINGLE_STEP 0x1
+#define CPU_BRANCH_STEP 0x2
+#define GDBSTUB_SINGLE_STEP 0x4
+
+/* Include definitions for instruction classes and implementation flags */
+//#define PPC_DEBUG_DISAS
+//#define DO_PPC_STATISTICS
+
+#ifdef PPC_DEBUG_DISAS
+# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
+#else
+# define LOG_DISAS(...) do { } while (0)
+#endif
+/*****************************************************************************/
+/* Code translation helpers */
+
+/* global register indexes */
+static TCGv_ptr cpu_env;
+static char cpu_reg_names[10*3 + 22*4 /* GPR */
+#if !defined(TARGET_PPC64)
+ + 10*4 + 22*5 /* SPE GPRh */
+#endif
+ + 10*4 + 22*5 /* FPR */
+ + 2*(10*6 + 22*7) /* AVRh, AVRl */
+ + 8*5 /* CRF */];
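+/* The buffer above is sized for the NUL-terminated register names created
+ * in ppc_translate_init() below: "r0".."r9" take 3 bytes and "r10".."r31"
+ * take 4 (10*3 + 22*4), the "r%dH" and "fp%d" names take one byte more, the
+ * "avr%dH"/"avr%dL" names two more, and the eight "crf%d" names 5 each. */
+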
+static TCGv cpu_gpr[32];
+#if !defined(TARGET_PPC64)
+static TCGv cpu_gprh[32];
+#endif
+static TCGv_i64 cpu_fpr[32];
+static TCGv_i64 cpu_avrh[32], cpu_avrl[32];
+static TCGv_i32 cpu_crf[8];
+static TCGv cpu_nip;
+static TCGv cpu_msr;
+static TCGv cpu_ctr;
+static TCGv cpu_lr;
+static TCGv cpu_xer;
+static TCGv cpu_reserve;
+static TCGv_i32 cpu_fpscr;
+static TCGv_i32 cpu_access_type;
+
+#include "gen-icount.h"
+
+void ppc_translate_init(void)
+{
+ int i;
+ char* p;
+ size_t cpu_reg_names_size;
+ static int done_init = 0;
+
+ if (done_init)
+ return;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+
+ p = cpu_reg_names;
+ cpu_reg_names_size = sizeof(cpu_reg_names);
+
+ for (i = 0; i < 8; i++) {
+ snprintf(p, cpu_reg_names_size, "crf%d", i);
+ cpu_crf[i] = tcg_global_mem_new_i32(TCG_AREG0,
+ offsetof(CPUState, crf[i]), p);
+ p += 5;
+ cpu_reg_names_size -= 5;
+ }
+
+ for (i = 0; i < 32; i++) {
+ snprintf(p, cpu_reg_names_size, "r%d", i);
+ cpu_gpr[i] = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUState, gpr[i]), p);
+ p += (i < 10) ? 3 : 4;
+ cpu_reg_names_size -= (i < 10) ? 3 : 4;
+#if !defined(TARGET_PPC64)
+ snprintf(p, cpu_reg_names_size, "r%dH", i);
+ cpu_gprh[i] = tcg_global_mem_new_i32(TCG_AREG0,
+ offsetof(CPUState, gprh[i]), p);
+ p += (i < 10) ? 4 : 5;
+ cpu_reg_names_size -= (i < 10) ? 4 : 5;
+#endif
+
+ snprintf(p, cpu_reg_names_size, "fp%d", i);
+ cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
+ offsetof(CPUState, fpr[i]), p);
+ p += (i < 10) ? 4 : 5;
+ cpu_reg_names_size -= (i < 10) ? 4 : 5;
+
+ snprintf(p, cpu_reg_names_size, "avr%dH", i);
+#ifdef HOST_WORDS_BIGENDIAN
+ cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
+ offsetof(CPUState, avr[i].u64[0]), p);
+#else
+ cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
+ offsetof(CPUState, avr[i].u64[1]), p);
+#endif
+ p += (i < 10) ? 6 : 7;
+ cpu_reg_names_size -= (i < 10) ? 6 : 7;
+
+ snprintf(p, cpu_reg_names_size, "avr%dL", i);
+#ifdef HOST_WORDS_BIGENDIAN
+ cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
+ offsetof(CPUState, avr[i].u64[1]), p);
+#else
+ cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
+ offsetof(CPUState, avr[i].u64[0]), p);
+#endif
+ p += (i < 10) ? 6 : 7;
+ cpu_reg_names_size -= (i < 10) ? 6 : 7;
+ }
+
+ cpu_nip = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUState, nip), "nip");
+
+ cpu_msr = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUState, msr), "msr");
+
+ cpu_ctr = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUState, ctr), "ctr");
+
+ cpu_lr = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUState, lr), "lr");
+
+ cpu_xer = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUState, xer), "xer");
+
+ cpu_reserve = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUState, reserve_addr),
+ "reserve_addr");
+
+ cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
+ offsetof(CPUState, fpscr), "fpscr");
+
+ cpu_access_type = tcg_global_mem_new_i32(TCG_AREG0,
+ offsetof(CPUState, access_type), "access_type");
+
+ /* register helpers */
+#define GEN_HELPER 2
+#include "helper.h"
+
+ done_init = 1;
+}
+
+/* internal defines */
+typedef struct DisasContext {
+ struct TranslationBlock *tb;
+ target_ulong nip;
+ uint32_t opcode;
+ uint32_t exception;
+ /* Routine used to access memory */
+ int mem_idx;
+ int access_type;
+ /* Translation flags */
+ int le_mode;
+#if defined(TARGET_PPC64)
+ int sf_mode;
+#endif
+ int fpu_enabled;
+ int altivec_enabled;
+ int spe_enabled;
+ ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
+ int singlestep_enabled;
+} DisasContext;
+
+struct opc_handler_t {
+ /* invalid bits */
+ uint32_t inval;
+ /* instruction type */
+ uint64_t type;
+ /* handler */
+ void (*handler)(DisasContext *ctx);
+#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
+ const char *oname;
+#endif
+#if defined(DO_PPC_STATISTICS)
+ uint64_t count;
+#endif
+};
+
+static inline void gen_reset_fpstatus(void)
+{
+#ifdef CONFIG_SOFTFLOAT
+ gen_helper_reset_fpstatus();
+#endif
+}
+
+static inline void gen_compute_fprf(TCGv_i64 arg, int set_fprf, int set_rc)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+
+ if (set_fprf != 0) {
+ /* This case might be optimized later */
+ tcg_gen_movi_i32(t0, 1);
+ gen_helper_compute_fprf(t0, arg, t0);
+ if (unlikely(set_rc)) {
+ tcg_gen_mov_i32(cpu_crf[1], t0);
+ }
+ gen_helper_float_check_status();
+ } else if (unlikely(set_rc)) {
+ /* We always need to compute fpcc */
+ tcg_gen_movi_i32(t0, 0);
+ gen_helper_compute_fprf(t0, arg, t0);
+ tcg_gen_mov_i32(cpu_crf[1], t0);
+ }
+
+ tcg_temp_free_i32(t0);
+}
+
+static inline void gen_set_access_type(DisasContext *ctx, int access_type)
+{
+ if (ctx->access_type != access_type) {
+ tcg_gen_movi_i32(cpu_access_type, access_type);
+ ctx->access_type = access_type;
+ }
+}
+
+static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
+{
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ tcg_gen_movi_tl(cpu_nip, nip);
+ else
+#endif
+ tcg_gen_movi_tl(cpu_nip, (uint32_t)nip);
+}
+
+static inline void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
+{
+ TCGv_i32 t0, t1;
+ if (ctx->exception == POWERPC_EXCP_NONE) {
+ gen_update_nip(ctx, ctx->nip);
+ }
+ t0 = tcg_const_i32(excp);
+ t1 = tcg_const_i32(error);
+ gen_helper_raise_exception_err(t0, t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ ctx->exception = (excp);
+}
+
+static inline void gen_exception(DisasContext *ctx, uint32_t excp)
+{
+ TCGv_i32 t0;
+ if (ctx->exception == POWERPC_EXCP_NONE) {
+ gen_update_nip(ctx, ctx->nip);
+ }
+ t0 = tcg_const_i32(excp);
+ gen_helper_raise_exception(t0);
+ tcg_temp_free_i32(t0);
+ ctx->exception = (excp);
+}
+
+static inline void gen_debug_exception(DisasContext *ctx)
+{
+ TCGv_i32 t0;
+
+ if (ctx->exception != POWERPC_EXCP_BRANCH)
+ gen_update_nip(ctx, ctx->nip);
+ t0 = tcg_const_i32(EXCP_DEBUG);
+ gen_helper_raise_exception(t0);
+ tcg_temp_free_i32(t0);
+}
+
+static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
+{
+ gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | error);
+}
+
+/* Stop translation */
+static inline void gen_stop_exception(DisasContext *ctx)
+{
+ gen_update_nip(ctx, ctx->nip);
+ ctx->exception = POWERPC_EXCP_STOP;
+}
+
+/* No need to update nip here, as execution flow will change */
+static inline void gen_sync_exception(DisasContext *ctx)
+{
+ ctx->exception = POWERPC_EXCP_SYNC;
+}
+
+#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \
+GEN_OPCODE(name, opc1, opc2, opc3, inval, type)
+
+#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type) \
+GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type)
+
+typedef struct opcode_t {
+ unsigned char opc1, opc2, opc3;
+#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
+ unsigned char pad[5];
+#else
+ unsigned char pad[1];
+#endif
+ opc_handler_t handler;
+ const char *oname;
+} opcode_t;
+
+/*****************************************************************************/
+/*** Instruction decoding ***/
+#define EXTRACT_HELPER(name, shift, nb) \
+static inline uint32_t name(uint32_t opcode) \
+{ \
+ return (opcode >> (shift)) & ((1 << (nb)) - 1); \
+}
+
+#define EXTRACT_SHELPER(name, shift, nb) \
+static inline int32_t name(uint32_t opcode) \
+{ \
+ return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1)); \
+}
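+
+/* As an example of what these macros generate, EXTRACT_HELPER(rD, 21, 5)
+ * below expands to:
+ *
+ *   static inline uint32_t rD(uint32_t opcode)
+ *   {
+ *       return (opcode >> 21) & 0x1F;
+ *   }
+ *
+ * i.e. it returns the 5-bit rD field starting at bit 21 of the instruction
+ * word.  EXTRACT_SHELPER additionally sign-extends the extracted field
+ * through its int16_t cast.
+ */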
+
+/* Opcode part 1 */
+EXTRACT_HELPER(opc1, 26, 6);
+/* Opcode part 2 */
+EXTRACT_HELPER(opc2, 1, 5);
+/* Opcode part 3 */
+EXTRACT_HELPER(opc3, 6, 5);
+/* Update Cr0 flags */
+EXTRACT_HELPER(Rc, 0, 1);
+/* Destination */
+EXTRACT_HELPER(rD, 21, 5);
+/* Source */
+EXTRACT_HELPER(rS, 21, 5);
+/* First operand */
+EXTRACT_HELPER(rA, 16, 5);
+/* Second operand */
+EXTRACT_HELPER(rB, 11, 5);
+/* Third operand */
+EXTRACT_HELPER(rC, 6, 5);
+/*** Get CRn ***/
+EXTRACT_HELPER(crfD, 23, 3);
+EXTRACT_HELPER(crfS, 18, 3);
+EXTRACT_HELPER(crbD, 21, 5);
+EXTRACT_HELPER(crbA, 16, 5);
+EXTRACT_HELPER(crbB, 11, 5);
+/* SPR / TBL */
+EXTRACT_HELPER(_SPR, 11, 10);
+static inline uint32_t SPR(uint32_t opcode)
+{
+ uint32_t sprn = _SPR(opcode);
+
+ return ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
+}
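+
+/* The 10-bit SPR number is encoded with its two 5-bit halves swapped, so the
+ * helper above swaps them back.  For instance, for SPR number 0x10C the
+ * instruction field holds _SPR = 0x188, and
+ *   ((0x188 >> 5) & 0x1F) | ((0x188 & 0x1F) << 5) = 0x00C | 0x100 = 0x10C.
+ */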
+/*** Get constants ***/
+EXTRACT_HELPER(IMM, 12, 8);
+/* 16 bits signed immediate value */
+EXTRACT_SHELPER(SIMM, 0, 16);
+/* 16 bits unsigned immediate value */
+EXTRACT_HELPER(UIMM, 0, 16);
+/* 5 bits signed immediate value */
+EXTRACT_HELPER(SIMM5, 16, 5);
+/* 5 bits unsigned immediate value */
+EXTRACT_HELPER(UIMM5, 16, 5);
+/* Bit count */
+EXTRACT_HELPER(NB, 11, 5);
+/* Shift count */
+EXTRACT_HELPER(SH, 11, 5);
+/* Vector shift count */
+EXTRACT_HELPER(VSH, 6, 4);
+/* Mask start */
+EXTRACT_HELPER(MB, 6, 5);
+/* Mask end */
+EXTRACT_HELPER(ME, 1, 5);
+/* Trap operand */
+EXTRACT_HELPER(TO, 21, 5);
+
+EXTRACT_HELPER(CRM, 12, 8);
+EXTRACT_HELPER(FM, 17, 8);
+EXTRACT_HELPER(SR, 16, 4);
+EXTRACT_HELPER(FPIMM, 12, 4);
+
+/*** Jump target decoding ***/
+/* Displacement */
+EXTRACT_SHELPER(d, 0, 16);
+/* Immediate address */
+static inline target_ulong LI(uint32_t opcode)
+{
+ return (opcode >> 0) & 0x03FFFFFC;
+}
+
+static inline uint32_t BD(uint32_t opcode)
+{
+ return (opcode >> 0) & 0xFFFC;
+}
+
+EXTRACT_HELPER(BO, 21, 5);
+EXTRACT_HELPER(BI, 16, 5);
+/* Absolute/relative address */
+EXTRACT_HELPER(AA, 1, 1);
+/* Link */
+EXTRACT_HELPER(LK, 0, 1);
+
+/* Create a mask between <start> and <end> bits */
+static inline target_ulong MASK(uint32_t start, uint32_t end)
+{
+ target_ulong ret;
+
+#if defined(TARGET_PPC64)
+ if (likely(start == 0)) {
+ ret = UINT64_MAX << (63 - end);
+ } else if (likely(end == 63)) {
+ ret = UINT64_MAX >> start;
+ }
+#else
+ if (likely(start == 0)) {
+ ret = UINT32_MAX << (31 - end);
+ } else if (likely(end == 31)) {
+ ret = UINT32_MAX >> start;
+ }
+#endif
+ else {
+ ret = (((target_ulong)(-1ULL)) >> (start)) ^
+ (((target_ulong)(-1ULL) >> (end)) >> 1);
+ if (unlikely(start > end))
+ return ~ret;
+ }
+
+ return ret;
+}
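+
+/* MASK() uses the PowerPC bit numbering, where bit 0 is the most significant
+ * bit of the word.  A few 32-bit examples:
+ *   MASK(0, 31)  = 0xFFFFFFFF   (all bits)
+ *   MASK(0, 7)   = 0xFF000000   (the high byte)
+ *   MASK(24, 31) = 0x000000FF   (the low byte)
+ *   MASK(28, 3)  = 0xF000000F   (start > end wraps around)
+ */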
+
+/*****************************************************************************/
+/* PowerPC instructions table */
+
+#if defined(DO_PPC_STATISTICS)
+#define GEN_OPCODE(name, op1, op2, op3, invl, _typ) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .pad = { 0, }, \
+ .handler = { \
+ .inval = invl, \
+ .type = _typ, \
+ .handler = &gen_##name, \
+ .oname = stringify(name), \
+ }, \
+ .oname = stringify(name), \
+}
+#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .pad = { 0, }, \
+ .handler = { \
+ .inval = invl, \
+ .type = _typ, \
+ .handler = &gen_##name, \
+ .oname = onam, \
+ }, \
+ .oname = onam, \
+}
+#else
+#define GEN_OPCODE(name, op1, op2, op3, invl, _typ) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .pad = { 0, }, \
+ .handler = { \
+ .inval = invl, \
+ .type = _typ, \
+ .handler = &gen_##name, \
+ }, \
+ .oname = stringify(name), \
+}
+#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .pad = { 0, }, \
+ .handler = { \
+ .inval = invl, \
+ .type = _typ, \
+ .handler = &gen_##name, \
+ }, \
+ .oname = onam, \
+}
+#endif
+
+/* SPR load/store helpers */
+static inline void gen_load_spr(TCGv t, int reg)
+{
+ tcg_gen_ld_tl(t, cpu_env, offsetof(CPUState, spr[reg]));
+}
+
+static inline void gen_store_spr(int reg, TCGv t)
+{
+ tcg_gen_st_tl(t, cpu_env, offsetof(CPUState, spr[reg]));
+}
+
+/* Invalid instruction */
+static void gen_invalid(DisasContext *ctx)
+{
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+}
+
+static opc_handler_t invalid_handler = {
+ .inval = 0xFFFFFFFF,
+ .type = PPC_NONE,
+ .handler = gen_invalid,
+};
+
+/*** Integer comparison ***/
+
+static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
+{
+ int l1, l2, l3;
+
+ tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_xer);
+ tcg_gen_shri_i32(cpu_crf[crf], cpu_crf[crf], XER_SO);
+ tcg_gen_andi_i32(cpu_crf[crf], cpu_crf[crf], 1);
+
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+ l3 = gen_new_label();
+ if (s) {
+ tcg_gen_brcond_tl(TCG_COND_LT, arg0, arg1, l1);
+ tcg_gen_brcond_tl(TCG_COND_GT, arg0, arg1, l2);
+ } else {
+ tcg_gen_brcond_tl(TCG_COND_LTU, arg0, arg1, l1);
+ tcg_gen_brcond_tl(TCG_COND_GTU, arg0, arg1, l2);
+ }
+ tcg_gen_ori_i32(cpu_crf[crf], cpu_crf[crf], 1 << CRF_EQ);
+ tcg_gen_br(l3);
+ gen_set_label(l1);
+ tcg_gen_ori_i32(cpu_crf[crf], cpu_crf[crf], 1 << CRF_LT);
+ tcg_gen_br(l3);
+ gen_set_label(l2);
+ tcg_gen_ori_i32(cpu_crf[crf], cpu_crf[crf], 1 << CRF_GT);
+ gen_set_label(l3);
+}
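+
+/* The sequence above first copies XER[SO] into the low bit of the CR field,
+ * then sets exactly one of the LT/GT/EQ bits according to the comparison.
+ * E.g. a signed compare of 3 against 5 with XER[SO] clear leaves only the
+ * 1 << CRF_LT bit set in crf[crfD].
+ */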
+
+static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
+{
+ TCGv t0 = tcg_const_local_tl(arg1);
+ gen_op_cmp(arg0, t0, s, crf);
+ tcg_temp_free(t0);
+}
+
+#if defined(TARGET_PPC64)
+static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
+{
+ TCGv t0, t1;
+ t0 = tcg_temp_local_new();
+ t1 = tcg_temp_local_new();
+ if (s) {
+ tcg_gen_ext32s_tl(t0, arg0);
+ tcg_gen_ext32s_tl(t1, arg1);
+ } else {
+ tcg_gen_ext32u_tl(t0, arg0);
+ tcg_gen_ext32u_tl(t1, arg1);
+ }
+ gen_op_cmp(t0, t1, s, crf);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
+{
+ TCGv t0 = tcg_const_local_tl(arg1);
+ gen_op_cmp32(arg0, t0, s, crf);
+ tcg_temp_free(t0);
+}
+#endif
+
+static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
+{
+#if defined(TARGET_PPC64)
+ if (!(ctx->sf_mode))
+ gen_op_cmpi32(reg, 0, 1, 0);
+ else
+#endif
+ gen_op_cmpi(reg, 0, 1, 0);
+}
+
+/* cmp */
+static void gen_cmp(DisasContext *ctx)
+{
+#if defined(TARGET_PPC64)
+ if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
+ gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+ 1, crfD(ctx->opcode));
+ else
+#endif
+ gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+ 1, crfD(ctx->opcode));
+}
+
+/* cmpi */
+static void gen_cmpi(DisasContext *ctx)
+{
+#if defined(TARGET_PPC64)
+ if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
+ gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
+ 1, crfD(ctx->opcode));
+ else
+#endif
+ gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
+ 1, crfD(ctx->opcode));
+}
+
+/* cmpl */
+static void gen_cmpl(DisasContext *ctx)
+{
+#if defined(TARGET_PPC64)
+ if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
+ gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+ 0, crfD(ctx->opcode));
+ else
+#endif
+ gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+ 0, crfD(ctx->opcode));
+}
+
+/* cmpli */
+static void gen_cmpli(DisasContext *ctx)
+{
+#if defined(TARGET_PPC64)
+ if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
+ gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
+ 0, crfD(ctx->opcode));
+ else
+#endif
+ gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
+ 0, crfD(ctx->opcode));
+}
+
+/* isel (PowerPC 2.03 specification) */
+static void gen_isel(DisasContext *ctx)
+{
+ int l1, l2;
+ uint32_t bi = rC(ctx->opcode);
+ uint32_t mask;
+ TCGv_i32 t0;
+
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+
+ mask = 1 << (3 - (bi & 0x03));
+ t0 = tcg_temp_new_i32();
+ tcg_gen_andi_i32(t0, cpu_crf[bi >> 2], mask);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
+ if (rA(ctx->opcode) == 0)
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
+ else
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ gen_set_label(l2);
+ tcg_temp_free_i32(t0);
+}
+
+/*** Integer arithmetic ***/
+
+static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
+ TCGv arg1, TCGv arg2, int sub)
+{
+ int l1;
+ TCGv t0;
+
+ l1 = gen_new_label();
+ /* Start with XER OV disabled, the most likely case */
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+ t0 = tcg_temp_local_new();
+ tcg_gen_xor_tl(t0, arg0, arg1);
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode)
+ tcg_gen_ext32s_tl(t0, t0);
+#endif
+ if (sub)
+ tcg_gen_brcondi_tl(TCG_COND_LT, t0, 0, l1);
+ else
+ tcg_gen_brcondi_tl(TCG_COND_GE, t0, 0, l1);
+ tcg_gen_xor_tl(t0, arg1, arg2);
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode)
+ tcg_gen_ext32s_tl(t0, t0);
+#endif
+ if (sub)
+ tcg_gen_brcondi_tl(TCG_COND_GE, t0, 0, l1);
+ else
+ tcg_gen_brcondi_tl(TCG_COND_LT, t0, 0, l1);
+ tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+}
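+
+/* This is the usual sign-based overflow test: for an add, OV is set when the
+ * operands have the same sign but the result's sign differs from it; for a
+ * subtract (arg0 = arg2 - arg1), OV is set when the operands' signs differ
+ * and the result takes arg1's sign rather than arg2's.  For instance,
+ * 0x7FFFFFFF + 1 has two positive operands and a negative result, so
+ * XER[OV,SO] gets set.
+ */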
+
+static inline void gen_op_arith_compute_ca(DisasContext *ctx, TCGv arg1,
+ TCGv arg2, int sub)
+{
+ int l1 = gen_new_label();
+
+#if defined(TARGET_PPC64)
+ if (!(ctx->sf_mode)) {
+ TCGv t0, t1;
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ tcg_gen_ext32u_tl(t0, arg1);
+ tcg_gen_ext32u_tl(t1, arg2);
+ if (sub) {
+ tcg_gen_brcond_tl(TCG_COND_GTU, t0, t1, l1);
+ } else {
+ tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
+ }
+ tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ } else
+#endif
+ {
+ if (sub) {
+ tcg_gen_brcond_tl(TCG_COND_GTU, arg1, arg2, l1);
+ } else {
+ tcg_gen_brcond_tl(TCG_COND_GEU, arg1, arg2, l1);
+ }
+ tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA);
+ gen_set_label(l1);
+ }
+}
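+
+/* The carry test relies on unsigned wrap-around: after r = a + b there was a
+ * carry out iff r is smaller (unsigned) than either operand, and after
+ * r = b - a there was no borrow (so CA must be set) iff r <= b.  Callers pass
+ * the result as arg1 and the operand to compare against as arg2.  E.g.
+ * 0xFFFFFFFF + 2 wraps to 1, which is smaller than both operands, so CA is
+ * set.
+ */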
+
+/* Common add function */
+static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
+ TCGv arg2, int add_ca, int compute_ca,
+ int compute_ov)
+{
+ TCGv t0, t1;
+
+ if ((!compute_ca && !compute_ov) ||
+ (!TCGV_EQUAL(ret,arg1) && !TCGV_EQUAL(ret, arg2))) {
+ t0 = ret;
+ } else {
+ t0 = tcg_temp_local_new();
+ }
+
+ if (add_ca) {
+ t1 = tcg_temp_local_new();
+ tcg_gen_andi_tl(t1, cpu_xer, (1 << XER_CA));
+ tcg_gen_shri_tl(t1, t1, XER_CA);
+ } else {
+ TCGV_UNUSED(t1);
+ }
+
+ if (compute_ca && compute_ov) {
+ /* Start with XER CA and OV disabled, the most likely case */
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~((1 << XER_CA) | (1 << XER_OV)));
+ } else if (compute_ca) {
+ /* Start with XER CA disabled, the most likely case */
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
+ } else if (compute_ov) {
+ /* Start with XER OV disabled, the most likely case */
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+ }
+
+ tcg_gen_add_tl(t0, arg1, arg2);
+
+ if (compute_ca) {
+ gen_op_arith_compute_ca(ctx, t0, arg1, 0);
+ }
+ if (add_ca) {
+ tcg_gen_add_tl(t0, t0, t1);
+ gen_op_arith_compute_ca(ctx, t0, t1, 0);
+ tcg_temp_free(t1);
+ }
+ if (compute_ov) {
+ gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
+ }
+
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, t0);
+
+ if (!TCGV_EQUAL(t0, ret)) {
+ tcg_gen_mov_tl(ret, t0);
+ tcg_temp_free(t0);
+ }
+}
+/* Add functions with two operands */
+#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
+ add_ca, compute_ca, compute_ov); \
+}
+/* Add functions with one operand and one immediate */
+#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \
+ add_ca, compute_ca, compute_ov) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv t0 = tcg_const_local_tl(const_val); \
+ gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], t0, \
+ add_ca, compute_ca, compute_ov); \
+ tcg_temp_free(t0); \
+}
+
+/* add add. addo addo. */
+GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
+GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
+/* addc addc. addco addco. */
+GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
+GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
+/* adde adde. addeo addeo. */
+GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
+GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
+/* addme addme. addmeo addmeo. */
+GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
+GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
+/* addze addze. addzeo addzeo.*/
+GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
+GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
+/* addi */
+static void gen_addi(DisasContext *ctx)
+{
+ target_long simm = SIMM(ctx->opcode);
+
+ if (rA(ctx->opcode) == 0) {
+ /* li case */
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm);
+ } else {
+ tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], simm);
+ }
+}
+/* addic addic.*/
+static inline void gen_op_addic(DisasContext *ctx, TCGv ret, TCGv arg1,
+ int compute_Rc0)
+{
+ target_long simm = SIMM(ctx->opcode);
+
+ /* Start with XER CA disabled, the most likely case */
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
+
+ if (likely(simm != 0)) {
+ TCGv t0 = tcg_temp_local_new();
+ tcg_gen_addi_tl(t0, arg1, simm);
+ gen_op_arith_compute_ca(ctx, t0, arg1, 0);
+ tcg_gen_mov_tl(ret, t0);
+ tcg_temp_free(t0);
+ } else {
+ tcg_gen_mov_tl(ret, arg1);
+ }
+ if (compute_Rc0) {
+ gen_set_Rc0(ctx, ret);
+ }
+}
+
+static void gen_addic(DisasContext *ctx)
+{
+ gen_op_addic(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0);
+}
+
+static void gen_addic_(DisasContext *ctx)
+{
+ gen_op_addic(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 1);
+}
+
+/* addis */
+static void gen_addis(DisasContext *ctx)
+{
+ target_long simm = SIMM(ctx->opcode);
+
+ if (rA(ctx->opcode) == 0) {
+ /* lis case */
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16);
+ } else {
+ tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], simm << 16);
+ }
+}
+
+static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
+ TCGv arg2, int sign, int compute_ov)
+{
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ TCGv_i32 t0 = tcg_temp_local_new_i32();
+ TCGv_i32 t1 = tcg_temp_local_new_i32();
+
+ tcg_gen_trunc_tl_i32(t0, arg1);
+ tcg_gen_trunc_tl_i32(t1, arg2);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, t1, 0, l1);
+ if (sign) {
+ int l3 = gen_new_label();
+ tcg_gen_brcondi_i32(TCG_COND_NE, t1, -1, l3);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, t0, INT32_MIN, l1);
+ gen_set_label(l3);
+ tcg_gen_div_i32(t0, t0, t1);
+ } else {
+ tcg_gen_divu_i32(t0, t0, t1);
+ }
+ if (compute_ov) {
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+ }
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ if (sign) {
+ tcg_gen_sari_i32(t0, t0, 31);
+ } else {
+ tcg_gen_movi_i32(t0, 0);
+ }
+ if (compute_ov) {
+ tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
+ }
+ gen_set_label(l2);
+ tcg_gen_extu_i32_tl(ret, t0);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, ret);
+}
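+/* The l1 path above catches the two cases where the architecture leaves the
+ * quotient undefined: division by zero and, for the signed form,
+ * INT32_MIN / -1 (the one quotient that does not fit in 32 bits).  In those
+ * cases the result is forced to 0, or to all-ones for a negative signed
+ * dividend, and XER[OV,SO] is set for the overflow-checking variants.
+ */
+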
+/* Div functions */
+#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
+ sign, compute_ov); \
+}
+/* divwu divwu. divwuo divwuo. */
+GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
+GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
+/* divw divw. divwo divwo. */
+GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
+GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);
+#if defined(TARGET_PPC64)
+static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
+ TCGv arg2, int sign, int compute_ov)
+{
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+
+ tcg_gen_brcondi_i64(TCG_COND_EQ, arg2, 0, l1);
+ if (sign) {
+ int l3 = gen_new_label();
+ tcg_gen_brcondi_i64(TCG_COND_NE, arg2, -1, l3);
+ tcg_gen_brcondi_i64(TCG_COND_EQ, arg1, INT64_MIN, l1);
+ gen_set_label(l3);
+ tcg_gen_div_i64(ret, arg1, arg2);
+ } else {
+ tcg_gen_divu_i64(ret, arg1, arg2);
+ }
+ if (compute_ov) {
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+ }
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ if (sign) {
+ tcg_gen_sari_i64(ret, arg1, 63);
+ } else {
+ tcg_gen_movi_i64(ret, 0);
+ }
+ if (compute_ov) {
+ tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
+ }
+ gen_set_label(l2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, ret);
+}
+#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
+ sign, compute_ov); \
+}
+/* divdu divdu. divduo divduo. */
+GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
+GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
+/* divd divd. divdo divdo. */
+GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
+GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);
+#endif
+
+/* mulhw mulhw. */
+static void gen_mulhw(DisasContext *ctx)
+{
+ TCGv_i64 t0, t1;
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+#if defined(TARGET_PPC64)
+ tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_shri_i64(cpu_gpr[rD(ctx->opcode)], t0, 32);
+#else
+ tcg_gen_ext_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ext_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t0);
+#endif
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* mulhwu mulhwu. */
+static void gen_mulhwu(DisasContext *ctx)
+{
+ TCGv_i64 t0, t1;
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+#if defined(TARGET_PPC64)
+ tcg_gen_ext32u_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ext32u_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_shri_i64(cpu_gpr[rD(ctx->opcode)], t0, 32);
+#else
+ tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t0);
+#endif
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* mullw mullw. */
+static void gen_mullw(DisasContext *ctx)
+{
+ tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_ext32s_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* mullwo mullwo. */
+static void gen_mullwo(DisasContext *ctx)
+{
+ int l1;
+ TCGv_i64 t0, t1;
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ l1 = gen_new_label();
+ /* Start with XER OV disabled, the most likely case */
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+#if defined(TARGET_PPC64)
+ tcg_gen_ext32s_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ext32s_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+#else
+ tcg_gen_ext_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ext_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+#endif
+ tcg_gen_mul_i64(t0, t0, t1);
+#if defined(TARGET_PPC64)
+ tcg_gen_ext32s_i64(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_gen_brcond_i64(TCG_COND_EQ, t0, cpu_gpr[rD(ctx->opcode)], l1);
+#else
+ tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_gen_ext32s_i64(t1, t0);
+ tcg_gen_brcond_i64(TCG_COND_EQ, t0, t1, l1);
+#endif
+ tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
+ gen_set_label(l1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
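+
+/* The overflow check above compares the full 64-bit product with its own low
+ * 32 bits sign-extended: the two match exactly when the signed result fits
+ * in 32 bits.  For instance 0x10000 * 0x10000 = 0x100000000, whose low word
+ * sign-extends to 0, so XER[OV,SO] gets set.
+ */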
+
+/* mulli */
+static void gen_mulli(DisasContext *ctx)
+{
+ tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ SIMM(ctx->opcode));
+}
+#if defined(TARGET_PPC64)
+#define GEN_INT_ARITH_MUL_HELPER(name, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_helper_##name (cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \
+ if (unlikely(Rc(ctx->opcode) != 0)) \
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \
+}
+/* mulhdu mulhdu. */
+GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00);
+/* mulhd mulhd. */
+GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02);
+
+/* mulld mulld. */
+static void gen_mulld(DisasContext *ctx)
+{
+ tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+/* mulldo mulldo. */
+GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17);
+#endif
+
+/* neg neg. nego nego. */
+static inline void gen_op_arith_neg(DisasContext *ctx, TCGv ret, TCGv arg1,
+ int ov_check)
+{
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new();
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode) {
+ tcg_gen_mov_tl(t0, arg1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, INT64_MIN, l1);
+ } else
+#endif
+ {
+ tcg_gen_ext32s_tl(t0, arg1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, INT32_MIN, l1);
+ }
+ tcg_gen_neg_tl(ret, arg1);
+ if (ov_check) {
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+ }
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_mov_tl(ret, t0);
+ if (ov_check) {
+ tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
+ }
+ gen_set_label(l2);
+ tcg_temp_free(t0);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, ret);
+}
+
+static void gen_neg(DisasContext *ctx)
+{
+ gen_op_arith_neg(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0);
+}
+
+static void gen_nego(DisasContext *ctx)
+{
+ gen_op_arith_neg(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 1);
+}
+
+/* Common subf function */
+static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
+ TCGv arg2, int add_ca, int compute_ca,
+ int compute_ov)
+{
+ TCGv t0, t1;
+
+ if ((!compute_ca && !compute_ov) ||
+ (!TCGV_EQUAL(ret, arg1) && !TCGV_EQUAL(ret, arg2))) {
+ t0 = ret;
+ } else {
+ t0 = tcg_temp_local_new();
+ }
+
+ if (add_ca) {
+ t1 = tcg_temp_local_new();
+ tcg_gen_andi_tl(t1, cpu_xer, (1 << XER_CA));
+ tcg_gen_shri_tl(t1, t1, XER_CA);
+ } else {
+ TCGV_UNUSED(t1);
+ }
+
+ if (compute_ca && compute_ov) {
+ /* Start with XER CA and OV disabled, the most likely case */
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~((1 << XER_CA) | (1 << XER_OV)));
+ } else if (compute_ca) {
+ /* Start with XER CA disabled, the most likely case */
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
+ } else if (compute_ov) {
+ /* Start with XER OV disabled, the most likely case */
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+ }
+
+ if (add_ca) {
+ tcg_gen_not_tl(t0, arg1);
+ tcg_gen_add_tl(t0, t0, arg2);
+ gen_op_arith_compute_ca(ctx, t0, arg2, 0);
+ tcg_gen_add_tl(t0, t0, t1);
+ gen_op_arith_compute_ca(ctx, t0, t1, 0);
+ tcg_temp_free(t1);
+ } else {
+ tcg_gen_sub_tl(t0, arg2, arg1);
+ if (compute_ca) {
+ gen_op_arith_compute_ca(ctx, t0, arg2, 1);
+ }
+ }
+ if (compute_ov) {
+ gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
+ }
+
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, t0);
+
+ if (!TCGV_EQUAL(t0, ret)) {
+ tcg_gen_mov_tl(ret, t0);
+ tcg_temp_free(t0);
+ }
+}
+/* Sub functions with two operands */
+#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
+ add_ca, compute_ca, compute_ov); \
+}
+/* Sub functions with one operand and one immediate */
+#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \
+ add_ca, compute_ca, compute_ov) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv t0 = tcg_const_local_tl(const_val); \
+ gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], t0, \
+ add_ca, compute_ca, compute_ov); \
+ tcg_temp_free(t0); \
+}
+/* subf subf. subfo subfo. */
+GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
+GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
+/* subfc subfc. subfco subfco. */
+GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
+GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
+/* subfe subfe. subfeo subfeo. */
+GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
+GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
+/* subfme subfme. subfmeo subfmeo. */
+GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
+GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
+/* subfze subfze. subfzeo subfzeo.*/
+GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
+GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
+
+/* subfic */
+static void gen_subfic(DisasContext *ctx)
+{
+ /* Start with XER CA disabled, the most likely case */
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_const_local_tl(SIMM(ctx->opcode));
+ tcg_gen_sub_tl(t0, t1, cpu_gpr[rA(ctx->opcode)]);
+ gen_op_arith_compute_ca(ctx, t0, t1, 1);
+ tcg_temp_free(t1);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+/*** Integer logical ***/
+#define GEN_LOGICAL2(name, tcg_op, opc, type) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], \
+ cpu_gpr[rB(ctx->opcode)]); \
+ if (unlikely(Rc(ctx->opcode) != 0)) \
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
+}
+
+#define GEN_LOGICAL1(name, tcg_op, opc, type) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); \
+ if (unlikely(Rc(ctx->opcode) != 0)) \
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
+}
+
+/* and & and. */
+GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
+/* andc & andc. */
+GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);
+
+/* andi. */
+static void gen_andi_(DisasContext *ctx)
+{
+ tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode));
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* andis. */
+static void gen_andis_(DisasContext *ctx)
+{
+ tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode) << 16);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* cntlzw */
+static void gen_cntlzw(DisasContext *ctx)
+{
+ gen_helper_cntlzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+/* eqv & eqv. */
+GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
+/* extsb & extsb. */
+GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
+/* extsh & extsh. */
+GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
+/* nand & nand. */
+GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
+/* nor & nor. */
+GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
+
+/* or & or. */
+static void gen_or(DisasContext *ctx)
+{
+ int rs, ra, rb;
+
+ rs = rS(ctx->opcode);
+ ra = rA(ctx->opcode);
+ rb = rB(ctx->opcode);
+ /* Optimisation for mr. ri case */
+ if (rs != ra || rs != rb) {
+ if (rs != rb)
+ tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
+ else
+ tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[ra]);
+ } else if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rs]);
+#if defined(TARGET_PPC64)
+ } else {
+ int prio = 0;
+
+ switch (rs) {
+ case 1:
+ /* Set process priority to low */
+ prio = 2;
+ break;
+ case 6:
+ /* Set process priority to medium-low */
+ prio = 3;
+ break;
+ case 2:
+ /* Set process priority to normal */
+ prio = 4;
+ break;
+#if !defined(CONFIG_USER_ONLY)
+ case 31:
+ if (ctx->mem_idx > 0) {
+ /* Set process priority to very low */
+ prio = 1;
+ }
+ break;
+ case 5:
+ if (ctx->mem_idx > 0) {
+ /* Set process priority to medium-high */
+ prio = 5;
+ }
+ break;
+ case 3:
+ if (ctx->mem_idx > 0) {
+ /* Set process priority to high */
+ prio = 6;
+ }
+ break;
+ case 7:
+ if (ctx->mem_idx > 1) {
+ /* Set process priority to very high */
+ prio = 7;
+ }
+ break;
+#endif
+ default:
+ /* nop */
+ break;
+ }
+ if (prio) {
+ TCGv t0 = tcg_temp_new();
+ gen_load_spr(t0, SPR_PPR);
+ tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
+ tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
+ gen_store_spr(SPR_PPR, t0);
+ tcg_temp_free(t0);
+ }
+#endif
+ }
+}
+/* orc & orc. */
+GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
+
+/* xor & xor. */
+static void gen_xor(DisasContext *ctx)
+{
+ /* Optimisation for "set to zero" case */
+ if (rS(ctx->opcode) != rB(ctx->opcode))
+ tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ else
+ tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* ori */
+static void gen_ori(DisasContext *ctx)
+{
+ target_ulong uimm = UIMM(ctx->opcode);
+
+ if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
+ /* NOP */
+ /* XXX: should handle special NOPs for POWER series */
+ return;
+ }
+ tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
+}
+
+/* oris */
+static void gen_oris(DisasContext *ctx)
+{
+ target_ulong uimm = UIMM(ctx->opcode);
+
+ if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
+ /* NOP */
+ return;
+ }
+ tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
+}
+
+/* xori */
+static void gen_xori(DisasContext *ctx)
+{
+ target_ulong uimm = UIMM(ctx->opcode);
+
+ if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
+ /* NOP */
+ return;
+ }
+ tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
+}
+
+/* xoris */
+static void gen_xoris(DisasContext *ctx)
+{
+ target_ulong uimm = UIMM(ctx->opcode);
+
+ if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
+ /* NOP */
+ return;
+ }
+ tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
+}
+
+/* popcntb : PowerPC 2.03 specification */
+static void gen_popcntb(DisasContext *ctx)
+{
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_helper_popcntb_64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ else
+#endif
+ gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+}
+
+#if defined(TARGET_PPC64)
+/* extsw & extsw. */
+GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);
+
+/* cntlzd */
+static void gen_cntlzd(DisasContext *ctx)
+{
+ gen_helper_cntlzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+#endif
+
+/*** Integer rotate ***/
+
+/* rlwimi & rlwimi. */
+static void gen_rlwimi(DisasContext *ctx)
+{
+ uint32_t mb, me, sh;
+
+ mb = MB(ctx->opcode);
+ me = ME(ctx->opcode);
+ sh = SH(ctx->opcode);
+ if (likely(sh == 0 && mb == 0 && me == 31)) {
+ tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ } else {
+ target_ulong mask;
+ TCGv t1;
+ TCGv t0 = tcg_temp_new();
+#if defined(TARGET_PPC64)
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(t2, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_rotli_i32(t2, t2, sh);
+ tcg_gen_extu_i32_i64(t0, t2);
+ tcg_temp_free_i32(t2);
+#else
+ tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
+#endif
+#if defined(TARGET_PPC64)
+ mb += 32;
+ me += 32;
+#endif
+ mask = MASK(mb, me);
+ t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, t0, mask);
+ tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
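+
+/* rlwimi rotates rS left by sh and then inserts the bits selected by
+ * MASK(mb, me) into rA, leaving the other bits of rA untouched.  For
+ * instance, rlwimi rA,rS,8,16,23 rotates rS left by 8 and merges the result
+ * under mask 0x0000FF00, i.e. it copies the least-significant byte of rS
+ * into the 0x0000FF00 byte of rA.
+ */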
+
+/* rlwinm & rlwinm. */
+static void gen_rlwinm(DisasContext *ctx)
+{
+ uint32_t mb, me, sh;
+
+ sh = SH(ctx->opcode);
+ mb = MB(ctx->opcode);
+ me = ME(ctx->opcode);
+
+ if (likely(mb == 0 && me == (31 - sh))) {
+ if (likely(sh == 0)) {
+ tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ } else {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_shli_tl(t0, t0, sh);
+ tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+ }
+ } else if (likely(sh != 0 && me == 31 && sh == (32 - mb))) {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_shri_tl(t0, t0, mb);
+ tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+ } else {
+ TCGv t0 = tcg_temp_new();
+#if defined(TARGET_PPC64)
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(t1, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_rotli_i32(t1, t1, sh);
+ tcg_gen_extu_i32_i64(t0, t1);
+ tcg_temp_free_i32(t1);
+#else
+ tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
+#endif
+#if defined(TARGET_PPC64)
+ mb += 32;
+ me += 32;
+#endif
+ tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
+ tcg_temp_free(t0);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
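+
+/* The two fast paths above correspond to the common extended mnemonics:
+ * slwi rA,rS,n is rlwinm rA,rS,n,0,31-n and srwi rA,rS,n is
+ * rlwinm rA,rS,32-n,n,31, so both are generated as a plain shift plus a
+ * 32-bit zero-extension instead of a full rotate-and-mask.
+ */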
+
+/* rlwnm & rlwnm. */
+static void gen_rlwnm(DisasContext *ctx)
+{
+ uint32_t mb, me;
+ TCGv t0;
+#if defined(TARGET_PPC64)
+ TCGv_i32 t1, t2;
+#endif
+
+ mb = MB(ctx->opcode);
+ me = ME(ctx->opcode);
+ t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1f);
+#if defined(TARGET_PPC64)
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(t1, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_trunc_i64_i32(t2, t0);
+ tcg_gen_rotl_i32(t1, t1, t2);
+ tcg_gen_extu_i32_i64(t0, t1);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+#else
+ tcg_gen_rotl_i32(t0, cpu_gpr[rS(ctx->opcode)], t0);
+#endif
+ if (unlikely(mb != 0 || me != 31)) {
+#if defined(TARGET_PPC64)
+ mb += 32;
+ me += 32;
+#endif
+ tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
+ } else {
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ }
+ tcg_temp_free(t0);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+#if defined(TARGET_PPC64)
+#define GEN_PPC64_R2(name, opc1, opc2) \
+static void glue(gen_, name##0)(DisasContext *ctx) \
+{ \
+ gen_##name(ctx, 0); \
+} \
+ \
+static void glue(gen_, name##1)(DisasContext *ctx) \
+{ \
+ gen_##name(ctx, 1); \
+}
+#define GEN_PPC64_R4(name, opc1, opc2) \
+static void glue(gen_, name##0)(DisasContext *ctx) \
+{ \
+ gen_##name(ctx, 0, 0); \
+} \
+ \
+static void glue(gen_, name##1)(DisasContext *ctx) \
+{ \
+ gen_##name(ctx, 0, 1); \
+} \
+ \
+static void glue(gen_, name##2)(DisasContext *ctx) \
+{ \
+ gen_##name(ctx, 1, 0); \
+} \
+ \
+static void glue(gen_, name##3)(DisasContext *ctx) \
+{ \
+ gen_##name(ctx, 1, 1); \
+}
+
+static inline void gen_rldinm(DisasContext *ctx, uint32_t mb, uint32_t me,
+ uint32_t sh)
+{
+ if (likely(sh != 0 && mb == 0 && me == (63 - sh))) {
+ tcg_gen_shli_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
+ } else if (likely(sh != 0 && me == 63 && sh == (64 - mb))) {
+ tcg_gen_shri_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], mb);
+ } else {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ if (likely(mb == 0 && me == 63)) {
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ } else {
+ tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
+ }
+ tcg_temp_free(t0);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+/* rldicl - rldicl. */
+static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
+{
+ uint32_t sh, mb;
+
+ sh = SH(ctx->opcode) | (shn << 5);
+ mb = MB(ctx->opcode) | (mbn << 5);
+ gen_rldinm(ctx, mb, 63, sh);
+}
+GEN_PPC64_R4(rldicl, 0x1E, 0x00);
+/* rldicr - rldicr. */
+static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
+{
+ uint32_t sh, me;
+
+ sh = SH(ctx->opcode) | (shn << 5);
+ me = MB(ctx->opcode) | (men << 5);
+ gen_rldinm(ctx, 0, me, sh);
+}
+GEN_PPC64_R4(rldicr, 0x1E, 0x02);
+/* rldic - rldic. */
+static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
+{
+ uint32_t sh, mb;
+
+ sh = SH(ctx->opcode) | (shn << 5);
+ mb = MB(ctx->opcode) | (mbn << 5);
+ gen_rldinm(ctx, mb, 63 - sh, sh);
+}
+GEN_PPC64_R4(rldic, 0x1E, 0x04);
+
+static inline void gen_rldnm(DisasContext *ctx, uint32_t mb, uint32_t me)
+{
+ TCGv t0;
+
+ mb = MB(ctx->opcode);
+ me = ME(ctx->opcode);
+ t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3f);
+ tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ if (unlikely(mb != 0 || me != 63)) {
+ tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
+ } else {
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ }
+ tcg_temp_free(t0);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* rldcl - rldcl. */
+static inline void gen_rldcl(DisasContext *ctx, int mbn)
+{
+ uint32_t mb;
+
+ mb = MB(ctx->opcode) | (mbn << 5);
+ gen_rldnm(ctx, mb, 63);
+}
+GEN_PPC64_R2(rldcl, 0x1E, 0x08);
+/* rldcr - rldcr. */
+static inline void gen_rldcr(DisasContext *ctx, int men)
+{
+ uint32_t me;
+
+ me = MB(ctx->opcode) | (men << 5);
+ gen_rldnm(ctx, 0, me);
+}
+GEN_PPC64_R2(rldcr, 0x1E, 0x09);
+/* rldimi - rldimi. */
+static inline void gen_rldimi(DisasContext *ctx, int mbn, int shn)
+{
+ uint32_t sh, mb, me;
+
+ sh = SH(ctx->opcode) | (shn << 5);
+ mb = MB(ctx->opcode) | (mbn << 5);
+ me = 63 - sh;
+ if (unlikely(sh == 0 && mb == 0)) {
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ } else {
+ TCGv t0, t1;
+ target_ulong mask;
+
+ t0 = tcg_temp_new();
+ tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ t1 = tcg_temp_new();
+ mask = MASK(mb, me);
+ tcg_gen_andi_tl(t0, t0, mask);
+ tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+GEN_PPC64_R4(rldimi, 0x1E, 0x06);
+#endif
+
+/*** Integer shift ***/
+
+/* slw & slw. */
+static void gen_slw(DisasContext *ctx)
+{
+ TCGv t0, t1;
+
+ t0 = tcg_temp_new();
+ /* AND rS with a mask that is 0 when rB >= 0x20 */
+#if defined(TARGET_PPC64)
+ tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
+ tcg_gen_sari_tl(t0, t0, 0x3f);
+#else
+ tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
+ tcg_gen_sari_tl(t0, t0, 0x1f);
+#endif
+ tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
+ tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
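+
+/* The mask trick above implements the architectural rule that shift amounts
+ * of 32..63 produce zero: shifting rB left by 26 (58 on a 64-bit target)
+ * moves bit 5 of the shift count into the sign bit, the arithmetic shift
+ * right smears it across the word, and the andc then clears rS whenever that
+ * bit was set.  The remaining low 5 bits of rB give the actual shift
+ * distance.
+ */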
+
+/* sraw & sraw. */
+static void gen_sraw(DisasContext *ctx)
+{
+ gen_helper_sraw(cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* srawi & srawi. */
+static void gen_srawi(DisasContext *ctx)
+{
+ int sh = SH(ctx->opcode);
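+ /* XER[CA] must be set only when rS is negative and at least one 1-bit is
+ * shifted out; the two conditional branches below skip the CA-set path in
+ * every other case. */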
+ if (sh != 0) {
+ int l1, l2;
+ TCGv t0;
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+ t0 = tcg_temp_local_new();
+ tcg_gen_ext32s_tl(t0, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t0, 0, l1);
+ tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1ULL << sh) - 1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
+ gen_set_label(l2);
+ tcg_gen_ext32s_tl(t0, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_sari_tl(cpu_gpr[rA(ctx->opcode)], t0, sh);
+ tcg_temp_free(t0);
+ } else {
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
+ }
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* srw & srw. */
+static void gen_srw(DisasContext *ctx)
+{
+ TCGv t0, t1;
+
+ t0 = tcg_temp_new();
+ /* AND rS with a mask that is 0 when rB >= 0x20 */
+#if defined(TARGET_PPC64)
+ tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
+ tcg_gen_sari_tl(t0, t0, 0x3f);
+#else
+ tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
+ tcg_gen_sari_tl(t0, t0, 0x1f);
+#endif
+ tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ tcg_gen_ext32u_tl(t0, t0);
+ t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
+ tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+#if defined(TARGET_PPC64)
+/* sld & sld. */
+static void gen_sld(DisasContext *ctx)
+{
+ TCGv t0, t1;
+
+ t0 = tcg_temp_new();
+ /* AND rS with a mask that is 0 when rB >= 0x40 */
+ tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
+ tcg_gen_sari_tl(t0, t0, 0x3f);
+ tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
+ tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* srad & srad. */
+static void gen_srad(DisasContext *ctx)
+{
+ gen_helper_srad(cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+/* sradi & sradi. */
+static inline void gen_sradi(DisasContext *ctx, int n)
+{
+ int sh = SH(ctx->opcode) + (n << 5);
+ if (sh != 0) {
+ int l1, l2;
+ TCGv t0;
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+ t0 = tcg_temp_local_new();
+ tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rS(ctx->opcode)], 0, l1);
+ tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1ULL << sh) - 1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
+ gen_set_label(l2);
+ tcg_temp_free(t0);
+ tcg_gen_sari_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
+ } else {
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
+ }
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+static void gen_sradi0(DisasContext *ctx)
+{
+ gen_sradi(ctx, 0);
+}
+
+static void gen_sradi1(DisasContext *ctx)
+{
+ gen_sradi(ctx, 1);
+}
+
+/* srd & srd. */
+static void gen_srd(DisasContext *ctx)
+{
+ TCGv t0, t1;
+
+ t0 = tcg_temp_new();
+ /* AND rS with a mask that is 0 when rB >= 0x40 */
+ tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
+ tcg_gen_sari_tl(t0, t0, 0x3f);
+ tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
+ tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+#endif
+
+/*** Floating-Point arithmetic ***/
+#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ /* NIP cannot be restored if the memory exception comes from a helper */ \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ gen_reset_fpstatus(); \
+ gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)], \
+ cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
+ if (isfloat) { \
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); \
+ } \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], set_fprf, \
+ Rc(ctx->opcode) != 0); \
+}
+
+#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
+_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type); \
+_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);
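+/* e.g. GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT) below produces gen_fmadd and
+ * gen_fmadds; the single-precision variant only differs by rounding the
+ * double-precision result with the frsp helper. */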
+
+#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ /* NIP cannot be restored if the memory exception comes from a helper */ \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ gen_reset_fpstatus(); \
+ gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)], \
+ cpu_fpr[rB(ctx->opcode)]); \
+ if (isfloat) { \
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); \
+ } \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
+ set_fprf, Rc(ctx->opcode) != 0); \
+}
+#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
+_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
+_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
+
+#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ /* NIP cannot be restored if the memory exception comes from a helper */ \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ gen_reset_fpstatus(); \
+ gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)], \
+ cpu_fpr[rC(ctx->opcode)]); \
+ if (isfloat) { \
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); \
+ } \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
+ set_fprf, Rc(ctx->opcode) != 0); \
+}
+#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
+_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
+_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
+
+#define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ /* NIP cannot be restored if the memory exception comes from a helper */ \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ gen_reset_fpstatus(); \
+ gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
+ set_fprf, Rc(ctx->opcode) != 0); \
+}
+
+#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ /* NIP cannot be restored if the memory exception comes from a helper */ \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ gen_reset_fpstatus(); \
+ gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
+ set_fprf, Rc(ctx->opcode) != 0); \
+}
+
+/* fadd - fadds */
+GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
+/* fdiv - fdivs */
+GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
+/* fmul - fmuls */
+GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);
+
+/* fre */
+GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);
+
+/* fres */
+GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);
+
+/* frsqrte */
+GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);
+
+/* frsqrtes */
+static void gen_frsqrtes(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ gen_reset_fpstatus();
+ gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]);
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
+}
+
+/* fsel */
+_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
+/* fsub - fsubs */
+GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
+/* Optional: */
+
+/* fsqrt */
+static void gen_fsqrt(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ gen_reset_fpstatus();
+ gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
+}
+
+static void gen_fsqrts(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ gen_reset_fpstatus();
+ gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]);
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
+}
+
+/*** Floating-Point multiply-and-add ***/
+/* fmadd - fmadds */
+GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
+/* fmsub - fmsubs */
+GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
+/* fnmadd - fnmadds */
+GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
+/* fnmsub - fnmsubs */
+GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);
+
+/*** Floating-Point round & convert ***/
+/* fctiw */
+GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
+/* fctiwz */
+GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
+/* frsp */
+GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
+#if defined(TARGET_PPC64)
+/* fcfid */
+GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC_64B);
+/* fctid */
+GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC_64B);
+/* fctidz */
+GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC_64B);
+#endif
+
+/* frin */
+GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
+/* friz */
+GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
+/* frip */
+GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
+/* frim */
+GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
+
+/*** Floating-Point compare ***/
+
+/* fcmpo */
+static void gen_fcmpo(DisasContext *ctx)
+{
+ TCGv_i32 crf;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ gen_reset_fpstatus();
+ crf = tcg_const_i32(crfD(ctx->opcode));
+ gen_helper_fcmpo(cpu_fpr[rA(ctx->opcode)], cpu_fpr[rB(ctx->opcode)], crf);
+ tcg_temp_free_i32(crf);
+ gen_helper_float_check_status();
+}
+
+/* fcmpu */
+static void gen_fcmpu(DisasContext *ctx)
+{
+ TCGv_i32 crf;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ gen_reset_fpstatus();
+ crf = tcg_const_i32(crfD(ctx->opcode));
+ gen_helper_fcmpu(cpu_fpr[rA(ctx->opcode)], cpu_fpr[rB(ctx->opcode)], crf);
+ tcg_temp_free_i32(crf);
+ gen_helper_float_check_status();
+}
+
+/*** Floating-point move ***/
+/* fabs */
+/* XXX: beware that fabs never checks for NaNs nor updates the FPSCR */
+GEN_FLOAT_B(abs, 0x08, 0x08, 0, PPC_FLOAT);
+
+/* fmr - fmr. */
+/* XXX: beware that fmr never checks for NaNs nor updates the FPSCR */
+static void gen_fmr(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
+}
+
+/* fnabs */
+/* XXX: beware that fnabs never checks for NaNs nor updates the FPSCR */
+GEN_FLOAT_B(nabs, 0x08, 0x04, 0, PPC_FLOAT);
+/* fneg */
+/* XXX: beware that fneg never checks for NaNs nor updates the FPSCR */
+GEN_FLOAT_B(neg, 0x08, 0x01, 0, PPC_FLOAT);
+
+/*** Floating-Point status & ctrl register ***/
+
+/* mcrfs */
+static void gen_mcrfs(DisasContext *ctx)
+{
+ int bfa;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ bfa = 4 * (7 - crfS(ctx->opcode));
+ tcg_gen_shri_i32(cpu_crf[crfD(ctx->opcode)], cpu_fpscr, bfa);
+ tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
+ tcg_gen_andi_i32(cpu_fpscr, cpu_fpscr, ~(0xF << bfa));
+}
+
+/* mffs */
+static void gen_mffs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_reset_fpstatus();
+ tcg_gen_extu_i32_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpscr);
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
+}
+
+/* mtfsb0 */
+static void gen_mtfsb0(DisasContext *ctx)
+{
+ uint8_t crb;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ crb = 31 - crbD(ctx->opcode);
+ gen_reset_fpstatus();
+ if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
+ TCGv_i32 t0;
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ t0 = tcg_const_i32(crb);
+ gen_helper_fpscr_clrbit(t0);
+ tcg_temp_free_i32(t0);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
+ }
+}
+
+/* mtfsb1 */
+static void gen_mtfsb1(DisasContext *ctx)
+{
+ uint8_t crb;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ crb = 31 - crbD(ctx->opcode);
+ gen_reset_fpstatus();
+ /* XXX: we pretend we can only do IEEE floating-point computations */
+ if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
+ TCGv_i32 t0;
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ t0 = tcg_const_i32(crb);
+ gen_helper_fpscr_setbit(t0);
+ tcg_temp_free_i32(t0);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
+ }
+ /* We can raise a deferred exception */
+ gen_helper_float_check_status();
+}
+
+/* mtfsf */
+static void gen_mtfsf(DisasContext *ctx)
+{
+ TCGv_i32 t0;
+ int L = ctx->opcode & 0x02000000;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ gen_reset_fpstatus();
+ if (L)
+ t0 = tcg_const_i32(0xff);
+ else
+ t0 = tcg_const_i32(FM(ctx->opcode));
+ gen_helper_store_fpscr(cpu_fpr[rB(ctx->opcode)], t0);
+ tcg_temp_free_i32(t0);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
+ }
+ /* We can raise a deferred exception */
+ gen_helper_float_check_status();
+}
+
+/* mtfsfi */
+static void gen_mtfsfi(DisasContext *ctx)
+{
+ int bf, sh;
+ TCGv_i64 t0;
+ TCGv_i32 t1;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ bf = crbD(ctx->opcode) >> 2;
+ sh = 7 - bf;
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ gen_reset_fpstatus();
+ t0 = tcg_const_i64(FPIMM(ctx->opcode) << (4 * sh));
+ t1 = tcg_const_i32(1 << sh);
+ gen_helper_store_fpscr(t0, t1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i32(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
+ }
+ /* We can raise a deferred exception */
+ gen_helper_float_check_status();
+}
+
+/*** Addressing modes ***/
+/* Register indirect with immediate index: EA = (rA|0) + SIMM */
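+/* maskl clears low-order displacement bits that DS/DQ-form encodings reuse
+ * as opcode bits: callers pass 0x03 for ld/std and 0x0F for lq.  In 32-bit
+ * (!sf_mode) mode the computed EA is truncated to 32 bits. */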
+static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
+ target_long maskl)
+{
+ target_long simm = SIMM(ctx->opcode);
+
+ simm &= ~maskl;
+ if (rA(ctx->opcode) == 0) {
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_movi_tl(EA, (uint32_t)simm);
+ } else
+#endif
+ tcg_gen_movi_tl(EA, simm);
+ } else if (likely(simm != 0)) {
+ tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(EA, EA);
+ }
+#endif
+ } else {
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ } else
+#endif
+ tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
+{
+ if (rA(ctx->opcode) == 0) {
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
+ } else
+#endif
+ tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
+ } else {
+ tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(EA, EA);
+ }
+#endif
+ }
+}
+
+static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
+{
+ if (rA(ctx->opcode) == 0) {
+ tcg_gen_movi_tl(EA, 0);
+ } else {
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ } else
+#endif
+ tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
+ target_long val)
+{
+ tcg_gen_addi_tl(ret, arg1, val);
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(ret, ret);
+ }
+#endif
+}
+
+static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask)
+{
+ int l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1, t2;
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ tcg_gen_andi_tl(t0, EA, mask);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
+ t2 = tcg_const_i32(0);
+ gen_helper_raise_exception_err(t1, t2);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+}
+
+/*** Integer load ***/
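+/* The guest's natural byte order here is big-endian, so the helpers below
+ * byte-swap after loading (and before storing) when the translation context
+ * is in little-endian mode. */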
+static inline void gen_qemu_ld8u(DisasContext *ctx, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_qemu_ld8u(arg1, arg2, ctx->mem_idx);
+}
+
+static inline void gen_qemu_ld8s(DisasContext *ctx, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_qemu_ld8s(arg1, arg2, ctx->mem_idx);
+}
+
+static inline void gen_qemu_ld16u(DisasContext *ctx, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
+ if (unlikely(ctx->le_mode)) {
+ tcg_gen_bswap16_tl(arg1, arg1);
+ }
+}
+
+static inline void gen_qemu_ld16s(DisasContext *ctx, TCGv arg1, TCGv arg2)
+{
+ if (unlikely(ctx->le_mode)) {
+ tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
+ tcg_gen_bswap16_tl(arg1, arg1);
+ tcg_gen_ext16s_tl(arg1, arg1);
+ } else {
+ tcg_gen_qemu_ld16s(arg1, arg2, ctx->mem_idx);
+ }
+}
+
+static inline void gen_qemu_ld32u(DisasContext *ctx, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
+ if (unlikely(ctx->le_mode)) {
+ tcg_gen_bswap32_tl(arg1, arg1);
+ }
+}
+
+#if defined(TARGET_PPC64)
+static inline void gen_qemu_ld32s(DisasContext *ctx, TCGv arg1, TCGv arg2)
+{
+ if (unlikely(ctx->le_mode)) {
+ tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
+ tcg_gen_bswap32_tl(arg1, arg1);
+ tcg_gen_ext32s_tl(arg1, arg1);
+ } else
+ tcg_gen_qemu_ld32s(arg1, arg2, ctx->mem_idx);
+}
+#endif
+
+static inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
+{
+ tcg_gen_qemu_ld64(arg1, arg2, ctx->mem_idx);
+ if (unlikely(ctx->le_mode)) {
+ tcg_gen_bswap64_i64(arg1, arg1);
+ }
+}
+
+static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_qemu_st8(arg1, arg2, ctx->mem_idx);
+}
+
+static inline void gen_qemu_st16(DisasContext *ctx, TCGv arg1, TCGv arg2)
+{
+ if (unlikely(ctx->le_mode)) {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ext16u_tl(t0, arg1);
+ tcg_gen_bswap16_tl(t0, t0);
+ tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx);
+ tcg_temp_free(t0);
+ } else {
+ tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx);
+ }
+}
+
+static inline void gen_qemu_st32(DisasContext *ctx, TCGv arg1, TCGv arg2)
+{
+ if (unlikely(ctx->le_mode)) {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ext32u_tl(t0, arg1);
+ tcg_gen_bswap32_tl(t0, t0);
+ tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx);
+ tcg_temp_free(t0);
+ } else {
+ tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx);
+ }
+}
+
+static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
+{
+ if (unlikely(ctx->le_mode)) {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_bswap64_i64(t0, arg1);
+ tcg_gen_qemu_st64(t0, arg2, ctx->mem_idx);
+ tcg_temp_free_i64(t0);
+ } else
+ tcg_gen_qemu_st64(arg1, arg2, ctx->mem_idx);
+}
+
+#define GEN_LD(name, ldop, opc, type) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDU(name, ldop, opc, type) \
+static void glue(gen_, name##u)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(rA(ctx->opcode) == 0 || \
+ rA(ctx->opcode) == rD(ctx->opcode))) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ if (type == PPC_64B) \
+ gen_addr_imm_index(ctx, EA, 0x03); \
+ else \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDUX(name, ldop, opc2, opc3, type) \
+static void glue(gen_, name##ux)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(rA(ctx->opcode) == 0 || \
+ rA(ctx->opcode) == rD(ctx->opcode))) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDX(name, ldop, opc2, opc3, type) \
+static void glue(gen_, name##x)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDS(name, ldop, op, type) \
+GEN_LD(name, ldop, op | 0x20, type); \
+GEN_LDU(name, ldop, op | 0x21, type); \
+GEN_LDUX(name, ldop, 0x17, op | 0x01, type); \
+GEN_LDX(name, ldop, 0x17, op | 0x00, type)
+
+/* lbz lbzu lbzux lbzx */
+GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER);
+/* lha lhau lhaux lhax */
+GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER);
+/* lhz lhzu lhzux lhzx */
+GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER);
+/* lwz lwzu lwzux lwzx */
+GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER);
+#if defined(TARGET_PPC64)
+/* lwaux */
+GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
+/* lwax */
+GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
+/* ldux */
+GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B);
+/* ldx */
+GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B);
+
+static void gen_ld(DisasContext *ctx)
+{
+ TCGv EA;
+ if (Rc(ctx->opcode)) {
+ if (unlikely(rA(ctx->opcode) == 0 ||
+ rA(ctx->opcode) == rD(ctx->opcode))) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_imm_index(ctx, EA, 0x03);
+ if (ctx->opcode & 0x02) {
+ /* lwa (lwau is undefined) */
+ gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
+ } else {
+ /* ld - ldu */
+ gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
+ }
+ if (Rc(ctx->opcode))
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
+ tcg_temp_free(EA);
+}
+
+/* lq */
+static void gen_lq(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ int ra, rd;
+ TCGv EA;
+
+ /* Restore CPU state */
+ if (unlikely(ctx->mem_idx == 0)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ ra = rA(ctx->opcode);
+ rd = rD(ctx->opcode);
+ if (unlikely((rd & 1) || rd == ra)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ if (unlikely(ctx->le_mode)) {
+ /* Little-endian mode is not handled */
+ gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_imm_index(ctx, EA, 0x0F);
+ gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
+ gen_addr_add(ctx, EA, EA, 8);
+ gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
+ tcg_temp_free(EA);
+#endif
+}
+#endif
+
+/*** Integer store ***/
+#define GEN_ST(name, stop, opc, type) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_STU(name, stop, opc, type) \
+static void glue(gen_, stop##u)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(rA(ctx->opcode) == 0)) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ if (type == PPC_64B) \
+ gen_addr_imm_index(ctx, EA, 0x03); \
+ else \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_STUX(name, stop, opc2, opc3, type) \
+static void glue(gen_, name##ux)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(rA(ctx->opcode) == 0)) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_STX(name, stop, opc2, opc3, type) \
+static void glue(gen_, name##x)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_STS(name, stop, op, type) \
+GEN_ST(name, stop, op | 0x20, type); \
+GEN_STU(name, stop, op | 0x21, type); \
+GEN_STUX(name, stop, 0x17, op | 0x01, type); \
+GEN_STX(name, stop, 0x17, op | 0x00, type)
+
+/* stb stbu stbux stbx */
+GEN_STS(stb, st8, 0x06, PPC_INTEGER);
+/* sth sthu sthux sthx */
+GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
+/* stw stwu stwux stwx */
+GEN_STS(stw, st32, 0x04, PPC_INTEGER);
+#if defined(TARGET_PPC64)
+GEN_STUX(std, st64, 0x15, 0x05, PPC_64B);
+GEN_STX(std, st64, 0x15, 0x04, PPC_64B);
+
+static void gen_std(DisasContext *ctx)
+{
+ int rs;
+ TCGv EA;
+
+ rs = rS(ctx->opcode);
+ if ((ctx->opcode & 0x3) == 0x2) {
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ /* stq */
+ if (unlikely(ctx->mem_idx == 0)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ if (unlikely(rs & 1)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ if (unlikely(ctx->le_mode)) {
+ /* Little-endian mode is not handled */
+ gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_imm_index(ctx, EA, 0x03);
+ gen_qemu_st64(ctx, cpu_gpr[rs], EA);
+ gen_addr_add(ctx, EA, EA, 8);
+ gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
+ tcg_temp_free(EA);
+#endif
+ } else {
+ /* std / stdu */
+ if (Rc(ctx->opcode)) {
+ if (unlikely(rA(ctx->opcode) == 0)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_imm_index(ctx, EA, 0x03);
+ gen_qemu_st64(ctx, cpu_gpr[rs], EA);
+ if (Rc(ctx->opcode))
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
+ tcg_temp_free(EA);
+ }
+}
+#endif
+/*** Integer load and store with byte reverse ***/
+/* lhbrx */
+static inline void gen_qemu_ld16ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
+ if (likely(!ctx->le_mode)) {
+ tcg_gen_bswap16_tl(arg1, arg1);
+ }
+}
+GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
+
+/* lwbrx */
+static inline void gen_qemu_ld32ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
+ if (likely(!ctx->le_mode)) {
+ tcg_gen_bswap32_tl(arg1, arg1);
+ }
+}
+GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
+
+/* sthbrx */
+static inline void gen_qemu_st16r(DisasContext *ctx, TCGv arg1, TCGv arg2)
+{
+ if (likely(!ctx->le_mode)) {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ext16u_tl(t0, arg1);
+ tcg_gen_bswap16_tl(t0, t0);
+ tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx);
+ tcg_temp_free(t0);
+ } else {
+ tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx);
+ }
+}
+GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
+
+/* stwbrx */
+static inline void gen_qemu_st32r(DisasContext *ctx, TCGv arg1, TCGv arg2)
+{
+ if (likely(!ctx->le_mode)) {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ext32u_tl(t0, arg1);
+ tcg_gen_bswap32_tl(t0, t0);
+ tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx);
+ tcg_temp_free(t0);
+ } else {
+ tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx);
+ }
+}
+GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
+
+/*** Integer load and store multiple ***/
+
+/* lmw */
+static void gen_lmw(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1;
+ gen_set_access_type(ctx, ACCESS_INT);
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ t0 = tcg_temp_new();
+ t1 = tcg_const_i32(rD(ctx->opcode));
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_helper_lmw(t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+}
+
+/* stmw */
+static void gen_stmw(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1;
+ gen_set_access_type(ctx, ACCESS_INT);
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ t0 = tcg_temp_new();
+ t1 = tcg_const_i32(rS(ctx->opcode));
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_helper_stmw(t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+}
+
+/*** Integer load and store strings ***/
+
+/* lswi */
+/* PowerPC32 specification says we must generate an exception if
+ * rA is in the range of registers to be loaded.
+ * On the other hand, IBM says this is valid, but rA won't be loaded.
+ * For now, I'll follow the spec...
+ */
+static void gen_lswi(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1, t2;
+ int nb = NB(ctx->opcode);
+ int start = rD(ctx->opcode);
+ int ra = rA(ctx->opcode);
+ int nr;
+
+ if (nb == 0)
+ nb = 32;
+ nr = nb / 4;
+ if (unlikely(((start + nr) > 32 &&
+ start <= ra && (start + nr - 32) > ra) ||
+ ((start + nr) <= 32 && start <= ra && (start + nr) > ra))) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ t0 = tcg_temp_new();
+ gen_addr_register(ctx, t0);
+ t1 = tcg_const_i32(nb);
+ t2 = tcg_const_i32(start);
+ gen_helper_lsw(t0, t1, t2);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+}
+
+/* lswx */
+static void gen_lswx(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1, t2, t3;
+ gen_set_access_type(ctx, ACCESS_INT);
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ t1 = tcg_const_i32(rD(ctx->opcode));
+ t2 = tcg_const_i32(rA(ctx->opcode));
+ t3 = tcg_const_i32(rB(ctx->opcode));
+ gen_helper_lswx(t0, t1, t2, t3);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+}
+
+/* stswi */
+static void gen_stswi(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1, t2;
+ int nb = NB(ctx->opcode);
+ gen_set_access_type(ctx, ACCESS_INT);
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ t0 = tcg_temp_new();
+ gen_addr_register(ctx, t0);
+ if (nb == 0)
+ nb = 32;
+ t1 = tcg_const_i32(nb);
+ t2 = tcg_const_i32(rS(ctx->opcode));
+ gen_helper_stsw(t0, t1, t2);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+}
+
+/* stswx */
+static void gen_stswx(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1, t2;
+ gen_set_access_type(ctx, ACCESS_INT);
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ t1 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t1, cpu_xer);
+ tcg_gen_andi_i32(t1, t1, 0x7F);
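+ /* for stswx the string length is taken from the low 7 bits of XER */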
+ t2 = tcg_const_i32(rS(ctx->opcode));
+ gen_helper_stsw(t0, t1, t2);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+}
+
+/*** Memory synchronisation ***/
+/* eieio */
+static void gen_eieio(DisasContext *ctx)
+{
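+ /* nothing to do: with a single emulated CPU, TCG already performs memory
+ * accesses in program order, so the barrier can be treated as a no-op */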
+}
+
+/* isync */
+static void gen_isync(DisasContext *ctx)
+{
+ gen_stop_exception(ctx);
+}
+
+/* lwarx */
+static void gen_lwarx(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv gpr = cpu_gpr[rD(ctx->opcode)];
+ gen_set_access_type(ctx, ACCESS_RES);
+ t0 = tcg_temp_local_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_check_align(ctx, t0, 0x03);
+ gen_qemu_ld32u(ctx, gpr, t0);
+ tcg_gen_mov_tl(cpu_reserve, t0);
+ tcg_gen_st_tl(gpr, cpu_env, offsetof(CPUState, reserve_val));
+ tcg_temp_free(t0);
+}
+
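+/* lwarx records the reservation address in cpu_reserve; stwcx. below only
+ * performs the store (and sets CR0[EQ]) when that address still matches,
+ * and then clears the reservation by setting cpu_reserve to -1. */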
+#if defined(CONFIG_USER_ONLY)
+static void gen_conditional_store (DisasContext *ctx, TCGv EA,
+ int reg, int size)
+{
+ TCGv t0 = tcg_temp_new();
+ uint32_t save_exception = ctx->exception;
+
+ tcg_gen_st_tl(EA, cpu_env, offsetof(CPUState, reserve_ea));
+ tcg_gen_movi_tl(t0, (size << 5) | reg);
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, reserve_info));
+ tcg_temp_free(t0);
+ gen_update_nip(ctx, ctx->nip - 4);
+ ctx->exception = POWERPC_EXCP_BRANCH;
+ gen_exception(ctx, POWERPC_EXCP_STCX);
+ ctx->exception = save_exception;
+}
+#endif
+
+/* stwcx. */
+static void gen_stwcx_(DisasContext *ctx)
+{
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_RES);
+ t0 = tcg_temp_local_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_check_align(ctx, t0, 0x03);
+#if defined(CONFIG_USER_ONLY)
+ gen_conditional_store(ctx, t0, rS(ctx->opcode), 4);
+#else
+ {
+ int l1;
+
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_xer);
+ tcg_gen_shri_i32(cpu_crf[0], cpu_crf[0], XER_SO);
+ tcg_gen_andi_i32(cpu_crf[0], cpu_crf[0], 1);
+ l1 = gen_new_label();
+ tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
+ gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], t0);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_reserve, -1);
+ }
+#endif
+ tcg_temp_free(t0);
+}
+
+#if defined(TARGET_PPC64)
+/* ldarx */
+static void gen_ldarx(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv gpr = cpu_gpr[rD(ctx->opcode)];
+ gen_set_access_type(ctx, ACCESS_RES);
+ t0 = tcg_temp_local_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_check_align(ctx, t0, 0x07);
+ gen_qemu_ld64(ctx, gpr, t0);
+ tcg_gen_mov_tl(cpu_reserve, t0);
+ tcg_gen_st_tl(gpr, cpu_env, offsetof(CPUState, reserve_val));
+ tcg_temp_free(t0);
+}
+
+/* stdcx. */
+static void gen_stdcx_(DisasContext *ctx)
+{
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_RES);
+ t0 = tcg_temp_local_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_check_align(ctx, t0, 0x07);
+#if defined(CONFIG_USER_ONLY)
+ gen_conditional_store(ctx, t0, rS(ctx->opcode), 8);
+#else
+ {
+ int l1;
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_xer);
+ tcg_gen_shri_i32(cpu_crf[0], cpu_crf[0], XER_SO);
+ tcg_gen_andi_i32(cpu_crf[0], cpu_crf[0], 1);
+ l1 = gen_new_label();
+ tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
+ gen_qemu_st64(ctx, cpu_gpr[rS(ctx->opcode)], t0);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_reserve, -1);
+ }
+#endif
+ tcg_temp_free(t0);
+}
+#endif /* defined(TARGET_PPC64) */
+
+/* sync */
+static void gen_sync(DisasContext *ctx)
+{
+}
+
+/* wait */
+static void gen_wait(DisasContext *ctx)
+{
+ /* mark the CPU as halted before stopping translation */
+ TCGv_i32 t0 = tcg_const_i32(1);
+ tcg_gen_st_i32(t0, cpu_env, offsetof(CPUState, halted));
+ tcg_temp_free_i32(t0);
+ /* Stop translation, as the CPU is supposed to sleep from now */
+ gen_exception_err(ctx, EXCP_HLT, 1);
+}
+
+/*** Floating-point load ***/
+#define GEN_LDF(name, ldop, opc, type) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDUF(name, ldop, opc, type) \
+static void glue(gen_, name##u)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ if (unlikely(rA(ctx->opcode) == 0)) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDUXF(name, ldop, opc, type) \
+static void glue(gen_, name##ux)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ if (unlikely(rA(ctx->opcode) == 0)) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDXF(name, ldop, opc2, opc3, type) \
+static void glue(gen_, name##x)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDFS(name, ldop, op, type) \
+GEN_LDF(name, ldop, op | 0x20, type); \
+GEN_LDUF(name, ldop, op | 0x21, type); \
+GEN_LDUXF(name, ldop, op | 0x01, type); \
+GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
+
+static inline void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ gen_qemu_ld32u(ctx, t0, arg2);
+ tcg_gen_trunc_tl_i32(t1, t0);
+ tcg_temp_free(t0);
+ gen_helper_float32_to_float64(arg1, t1);
+ tcg_temp_free_i32(t1);
+}
+
+/* lfd lfdu lfdux lfdx */
+GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT);
+/* lfs lfsu lfsux lfsx */
+GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT);
+
+/*** Floating-point store ***/
+#define GEN_STF(name, stop, opc, type) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_STUF(name, stop, opc, type) \
+static void glue(gen_, name##u)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ if (unlikely(rA(ctx->opcode) == 0)) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_STUXF(name, stop, opc, type) \
+static void glue(gen_, name##ux)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ if (unlikely(rA(ctx->opcode) == 0)) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_STXF(name, stop, opc2, opc3, type) \
+static void glue(gen_, name##x)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_STFS(name, stop, op, type) \
+GEN_STF(name, stop, op | 0x20, type); \
+GEN_STUF(name, stop, op | 0x21, type); \
+GEN_STUXF(name, stop, op | 0x01, type); \
+GEN_STXF(name, stop, 0x17, op | 0x00, type)
+
+static inline void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv t1 = tcg_temp_new();
+ gen_helper_float64_to_float32(t0, arg1);
+ tcg_gen_extu_i32_tl(t1, t0);
+ tcg_temp_free_i32(t0);
+ gen_qemu_st32(ctx, t1, arg2);
+ tcg_temp_free(t1);
+}
+
+/* stfd stfdu stfdux stfdx */
+GEN_STFS(stfd, st64, 0x16, PPC_FLOAT);
+/* stfs stfsu stfsux stfsx */
+GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT);
+
+/* Optional: */
+static inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_trunc_i64_tl(t0, arg1);
+ gen_qemu_st32(ctx, t0, arg2);
+ tcg_temp_free(t0);
+}
+/* stfiwx */
+GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX);
+
+/*** Branch ***/
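+/* Direct translation-block chaining: when the destination stays on the same
+ * page as the current TB and we are not single-stepping, the jump is chained
+ * with tcg_gen_goto_tb()/tcg_gen_exit_tb((long)tb + n); otherwise NIP is set
+ * and control returns to the main loop through tcg_gen_exit_tb(0). */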
+static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+{
+ TranslationBlock *tb;
+ tb = ctx->tb;
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode)
+ dest = (uint32_t) dest;
+#endif
+ if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
+ likely(!ctx->singlestep_enabled)) {
+ tcg_gen_goto_tb(n);
+ tcg_gen_movi_tl(cpu_nip, dest & ~3);
+ tcg_gen_exit_tb((long)tb + n);
+ } else {
+ tcg_gen_movi_tl(cpu_nip, dest & ~3);
+ if (unlikely(ctx->singlestep_enabled)) {
+ if ((ctx->singlestep_enabled &
+ (CPU_BRANCH_STEP | CPU_SINGLE_STEP)) &&
+ ctx->exception == POWERPC_EXCP_BRANCH) {
+ target_ulong tmp = ctx->nip;
+ ctx->nip = dest;
+ gen_exception(ctx, POWERPC_EXCP_TRACE);
+ ctx->nip = tmp;
+ }
+ if (ctx->singlestep_enabled & GDBSTUB_SINGLE_STEP) {
+ gen_debug_exception(ctx);
+ }
+ }
+ tcg_gen_exit_tb(0);
+ }
+}
+
+static inline void gen_setlr(DisasContext *ctx, target_ulong nip)
+{
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode == 0)
+ tcg_gen_movi_tl(cpu_lr, (uint32_t)nip);
+ else
+#endif
+ tcg_gen_movi_tl(cpu_lr, nip);
+}
+
+/* b ba bl bla */
+static void gen_b(DisasContext *ctx)
+{
+ target_ulong li, target;
+
+ ctx->exception = POWERPC_EXCP_BRANCH;
+ /* sign extend LI */
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ li = ((int64_t)LI(ctx->opcode) << 38) >> 38;
+ else
+#endif
+ li = ((int32_t)LI(ctx->opcode) << 6) >> 6;
+ if (likely(AA(ctx->opcode) == 0))
+ target = ctx->nip + li - 4;
+ else
+ target = li;
+ if (LK(ctx->opcode))
+ gen_setlr(ctx, ctx->nip);
+ gen_goto_tb(ctx, 0, target);
+}
+
+#define BCOND_IM 0
+#define BCOND_LR 1
+#define BCOND_CTR 2
+
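+/* BO bits as tested below: 0x10 skips the CR test, 0x08 makes "CR bit set"
+ * the taken condition, 0x04 skips the CTR decrement, and 0x02 makes
+ * "CTR == 0" (rather than "CTR != 0") the taken condition; l1 is the
+ * branch-not-taken label. */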
+static inline void gen_bcond(DisasContext *ctx, int type)
+{
+ uint32_t bo = BO(ctx->opcode);
+ int l1;
+ TCGv target;
+
+ ctx->exception = POWERPC_EXCP_BRANCH;
+ if (type == BCOND_LR || type == BCOND_CTR) {
+ target = tcg_temp_local_new();
+ if (type == BCOND_CTR)
+ tcg_gen_mov_tl(target, cpu_ctr);
+ else
+ tcg_gen_mov_tl(target, cpu_lr);
+ } else {
+ TCGV_UNUSED(target);
+ }
+ if (LK(ctx->opcode))
+ gen_setlr(ctx, ctx->nip);
+ l1 = gen_new_label();
+ if ((bo & 0x4) == 0) {
+ /* Decrement and test CTR */
+ TCGv temp = tcg_temp_new();
+ if (unlikely(type == BCOND_CTR)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode)
+ tcg_gen_ext32u_tl(temp, cpu_ctr);
+ else
+#endif
+ tcg_gen_mov_tl(temp, cpu_ctr);
+ if (bo & 0x2) {
+ tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
+ } else {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
+ }
+ tcg_temp_free(temp);
+ }
+ if ((bo & 0x10) == 0) {
+ /* Test CR */
+ uint32_t bi = BI(ctx->opcode);
+ uint32_t mask = 1 << (3 - (bi & 0x03));
+ TCGv_i32 temp = tcg_temp_new_i32();
+
+ if (bo & 0x8) {
+ tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1);
+ } else {
+ tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
+ tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1);
+ }
+ tcg_temp_free_i32(temp);
+ }
+ if (type == BCOND_IM) {
+ target_ulong li = (target_long)((int16_t)(BD(ctx->opcode)));
+ if (likely(AA(ctx->opcode) == 0)) {
+ gen_goto_tb(ctx, 0, ctx->nip + li - 4);
+ } else {
+ gen_goto_tb(ctx, 0, li);
+ }
+ gen_set_label(l1);
+ gen_goto_tb(ctx, 1, ctx->nip);
+ } else {
+#if defined(TARGET_PPC64)
+ if (!(ctx->sf_mode))
+ tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3);
+ else
+#endif
+ tcg_gen_andi_tl(cpu_nip, target, ~3);
+ tcg_gen_exit_tb(0);
+ gen_set_label(l1);
+#if defined(TARGET_PPC64)
+ if (!(ctx->sf_mode))
+ tcg_gen_movi_tl(cpu_nip, (uint32_t)ctx->nip);
+ else
+#endif
+ tcg_gen_movi_tl(cpu_nip, ctx->nip);
+ tcg_gen_exit_tb(0);
+ }
+}
+
+static void gen_bc(DisasContext *ctx)
+{
+ gen_bcond(ctx, BCOND_IM);
+}
+
+static void gen_bcctr(DisasContext *ctx)
+{
+ gen_bcond(ctx, BCOND_CTR);
+}
+
+static void gen_bclr(DisasContext *ctx)
+{
+ gen_bcond(ctx, BCOND_LR);
+}
+
+/*** Condition register logical ***/
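+/* Each cpu_crf[n] holds one 4-bit CR field; the shifts below line the crbA
+ * and crbB source bits up with crbD's position inside its field before the
+ * logical op, and the final and/or merges the single result bit back. */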
+#define GEN_CRLOGIC(name, tcg_op, opc) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ uint8_t bitmask; \
+ int sh; \
+ TCGv_i32 t0, t1; \
+ sh = (crbD(ctx->opcode) & 0x03) - (crbA(ctx->opcode) & 0x03); \
+ t0 = tcg_temp_new_i32(); \
+ if (sh > 0) \
+ tcg_gen_shri_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], sh); \
+ else if (sh < 0) \
+ tcg_gen_shli_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], -sh); \
+ else \
+ tcg_gen_mov_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2]); \
+ t1 = tcg_temp_new_i32(); \
+ sh = (crbD(ctx->opcode) & 0x03) - (crbB(ctx->opcode) & 0x03); \
+ if (sh > 0) \
+ tcg_gen_shri_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], sh); \
+ else if (sh < 0) \
+ tcg_gen_shli_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], -sh); \
+ else \
+ tcg_gen_mov_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2]); \
+ tcg_op(t0, t0, t1); \
+ bitmask = 1 << (3 - (crbD(ctx->opcode) & 0x03)); \
+ tcg_gen_andi_i32(t0, t0, bitmask); \
+ tcg_gen_andi_i32(t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask); \
+ tcg_gen_or_i32(cpu_crf[crbD(ctx->opcode) >> 2], t0, t1); \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i32(t1); \
+}
+
+/* crand */
+GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08);
+/* crandc */
+GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04);
+/* creqv */
+GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09);
+/* crnand */
+GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07);
+/* crnor */
+GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01);
+/* cror */
+GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E);
+/* crorc */
+GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D);
+/* crxor */
+GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06);
+
+/* mcrf */
+static void gen_mcrf(DisasContext *ctx)
+{
+ tcg_gen_mov_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfS(ctx->opcode)]);
+}
+
+/*** System linkage ***/
+
+/* rfi (mem_idx only) */
+static void gen_rfi(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ /* Restore CPU state */
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ gen_helper_rfi();
+ gen_sync_exception(ctx);
+#endif
+}
+
+#if defined(TARGET_PPC64)
+static void gen_rfid(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ /* Restore CPU state */
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ gen_helper_rfid();
+ gen_sync_exception(ctx);
+#endif
+}
+
+static void gen_hrfid(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ /* Restore CPU state */
+ if (unlikely(ctx->mem_idx <= 1)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ gen_helper_hrfid();
+ gen_sync_exception(ctx);
+#endif
+}
+#endif
+
+/* sc */
+#if defined(CONFIG_USER_ONLY)
+#define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL_USER
+#else
+#define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL
+#endif
+static void gen_sc(DisasContext *ctx)
+{
+ uint32_t lev;
+
+ lev = (ctx->opcode >> 5) & 0x7F;
+ gen_exception_err(ctx, POWERPC_SYSCALL, lev);
+}
+
+/*** Trap ***/
+
+/* tw */
+static void gen_tw(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_const_i32(TO(ctx->opcode));
+ /* Update the nip since this might generate a trap exception */
+ gen_update_nip(ctx, ctx->nip);
+ gen_helper_tw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
+ tcg_temp_free_i32(t0);
+}
+
+/* twi */
+static void gen_twi(DisasContext *ctx)
+{
+ TCGv t0 = tcg_const_tl(SIMM(ctx->opcode));
+ TCGv_i32 t1 = tcg_const_i32(TO(ctx->opcode));
+ /* Update the nip since this might generate a trap exception */
+ gen_update_nip(ctx, ctx->nip);
+ gen_helper_tw(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+}
+
+#if defined(TARGET_PPC64)
+/* td */
+static void gen_td(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_const_i32(TO(ctx->opcode));
+ /* Update the nip since this might generate a trap exception */
+ gen_update_nip(ctx, ctx->nip);
+ gen_helper_td(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
+ tcg_temp_free_i32(t0);
+}
+
+/* tdi */
+static void gen_tdi(DisasContext *ctx)
+{
+ TCGv t0 = tcg_const_tl(SIMM(ctx->opcode));
+ TCGv_i32 t1 = tcg_const_i32(TO(ctx->opcode));
+ /* Update the nip since this might generate a trap exception */
+ gen_update_nip(ctx, ctx->nip);
+ gen_helper_td(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+}
+#endif
+
+/*** Processor control ***/
+
+/* mcrxr */
+static void gen_mcrxr(DisasContext *ctx)
+{
+ /* CR[crfD] <- SO || OV || CA || 0, so SO has to end up in bit 3 of the
+ * field: shift the three XER bits down to bits 2..0, then up by one. */
+ tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], cpu_xer);
+ tcg_gen_shri_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], XER_CA);
+ tcg_gen_shli_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 1);
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_SO | 1 << XER_OV | 1 << XER_CA));
+}
+
+/* mfcr mfocrf */
+static void gen_mfcr(DisasContext *ctx)
+{
+ uint32_t crm, crn;
+
+ if (likely(ctx->opcode & 0x00100000)) {
+ crm = CRM(ctx->opcode);
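+ /* mfocrf: (crm & (crm - 1)) == 0 checks that exactly one CR field is
+ * selected and ctz32() gives its index; with zero or several fields
+ * selected, rD is simply left unchanged here. */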
+ if (likely(crm && ((crm & (crm - 1)) == 0))) {
+ crn = ctz32 (crm);
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)],
+ cpu_gpr[rD(ctx->opcode)], crn * 4);
+ }
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_mov_i32(t0, cpu_crf[0]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[1]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[2]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[3]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[4]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[5]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[6]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[7]);
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free_i32(t0);
+ }
+}
+
+/* mfmsr */
+static void gen_mfmsr(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_msr);
+#endif
+}
+
+static void spr_noaccess(void *opaque, int gprn, int sprn)
+{
+#if 0
+ sprn = ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
+ printf("ERROR: try to access SPR %d !\n", sprn);
+#endif
+}
+#define SPR_NOACCESS (&spr_noaccess)
+
+/* mfspr */
+static inline void gen_op_mfspr(DisasContext *ctx)
+{
+ void (*read_cb)(void *opaque, int gprn, int sprn);
+ uint32_t sprn = SPR(ctx->opcode);
+
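+ /* ctx->mem_idx doubles as the privilege level: 0 selects the user (uea),
+ * 1 the supervisor (oea) and 2 the hypervisor (hea) read callback. */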
+#if !defined(CONFIG_USER_ONLY)
+ if (ctx->mem_idx == 2)
+ read_cb = ctx->spr_cb[sprn].hea_read;
+ else if (ctx->mem_idx)
+ read_cb = ctx->spr_cb[sprn].oea_read;
+ else
+#endif
+ read_cb = ctx->spr_cb[sprn].uea_read;
+ if (likely(read_cb != NULL)) {
+ if (likely(read_cb != SPR_NOACCESS)) {
+ (*read_cb)(ctx, rD(ctx->opcode), sprn);
+ } else {
+ /* Privilege exception */
+ /* This is a hack to avoid warnings when running Linux:
+ * this OS breaks the PowerPC virtualisation model,
+ * allowing userland application to read the PVR
+ */
+ if (sprn != SPR_PVR) {
+ qemu_log("Trying to read privileged spr %d %03x at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip);
+ printf("Trying to read privileged spr %d %03x at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip);
+ }
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ }
+ } else {
+ /* Not defined */
+ qemu_log("Trying to read invalid spr %d %03x at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip);
+ printf("Trying to read invalid spr %d %03x at " TARGET_FMT_lx "\n",
+ sprn, sprn, ctx->nip);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+ }
+}
+
+static void gen_mfspr(DisasContext *ctx)
+{
+ gen_op_mfspr(ctx);
+}
+
+/* mftb */
+static void gen_mftb(DisasContext *ctx)
+{
+ gen_op_mfspr(ctx);
+}
+
+/* mtcrf mtocrf */
+static void gen_mtcrf(DisasContext *ctx)
+{
+ uint32_t crm, crn;
+
+ crm = CRM(ctx->opcode);
+ if (likely((ctx->opcode & 0x00100000))) {
+ if (crm && ((crm & (crm - 1)) == 0)) {
+ TCGv_i32 temp = tcg_temp_new_i32();
+ crn = ctz32 (crm);
+ tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_shri_i32(temp, temp, crn * 4);
+ tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf);
+ tcg_temp_free_i32(temp);
+ }
+ } else {
+ TCGv_i32 temp = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
+ for (crn = 0 ; crn < 8 ; crn++) {
+ if (crm & (1 << crn)) {
+ tcg_gen_shri_i32(cpu_crf[7 - crn], temp, crn * 4);
+ tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf);
+ }
+ }
+ tcg_temp_free_i32(temp);
+ }
+}
+
+/* mtmsr */
+#if defined(TARGET_PPC64)
+static void gen_mtmsrd(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+ if (ctx->opcode & 0x00010000) {
+ /* Special (L=1) form: only MSR[EE] and MSR[RI] are altered, so no synchronisation is needed */
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1 << MSR_RI) | (1 << MSR_EE));
+ tcg_gen_andi_tl(cpu_msr, cpu_msr, ~((1 << MSR_RI) | (1 << MSR_EE)));
+ tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
+ tcg_temp_free(t0);
+ } else {
+ /* XXX: we need to update nip before the store;
+ * if we enter power-saving mode, we will exit the loop
+ * directly from ppc_store_msr
+ */
+ gen_update_nip(ctx, ctx->nip);
+ gen_helper_store_msr(cpu_gpr[rS(ctx->opcode)]);
+ /* Must stop the translation as machine state (may have) changed */
+ /* Note that mtmsr is not always defined as context-synchronizing */
+ gen_stop_exception(ctx);
+ }
+#endif
+}
+#endif
+
+static void gen_mtmsr(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+ if (ctx->opcode & 0x00010000) {
+ /* Special (L=1) form: only MSR[EE] and MSR[RI] are altered, so no synchronisation is needed */
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1 << MSR_RI) | (1 << MSR_EE));
+ tcg_gen_andi_tl(cpu_msr, cpu_msr, ~((1 << MSR_RI) | (1 << MSR_EE)));
+ tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
+ tcg_temp_free(t0);
+ } else {
+ /* XXX: we need to update nip before the store;
+ * if we enter power-saving mode, we will exit the loop
+ * directly from ppc_store_msr
+ */
+ gen_update_nip(ctx, ctx->nip);
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_msr, 0xFFFFFFFF00000000ULL);
+ tcg_gen_ext32u_tl(t1, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_temp_free(t1);
+ gen_helper_store_msr(t0);
+ tcg_temp_free(t0);
+ } else
+#endif
+ gen_helper_store_msr(cpu_gpr[rS(ctx->opcode)]);
+ /* Must stop the translation as machine state (may have) changed */
+ /* Note that mtmsr is not always defined as context-synchronizing */
+ gen_stop_exception(ctx);
+ }
+#endif
+}
+
+/* mtspr */
+static void gen_mtspr(DisasContext *ctx)
+{
+ void (*write_cb)(void *opaque, int sprn, int gprn);
+ uint32_t sprn = SPR(ctx->opcode);
+
+#if !defined(CONFIG_USER_ONLY)
+ if (ctx->mem_idx == 2)
+ write_cb = ctx->spr_cb[sprn].hea_write;
+ else if (ctx->mem_idx)
+ write_cb = ctx->spr_cb[sprn].oea_write;
+ else
+#endif
+ write_cb = ctx->spr_cb[sprn].uea_write;
+ if (likely(write_cb != NULL)) {
+ if (likely(write_cb != SPR_NOACCESS)) {
+ (*write_cb)(ctx, sprn, rS(ctx->opcode));
+ } else {
+ /* Privilege exception */
+ qemu_log("Trying to write privileged spr %d %03x at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip);
+ printf("Trying to write privileged spr %d %03x at " TARGET_FMT_lx
+ "\n", sprn, sprn, ctx->nip);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ }
+ } else {
+ /* Not defined */
+ qemu_log("Trying to write invalid spr %d %03x at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip);
+ printf("Trying to write invalid spr %d %03x at " TARGET_FMT_lx "\n",
+ sprn, sprn, ctx->nip);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+ }
+}
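+
+/* Note: mfspr/mtspr select one of three callback tables according to the
+ * translation privilege level: uea_* when mem_idx == 0 (user mode), oea_*
+ * otherwise, and hea_* when mem_idx == 2 (presumably the hypervisor level).
+ * SPR_NOACCESS marks an SPR that exists but is privileged at this level.
+ */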
+
+/*** Cache management ***/
+
+/* dcbf */
+static void gen_dcbf(DisasContext *ctx)
+{
+ /* XXX: specification says this is treated as a load by the MMU */
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld8u(ctx, t0, t0);
+ tcg_temp_free(t0);
+}
+
+/* dcbi (Supervisor only) */
+static void gen_dcbi(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ TCGv EA, val;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ EA = tcg_temp_new();
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ gen_addr_reg_index(ctx, EA);
+ val = tcg_temp_new();
+ /* XXX: specification says this should be treated as a store by the MMU */
+ gen_qemu_ld8u(ctx, val, EA);
+ gen_qemu_st8(ctx, val, EA);
+ tcg_temp_free(val);
+ tcg_temp_free(EA);
+#endif
+}
+
+/* dcbst */
+static void gen_dcbst(DisasContext *ctx)
+{
+    /* XXX: specification says this is treated as a load by the MMU */
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld8u(ctx, t0, t0);
+ tcg_temp_free(t0);
+}
+
+/* dcbt */
+static void gen_dcbt(DisasContext *ctx)
+{
+ /* interpreted as no-op */
+    /* XXX: specification says this is treated as a load by the MMU
+ * but does not generate any exception
+ */
+}
+
+/* dcbtst */
+static void gen_dcbtst(DisasContext *ctx)
+{
+ /* interpreted as no-op */
+    /* XXX: specification says this is treated as a load by the MMU
+ * but does not generate any exception
+ */
+}
+
+/* dcbz */
+static void gen_dcbz(DisasContext *ctx)
+{
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+    /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_dcbz(t0);
+ tcg_temp_free(t0);
+}
+
+static void gen_dcbz_970(DisasContext *ctx)
+{
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+    /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ if (ctx->opcode & 0x00200000)
+ gen_helper_dcbz(t0);
+ else
+ gen_helper_dcbz_970(t0);
+ tcg_temp_free(t0);
+}
+
+/* dst / dstt */
+static void gen_dst(DisasContext *ctx)
+{
+ if (rA(ctx->opcode) == 0) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
+ } else {
+ /* interpreted as no-op */
+ }
+}
+
+/* dstst /dststt */
+static void gen_dstst(DisasContext *ctx)
+{
+ if (rA(ctx->opcode) == 0) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
+ } else {
+ /* interpreted as no-op */
+ }
+}
+
+/* dss / dssall */
+static void gen_dss(DisasContext *ctx)
+{
+ /* interpreted as no-op */
+}
+
+/* icbi */
+static void gen_icbi(DisasContext *ctx)
+{
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+    /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_icbi(t0);
+ tcg_temp_free(t0);
+}
+
+/* Optional: */
+/* dcba */
+static void gen_dcba(DisasContext *ctx)
+{
+ /* interpreted as no-op */
+    /* XXX: specification says this is treated as a store by the MMU
+ * but does not generate any exception
+ */
+}
+
+/*** Segment register manipulation ***/
+/* Supervisor only: */
+
+/* mfsr */
+static void gen_mfsr(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+ t0 = tcg_const_tl(SR(ctx->opcode));
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+#endif
+}
+
+/* mfsrin */
+static void gen_mfsrin(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+ t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 28);
+ tcg_gen_andi_tl(t0, t0, 0xF);
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+#endif
+}
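+
+/* Illustrative sketch: mfsrin takes the segment register number from the
+ * top nibble of rB, i.e. SR = (rB >> 28) & 0xF; rB = 0x30001000 therefore
+ * selects SR3.
+ */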
+
+/* mtsr */
+static void gen_mtsr(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+ t0 = tcg_const_tl(SR(ctx->opcode));
+ gen_helper_store_sr(t0, cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free(t0);
+#endif
+}
+
+/* mtsrin */
+static void gen_mtsrin(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+ t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 28);
+ tcg_gen_andi_tl(t0, t0, 0xF);
+ gen_helper_store_sr(t0, cpu_gpr[rD(ctx->opcode)]);
+ tcg_temp_free(t0);
+#endif
+}
+
+#if defined(TARGET_PPC64)
+/* Specific implementation for PowerPC 64 "bridge" emulation using SLB */
+
+/* mfsr */
+static void gen_mfsr_64b(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+ t0 = tcg_const_tl(SR(ctx->opcode));
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+#endif
+}
+
+/* mfsrin */
+static void gen_mfsrin_64b(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+ t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 28);
+ tcg_gen_andi_tl(t0, t0, 0xF);
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+#endif
+}
+
+/* mtsr */
+static void gen_mtsr_64b(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+ t0 = tcg_const_tl(SR(ctx->opcode));
+ gen_helper_store_sr(t0, cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free(t0);
+#endif
+}
+
+/* mtsrin */
+static void gen_mtsrin_64b(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+ t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 28);
+ tcg_gen_andi_tl(t0, t0, 0xF);
+ gen_helper_store_sr(t0, cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free(t0);
+#endif
+}
+
+/* slbmte */
+static void gen_slbmte(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+ gen_helper_store_slb(cpu_gpr[rB(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+#endif
+}
+
+#endif /* defined(TARGET_PPC64) */
+
+/*** Lookaside buffer management ***/
+/* Optional & mem_idx only: */
+
+/* tlbia */
+static void gen_tlbia(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ gen_helper_tlbia();
+#endif
+}
+
+/* tlbiel */
+static void gen_tlbiel(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ gen_helper_tlbie(cpu_gpr[rB(ctx->opcode)]);
+#endif
+}
+
+/* tlbie */
+static void gen_tlbie(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ext32u_tl(t0, cpu_gpr[rB(ctx->opcode)]);
+ gen_helper_tlbie(t0);
+ tcg_temp_free(t0);
+ } else
+#endif
+ gen_helper_tlbie(cpu_gpr[rB(ctx->opcode)]);
+#endif
+}
+
+/* tlbsync */
+static void gen_tlbsync(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+    /* This has no effect: it should ensure that all previous
+     * tlbie instructions have completed
+     */
+ gen_stop_exception(ctx);
+#endif
+}
+
+#if defined(TARGET_PPC64)
+/* slbia */
+static void gen_slbia(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ gen_helper_slbia();
+#endif
+}
+
+/* slbie */
+static void gen_slbie(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ gen_helper_slbie(cpu_gpr[rB(ctx->opcode)]);
+#endif
+}
+#endif
+
+/*** External control ***/
+/* Optional: */
+
+/* eciwx */
+static void gen_eciwx(DisasContext *ctx)
+{
+ TCGv t0;
+ /* Should check EAR[E] ! */
+ gen_set_access_type(ctx, ACCESS_EXT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_check_align(ctx, t0, 0x03);
+ gen_qemu_ld32u(ctx, cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+/* ecowx */
+static void gen_ecowx(DisasContext *ctx)
+{
+ TCGv t0;
+ /* Should check EAR[E] ! */
+ gen_set_access_type(ctx, ACCESS_EXT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_check_align(ctx, t0, 0x03);
+ gen_qemu_st32(ctx, cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+/* PowerPC 601 specific instructions */
+
+/* abs - abs. */
+static void gen_abs(DisasContext *ctx)
+{
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rA(ctx->opcode)], 0, l1);
+ tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ gen_set_label(l2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* abso - abso. */
+static void gen_abso(DisasContext *ctx)
+{
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ int l3 = gen_new_label();
+ /* Start with XER OV disabled, the most likely case */
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+ tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rA(ctx->opcode)], 0, l2);
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_gpr[rA(ctx->opcode)], 0x80000000, l1);
+ tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l3);
+ gen_set_label(l2);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ gen_set_label(l3);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* clcs */
+static void gen_clcs(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_const_i32(rA(ctx->opcode));
+ gen_helper_clcs(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free_i32(t0);
+ /* Rc=1 sets CR0 to an undefined state */
+}
+
+/* div - div. */
+static void gen_div(DisasContext *ctx)
+{
+ gen_helper_div(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* divo - divo. */
+static void gen_divo(DisasContext *ctx)
+{
+ gen_helper_divo(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* divs - divs. */
+static void gen_divs(DisasContext *ctx)
+{
+ gen_helper_divs(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* divso - divso. */
+static void gen_divso(DisasContext *ctx)
+{
+ gen_helper_divso(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* doz - doz. */
+static void gen_doz(DisasContext *ctx)
+{
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ tcg_gen_brcond_tl(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], l1);
+    tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
+    tcg_gen_br(l2);
+    gen_set_label(l1);
+    tcg_gen_sub_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ gen_set_label(l2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
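+
+/* Worked example (assuming the usual POWER "difference or zero" semantics,
+ * rD = max(rB - rA, 0)): rA = 3, rB = 7 gives rD = 4, while rA = 10,
+ * rB = 7 fails the signed rB >= rA test and rD is set to 0.
+ */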
+
+/* dozo - dozo. */
+static void gen_dozo(DisasContext *ctx)
+{
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ /* Start with XER OV disabled, the most likely case */
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+ tcg_gen_brcond_tl(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], l1);
+    tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
+    tcg_gen_br(l2);
+    gen_set_label(l1);
+    tcg_gen_sub_tl(t0, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+    tcg_gen_xor_tl(t1, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+    tcg_gen_xor_tl(t2, cpu_gpr[rA(ctx->opcode)], t0);
+    tcg_gen_andc_tl(t1, t1, t2);
+    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+    tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l2);
+    tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
+ gen_set_label(l2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* dozi */
+static void gen_dozi(DisasContext *ctx)
+{
+ target_long simm = SIMM(ctx->opcode);
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_LT, cpu_gpr[rA(ctx->opcode)], simm, l1);
+    tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
+    tcg_gen_br(l2);
+    gen_set_label(l1);
+    tcg_gen_subfi_tl(cpu_gpr[rD(ctx->opcode)], simm, cpu_gpr[rA(ctx->opcode)]);
+ gen_set_label(l2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* lscbx - lscbx. */
+static void gen_lscbx(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_const_i32(rD(ctx->opcode));
+ TCGv_i32 t2 = tcg_const_i32(rA(ctx->opcode));
+ TCGv_i32 t3 = tcg_const_i32(rB(ctx->opcode));
+
+ gen_addr_reg_index(ctx, t0);
+    /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ gen_helper_lscbx(t0, t0, t1, t2, t3);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~0x7F);
+ tcg_gen_or_tl(cpu_xer, cpu_xer, t0);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, t0);
+ tcg_temp_free(t0);
+}
+
+/* maskg - maskg. */
+static void gen_maskg(DisasContext *ctx)
+{
+ int l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_movi_tl(t3, 0xFFFFFFFF);
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_andi_tl(t1, cpu_gpr[rS(ctx->opcode)], 0x1F);
+ tcg_gen_addi_tl(t2, t0, 1);
+ tcg_gen_shr_tl(t2, t3, t2);
+ tcg_gen_shr_tl(t3, t3, t1);
+ tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], t2, t3);
+ tcg_gen_brcond_tl(TCG_COND_GE, t0, t1, l1);
+ tcg_gen_neg_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ tcg_temp_free(t3);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* maskir - maskir. */
+static void gen_maskir(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_and_tl(t0, cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_andc_tl(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* mul - mul. */
+static void gen_mul(DisasContext *ctx)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_trunc_i64_tl(t2, t0);
+ gen_store_spr(SPR_MQ, t2);
+ tcg_gen_shri_i64(t1, t0, 32);
+ tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* mulo - mulo. */
+static void gen_mulo(DisasContext *ctx)
+{
+ int l1 = gen_new_label();
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv t2 = tcg_temp_new();
+ /* Start with XER OV disabled, the most likely case */
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+ tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_trunc_i64_tl(t2, t0);
+ gen_store_spr(SPR_MQ, t2);
+ tcg_gen_shri_i64(t1, t0, 32);
+ tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t1);
+ tcg_gen_ext32s_i64(t1, t0);
+ tcg_gen_brcond_i64(TCG_COND_EQ, t0, t1, l1);
+ tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
+ gen_set_label(l1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* nabs - nabs. */
+static void gen_nabs(DisasContext *ctx)
+{
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_GT, cpu_gpr[rA(ctx->opcode)], 0, l1);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ gen_set_label(l2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* nabso - nabso. */
+static void gen_nabso(DisasContext *ctx)
+{
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_GT, cpu_gpr[rA(ctx->opcode)], 0, l1);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ gen_set_label(l2);
+ /* nabs never overflows */
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* rlmi - rlmi. */
+static void gen_rlmi(DisasContext *ctx)
+{
+ uint32_t mb = MB(ctx->opcode);
+ uint32_t me = ME(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ tcg_gen_andi_tl(t0, t0, MASK(mb, me));
+ tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], ~MASK(mb, me));
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* rrib - rrib. */
+static void gen_rrib(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t1, 0x80000000);
+ tcg_gen_shr_tl(t1, t1, t0);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ tcg_gen_and_tl(t0, t0, t1);
+ tcg_gen_andc_tl(t1, cpu_gpr[rA(ctx->opcode)], t1);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* sle - sle. */
+static void gen_sle(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_subfi_tl(t1, 32, t1);
+ tcg_gen_shr_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_or_tl(t1, t0, t1);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
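+
+/* Sketch of the MQ side effect (assuming a 32-bit register width): rA gets
+ * rS << n while SPR_MQ receives rotl32(rS, n), so the bits shifted out of
+ * rA reappear at the low end of MQ. E.g. rS = 0x80000001, n = 1 yields
+ * rA = 0x00000002 and MQ = 0x00000003.
+ */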
+
+/* sleq - sleq. */
+static void gen_sleq(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t2, 0xFFFFFFFF);
+ tcg_gen_shl_tl(t2, t2, t0);
+ tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ gen_load_spr(t1, SPR_MQ);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_and_tl(t0, t0, t2);
+ tcg_gen_andc_tl(t1, t1, t2);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* sliq - sliq. */
+static void gen_sliq(DisasContext *ctx)
+{
+ int sh = SH(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_shli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ tcg_gen_shri_tl(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh);
+ tcg_gen_or_tl(t1, t0, t1);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* slliq - slliq. */
+static void gen_slliq(DisasContext *ctx)
+{
+ int sh = SH(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ gen_load_spr(t1, SPR_MQ);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_andi_tl(t0, t0, (0xFFFFFFFFU << sh));
+ tcg_gen_andi_tl(t1, t1, ~(0xFFFFFFFFU << sh));
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* sllq - sllq. */
+static void gen_sllq(DisasContext *ctx)
+{
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_local_new();
+ TCGv t2 = tcg_temp_local_new();
+ tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t1, 0xFFFFFFFF);
+ tcg_gen_shl_tl(t1, t1, t2);
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ gen_load_spr(t0, SPR_MQ);
+ tcg_gen_and_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t2);
+ gen_load_spr(t2, SPR_MQ);
+ tcg_gen_andc_tl(t1, t2, t1);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ gen_set_label(l2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* slq - slq. */
+static void gen_slq(DisasContext *ctx)
+{
+ int l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_subfi_tl(t1, 32, t1);
+ tcg_gen_shr_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_or_tl(t1, t0, t1);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x20);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* sraiq - sraiq. */
+static void gen_sraiq(DisasContext *ctx)
+{
+ int sh = SH(ctx->opcode);
+ int l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ tcg_gen_shli_tl(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh);
+ tcg_gen_or_tl(t0, t0, t1);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
+ tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rS(ctx->opcode)], 0, l1);
+ tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_CA));
+ gen_set_label(l1);
+ tcg_gen_sari_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* sraq - sraq. */
+static void gen_sraq(DisasContext *ctx)
+{
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_local_new();
+ TCGv t2 = tcg_temp_local_new();
+ tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t2);
+ tcg_gen_sar_tl(t1, cpu_gpr[rS(ctx->opcode)], t2);
+ tcg_gen_subfi_tl(t2, 32, t2);
+ tcg_gen_shl_tl(t2, cpu_gpr[rS(ctx->opcode)], t2);
+ tcg_gen_or_tl(t0, t0, t2);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20);
+    tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ tcg_gen_mov_tl(t2, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_sari_tl(t1, cpu_gpr[rS(ctx->opcode)], 31);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t1);
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l2);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, l2);
+ tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_CA));
+ gen_set_label(l2);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* sre - sre. */
+static void gen_sre(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_subfi_tl(t1, 32, t1);
+ tcg_gen_shl_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_or_tl(t1, t0, t1);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* srea - srea. */
+static void gen_srea(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_rotr_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_sar_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* sreq */
+static void gen_sreq(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t1, 0xFFFFFFFF);
+ tcg_gen_shr_tl(t1, t1, t0);
+ tcg_gen_rotr_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ gen_load_spr(t2, SPR_MQ);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_and_tl(t0, t0, t1);
+ tcg_gen_andc_tl(t2, t2, t1);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* sriq */
+static void gen_sriq(DisasContext *ctx)
+{
+ int sh = SH(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ tcg_gen_shli_tl(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh);
+ tcg_gen_or_tl(t1, t0, t1);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* srliq */
+static void gen_srliq(DisasContext *ctx)
+{
+ int sh = SH(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_rotri_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ gen_load_spr(t1, SPR_MQ);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_andi_tl(t0, t0, (0xFFFFFFFFU >> sh));
+ tcg_gen_andi_tl(t1, t1, ~(0xFFFFFFFFU >> sh));
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* srlq */
+static void gen_srlq(DisasContext *ctx)
+{
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_local_new();
+ TCGv t2 = tcg_temp_local_new();
+ tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t1, 0xFFFFFFFF);
+ tcg_gen_shr_tl(t2, t1, t2);
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ gen_load_spr(t0, SPR_MQ);
+ tcg_gen_and_tl(cpu_gpr[rA(ctx->opcode)], t0, t2);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t2);
+ tcg_gen_and_tl(t0, t0, t2);
+ gen_load_spr(t1, SPR_MQ);
+ tcg_gen_andc_tl(t1, t1, t2);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ gen_set_label(l2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* srq */
+static void gen_srq(DisasContext *ctx)
+{
+ int l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_subfi_tl(t1, 32, t1);
+ tcg_gen_shl_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_or_tl(t1, t0, t1);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x20);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* PowerPC 602 specific instructions */
+
+/* dsa */
+static void gen_dsa(DisasContext *ctx)
+{
+ /* XXX: TODO */
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+}
+
+/* esa */
+static void gen_esa(DisasContext *ctx)
+{
+ /* XXX: TODO */
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+}
+
+/* mfrom */
+static void gen_mfrom(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ gen_helper_602_mfrom(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+#endif
+}
+
+/* 602 - 603 - G2 TLB management */
+
+/* tlbld */
+static void gen_tlbld_6xx(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ gen_helper_6xx_tlbd(cpu_gpr[rB(ctx->opcode)]);
+#endif
+}
+
+/* tlbli */
+static void gen_tlbli_6xx(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ gen_helper_6xx_tlbi(cpu_gpr[rB(ctx->opcode)]);
+#endif
+}
+
+/* 74xx TLB management */
+
+/* tlbld */
+static void gen_tlbld_74xx(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ gen_helper_74xx_tlbd(cpu_gpr[rB(ctx->opcode)]);
+#endif
+}
+
+/* tlbli */
+static void gen_tlbli_74xx(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ gen_helper_74xx_tlbi(cpu_gpr[rB(ctx->opcode)]);
+#endif
+}
+
+/* POWER instructions not in PowerPC 601 */
+
+/* clf */
+static void gen_clf(DisasContext *ctx)
+{
+ /* Cache line flush: implemented as no-op */
+}
+
+/* cli */
+static void gen_cli(DisasContext *ctx)
+{
+ /* Cache line invalidate: privileged and treated as no-op */
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+#endif
+}
+
+/* dclst */
+static void gen_dclst(DisasContext *ctx)
+{
+ /* Data cache line store: treated as no-op */
+}
+
+static void gen_mfsri(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ int ra = rA(ctx->opcode);
+ int rd = rD(ctx->opcode);
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ tcg_gen_shri_tl(t0, t0, 28);
+ tcg_gen_andi_tl(t0, t0, 0xF);
+ gen_helper_load_sr(cpu_gpr[rd], t0);
+ tcg_temp_free(t0);
+ if (ra != 0 && ra != rd)
+ tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rd]);
+#endif
+}
+
+static void gen_rac(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_rac(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+#endif
+}
+
+static void gen_rfsvc(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ gen_helper_rfsvc();
+ gen_sync_exception(ctx);
+#endif
+}
+
+/* svc is not implemented for now */
+
+/* POWER2 specific instructions */
+/* Quad manipulation (load/store two floats at a time) */
+
+/* lfq */
+static void gen_lfq(DisasContext *ctx)
+{
+ int rd = rD(ctx->opcode);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+ tcg_temp_free(t0);
+}
+
+/* lfqu */
+static void gen_lfqu(DisasContext *ctx)
+{
+ int ra = rA(ctx->opcode);
+ int rd = rD(ctx->opcode);
+ TCGv t0, t1;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ if (ra != 0)
+ tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+/* lfqux */
+static void gen_lfqux(DisasContext *ctx)
+{
+ int ra = rA(ctx->opcode);
+ int rd = rD(ctx->opcode);
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ TCGv t0, t1;
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
+ t1 = tcg_temp_new();
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ tcg_temp_free(t1);
+ if (ra != 0)
+ tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ tcg_temp_free(t0);
+}
+
+/* lfqx */
+static void gen_lfqx(DisasContext *ctx)
+{
+ int rd = rD(ctx->opcode);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+ tcg_temp_free(t0);
+}
+
+/* stfq */
+static void gen_stfq(DisasContext *ctx)
+{
+ int rd = rD(ctx->opcode);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+ tcg_temp_free(t0);
+}
+
+/* stfqu */
+static void gen_stfqu(DisasContext *ctx)
+{
+ int ra = rA(ctx->opcode);
+ int rd = rD(ctx->opcode);
+ TCGv t0, t1;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ t1 = tcg_temp_new();
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ tcg_temp_free(t1);
+ if (ra != 0)
+ tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ tcg_temp_free(t0);
+}
+
+/* stfqux */
+static void gen_stfqux(DisasContext *ctx)
+{
+ int ra = rA(ctx->opcode);
+ int rd = rD(ctx->opcode);
+ TCGv t0, t1;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ t1 = tcg_temp_new();
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ tcg_temp_free(t1);
+ if (ra != 0)
+ tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ tcg_temp_free(t0);
+}
+
+/* stfqx */
+static void gen_stfqx(DisasContext *ctx)
+{
+ int rd = rD(ctx->opcode);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+ tcg_temp_free(t0);
+}
+
+/* BookE specific instructions */
+
+/* XXX: not implemented on 440 ? */
+static void gen_mfapidi(DisasContext *ctx)
+{
+ /* XXX: TODO */
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+}
+
+/* XXX: not implemented on 440 ? */
+static void gen_tlbiva(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_tlbie(cpu_gpr[rB(ctx->opcode)]);
+ tcg_temp_free(t0);
+#endif
+}
+
+/* All 405 MAC instructions are translated here */
+static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3,
+ int ra, int rb, int rt, int Rc)
+{
+ TCGv t0, t1;
+
+ t0 = tcg_temp_local_new();
+ t1 = tcg_temp_local_new();
+
+ switch (opc3 & 0x0D) {
+ case 0x05:
+ /* macchw - macchw. - macchwo - macchwo. */
+ /* macchws - macchws. - macchwso - macchwso. */
+ /* nmacchw - nmacchw. - nmacchwo - nmacchwo. */
+ /* nmacchws - nmacchws. - nmacchwso - nmacchwso. */
+ /* mulchw - mulchw. */
+ tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
+ tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
+ tcg_gen_ext16s_tl(t1, t1);
+ break;
+ case 0x04:
+ /* macchwu - macchwu. - macchwuo - macchwuo. */
+ /* macchwsu - macchwsu. - macchwsuo - macchwsuo. */
+ /* mulchwu - mulchwu. */
+ tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
+ tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
+ tcg_gen_ext16u_tl(t1, t1);
+ break;
+ case 0x01:
+ /* machhw - machhw. - machhwo - machhwo. */
+ /* machhws - machhws. - machhwso - machhwso. */
+ /* nmachhw - nmachhw. - nmachhwo - nmachhwo. */
+ /* nmachhws - nmachhws. - nmachhwso - nmachhwso. */
+ /* mulhhw - mulhhw. */
+ tcg_gen_sari_tl(t0, cpu_gpr[ra], 16);
+ tcg_gen_ext16s_tl(t0, t0);
+ tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
+ tcg_gen_ext16s_tl(t1, t1);
+ break;
+ case 0x00:
+ /* machhwu - machhwu. - machhwuo - machhwuo. */
+ /* machhwsu - machhwsu. - machhwsuo - machhwsuo. */
+ /* mulhhwu - mulhhwu. */
+ tcg_gen_shri_tl(t0, cpu_gpr[ra], 16);
+ tcg_gen_ext16u_tl(t0, t0);
+ tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
+ tcg_gen_ext16u_tl(t1, t1);
+ break;
+ case 0x0D:
+ /* maclhw - maclhw. - maclhwo - maclhwo. */
+ /* maclhws - maclhws. - maclhwso - maclhwso. */
+ /* nmaclhw - nmaclhw. - nmaclhwo - nmaclhwo. */
+ /* nmaclhws - nmaclhws. - nmaclhwso - nmaclhwso. */
+ /* mullhw - mullhw. */
+ tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
+ tcg_gen_ext16s_tl(t1, cpu_gpr[rb]);
+ break;
+ case 0x0C:
+ /* maclhwu - maclhwu. - maclhwuo - maclhwuo. */
+ /* maclhwsu - maclhwsu. - maclhwsuo - maclhwsuo. */
+ /* mullhwu - mullhwu. */
+ tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
+ tcg_gen_ext16u_tl(t1, cpu_gpr[rb]);
+ break;
+ }
+ if (opc2 & 0x04) {
+ /* (n)multiply-and-accumulate (0x0C / 0x0E) */
+ tcg_gen_mul_tl(t1, t0, t1);
+ if (opc2 & 0x02) {
+ /* nmultiply-and-accumulate (0x0E) */
+ tcg_gen_sub_tl(t0, cpu_gpr[rt], t1);
+ } else {
+ /* multiply-and-accumulate (0x0C) */
+ tcg_gen_add_tl(t0, cpu_gpr[rt], t1);
+ }
+
+ if (opc3 & 0x12) {
+ /* Check overflow and/or saturate */
+ int l1 = gen_new_label();
+
+ if (opc3 & 0x10) {
+ /* Start with XER OV disabled, the most likely case */
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+ }
+ if (opc3 & 0x01) {
+ /* Signed */
+ tcg_gen_xor_tl(t1, cpu_gpr[rt], t1);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+ tcg_gen_xor_tl(t1, cpu_gpr[rt], t0);
+ tcg_gen_brcondi_tl(TCG_COND_LT, t1, 0, l1);
+ if (opc3 & 0x02) {
+ /* Saturate */
+ tcg_gen_sari_tl(t0, cpu_gpr[rt], 31);
+ tcg_gen_xori_tl(t0, t0, 0x7fffffff);
+ }
+ } else {
+ /* Unsigned */
+ tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
+ if (opc3 & 0x02) {
+ /* Saturate */
+ tcg_gen_movi_tl(t0, UINT32_MAX);
+ }
+ }
+ if (opc3 & 0x10) {
+ /* Check overflow */
+ tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
+ }
+ gen_set_label(l1);
+ tcg_gen_mov_tl(cpu_gpr[rt], t0);
+ }
+ } else {
+ tcg_gen_mul_tl(cpu_gpr[rt], t0, t1);
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+    if (unlikely(Rc != 0)) {
+ /* Update Rc0 */
+ gen_set_Rc0(ctx, cpu_gpr[rt]);
+ }
+}
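+
+/* Decode sketch for one concrete handler: macchwso uses opc2 = 0x0C and
+ * opc3 = 0x17, so opc3 & 0x0D = 0x05 selects the signed cross-halfword
+ * operands, opc2 & 0x04 enables the accumulate step, opc3 & 0x01 picks the
+ * signed overflow test, opc3 & 0x02 enables saturation and opc3 & 0x10
+ * requests the XER[OV]/XER[SO] update.
+ */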
+
+#define GEN_MAC_HANDLER(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_405_mulladd_insn(ctx, opc2, opc3, rA(ctx->opcode), rB(ctx->opcode), \
+ rD(ctx->opcode), Rc(ctx->opcode)); \
+}
+
+/* macchw - macchw. */
+GEN_MAC_HANDLER(macchw, 0x0C, 0x05);
+/* macchwo - macchwo. */
+GEN_MAC_HANDLER(macchwo, 0x0C, 0x15);
+/* macchws - macchws. */
+GEN_MAC_HANDLER(macchws, 0x0C, 0x07);
+/* macchwso - macchwso. */
+GEN_MAC_HANDLER(macchwso, 0x0C, 0x17);
+/* macchwsu - macchwsu. */
+GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06);
+/* macchwsuo - macchwsuo. */
+GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16);
+/* macchwu - macchwu. */
+GEN_MAC_HANDLER(macchwu, 0x0C, 0x04);
+/* macchwuo - macchwuo. */
+GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14);
+/* machhw - machhw. */
+GEN_MAC_HANDLER(machhw, 0x0C, 0x01);
+/* machhwo - machhwo. */
+GEN_MAC_HANDLER(machhwo, 0x0C, 0x11);
+/* machhws - machhws. */
+GEN_MAC_HANDLER(machhws, 0x0C, 0x03);
+/* machhwso - machhwso. */
+GEN_MAC_HANDLER(machhwso, 0x0C, 0x13);
+/* machhwsu - machhwsu. */
+GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02);
+/* machhwsuo - machhwsuo. */
+GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12);
+/* machhwu - machhwu. */
+GEN_MAC_HANDLER(machhwu, 0x0C, 0x00);
+/* machhwuo - machhwuo. */
+GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10);
+/* maclhw - maclhw. */
+GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D);
+/* maclhwo - maclhwo. */
+GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D);
+/* maclhws - maclhws. */
+GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F);
+/* maclhwso - maclhwso. */
+GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F);
+/* maclhwu - maclhwu. */
+GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C);
+/* maclhwuo - maclhwuo. */
+GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C);
+/* maclhwsu - maclhwsu. */
+GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E);
+/* maclhwsuo - maclhwsuo. */
+GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E);
+/* nmacchw - nmacchw. */
+GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05);
+/* nmacchwo - nmacchwo. */
+GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15);
+/* nmacchws - nmacchws. */
+GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07);
+/* nmacchwso - nmacchwso. */
+GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17);
+/* nmachhw - nmachhw. */
+GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01);
+/* nmachhwo - nmachhwo. */
+GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11);
+/* nmachhws - nmachhws. */
+GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03);
+/* nmachhwso - nmachhwso. */
+GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13);
+/* nmaclhw - nmaclhw. */
+GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D);
+/* nmaclhwo - nmaclhwo. */
+GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D);
+/* nmaclhws - nmaclhws. */
+GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F);
+/* nmaclhwso - nmaclhwso. */
+GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F);
+
+/* mulchw - mulchw. */
+GEN_MAC_HANDLER(mulchw, 0x08, 0x05);
+/* mulchwu - mulchwu. */
+GEN_MAC_HANDLER(mulchwu, 0x08, 0x04);
+/* mulhhw - mulhhw. */
+GEN_MAC_HANDLER(mulhhw, 0x08, 0x01);
+/* mulhhwu - mulhhwu. */
+GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00);
+/* mullhw - mullhw. */
+GEN_MAC_HANDLER(mullhw, 0x08, 0x0D);
+/* mullhwu - mullhwu. */
+GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C);
+
+/* mfdcr */
+static void gen_mfdcr(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+ TCGv dcrn;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+    /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ dcrn = tcg_const_tl(SPR(ctx->opcode));
+ gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], dcrn);
+ tcg_temp_free(dcrn);
+#endif
+}
+
+/* mtdcr */
+static void gen_mtdcr(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+ TCGv dcrn;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+    /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ dcrn = tcg_const_tl(SPR(ctx->opcode));
+ gen_helper_store_dcr(dcrn, cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free(dcrn);
+#endif
+}
+
+/* mfdcrx */
+/* XXX: not implemented on 440 ? */
+static void gen_mfdcrx(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+    /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ /* Note: Rc update flag set leads to undefined state of Rc0 */
+#endif
+}
+
+/* mtdcrx */
+/* XXX: not implemented on 440 ? */
+static void gen_mtdcrx(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+    /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ gen_helper_store_dcr(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ /* Note: Rc update flag set leads to undefined state of Rc0 */
+#endif
+}
+
+/* mfdcrux (PPC 460): user-mode access to DCR */
+static void gen_mfdcrux(DisasContext *ctx)
+{
+    /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ /* Note: Rc update flag set leads to undefined state of Rc0 */
+}
+
+/* mtdcrux (PPC 460): user-mode access to DCR */
+static void gen_mtdcrux(DisasContext *ctx)
+{
+    /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ gen_helper_store_dcr(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ /* Note: Rc update flag set leads to undefined state of Rc0 */
+}
+
+/* dccci */
+static void gen_dccci(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ /* interpreted as no-op */
+#endif
+}
+
+/* dcread */
+static void gen_dcread(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ TCGv EA, val;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ val = tcg_temp_new();
+ gen_qemu_ld32u(ctx, val, EA);
+ tcg_temp_free(val);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA);
+ tcg_temp_free(EA);
+#endif
+}
+
+/* icbt */
+static void gen_icbt_40x(DisasContext *ctx)
+{
+ /* interpreted as no-op */
+    /* XXX: specification says this is treated as a load by the MMU
+ * but does not generate any exception
+ */
+}
+
+/* iccci */
+static void gen_iccci(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ /* interpreted as no-op */
+#endif
+}
+
+/* icread */
+static void gen_icread(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ /* interpreted as no-op */
+#endif
+}
+
+/* rfci (mem_idx only) */
+static void gen_rfci_40x(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ /* Restore CPU state */
+ gen_helper_40x_rfci();
+ gen_sync_exception(ctx);
+#endif
+}
+
+static void gen_rfci(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ /* Restore CPU state */
+ gen_helper_rfci();
+ gen_sync_exception(ctx);
+#endif
+}
+
+/* BookE specific */
+
+/* XXX: not implemented on 440 ? */
+static void gen_rfdi(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ /* Restore CPU state */
+ gen_helper_rfdi();
+ gen_sync_exception(ctx);
+#endif
+}
+
+/* XXX: not implemented on 440 ? */
+static void gen_rfmci(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ /* Restore CPU state */
+ gen_helper_rfmci();
+ gen_sync_exception(ctx);
+#endif
+}
+
+/* TLB management - PowerPC 405 implementation */
+
+/* tlbre */
+static void gen_tlbre_40x(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ switch (rB(ctx->opcode)) {
+ case 0:
+ gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ break;
+ case 1:
+ gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ break;
+ default:
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ break;
+ }
+#endif
+}
+
+/* tlbsx - tlbsx. */
+static void gen_tlbsx_40x(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+ if (Rc(ctx->opcode)) {
+ int l1 = gen_new_label();
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_xer);
+ tcg_gen_shri_i32(cpu_crf[0], cpu_crf[0], XER_SO);
+ tcg_gen_andi_i32(cpu_crf[0], cpu_crf[0], 1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
+ gen_set_label(l1);
+ }
+#endif
+}
+
+/* tlbwe */
+static void gen_tlbwe_40x(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ switch (rB(ctx->opcode)) {
+ case 0:
+ gen_helper_4xx_tlbwe_hi(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ break;
+ case 1:
+ gen_helper_4xx_tlbwe_lo(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ break;
+ default:
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ break;
+ }
+#endif
+}
+
+/* TLB management - PowerPC 440 implementation */
+
+/* tlbre */
+static void gen_tlbre_440(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ switch (rB(ctx->opcode)) {
+ case 0:
+ case 1:
+ case 2:
+ {
+ TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode));
+ gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_temp_free_i32(t0);
+ }
+ break;
+ default:
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ break;
+ }
+#endif
+}
+
+/* tlbsx - tlbsx. */
+static void gen_tlbsx_440(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+ if (Rc(ctx->opcode)) {
+ int l1 = gen_new_label();
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_xer);
+ tcg_gen_shri_i32(cpu_crf[0], cpu_crf[0], XER_SO);
+ tcg_gen_andi_i32(cpu_crf[0], cpu_crf[0], 1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
+ gen_set_label(l1);
+ }
+#endif
+}
+
+/* tlbwe */
+static void gen_tlbwe_440(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ switch (rB(ctx->opcode)) {
+ case 0:
+ case 1:
+ case 2:
+ {
+ TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode));
+ gen_helper_440_tlbwe(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free_i32(t0);
+ }
+ break;
+ default:
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ break;
+ }
+#endif
+}
+
+/* wrtee */
+static void gen_wrtee(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE));
+ tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
+ tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
+ tcg_temp_free(t0);
+ /* Stop translation to have a chance to raise an exception
+ * if we just set msr_ee to 1
+ */
+ gen_stop_exception(ctx);
+#endif
+}
+
+/* wrteei */
+static void gen_wrteei(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+#else
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ if (ctx->opcode & 0x00008000) {
+ tcg_gen_ori_tl(cpu_msr, cpu_msr, (1 << MSR_EE));
+ /* Stop translation to have a chance to raise an exception */
+ gen_stop_exception(ctx);
+ } else {
+ tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
+ }
+#endif
+}
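+
+/* Note: opcode bit 0x00008000 is the immediate E field, so "wrteei 1" sets
+ * MSR[EE] and stops translation so that a pending interrupt can be taken,
+ * while "wrteei 0" simply clears MSR[EE].
+ */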
+
+/* PowerPC 440 specific instructions */
+
+/* dlmzb */
+static void gen_dlmzb(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_const_i32(Rc(ctx->opcode));
+ gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)], t0);
+ tcg_temp_free_i32(t0);
+}
+
+/* mbar replaces eieio on 440 */
+static void gen_mbar(DisasContext *ctx)
+{
+ /* interpreted as no-op */
+}
+
+/* msync replaces sync on 440 */
+static void gen_msync(DisasContext *ctx)
+{
+ /* interpreted as no-op */
+}
+
+/* icbt */
+static void gen_icbt_440(DisasContext *ctx)
+{
+ /* interpreted as no-op */
+    /* XXX: the specification says this is treated as a load by the MMU
+ * but does not generate any exception
+ */
+}
+
+/*** Altivec vector extension ***/
+/* AltiVec register moves */
+
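+/* Return a TCG pointer to AltiVec register 'reg' inside the CPU state,
+ * so that helpers can operate on the full 128-bit value in place.
+ */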
+static inline TCGv_ptr gen_avr_ptr(int reg)
+{
+ TCGv_ptr r = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(r, cpu_env, offsetof(CPUPPCState, avr[reg]));
+ return r;
+}
+
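+/* Vector loads and stores: the effective address is (rA|0) + rB rounded
+ * down to a 16-byte boundary, and the 128-bit register is transferred as
+ * two 64-bit halves whose order depends on the current endianness.
+ */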
+#define GEN_VR_LDX(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ tcg_gen_andi_tl(EA, EA, ~0xf); \
+ if (ctx->le_mode) { \
+ gen_qemu_ld64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ gen_qemu_ld64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+ } else { \
+ gen_qemu_ld64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ gen_qemu_ld64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+ } \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_VR_STX(name, opc2, opc3) \
+static void gen_st##name(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ tcg_gen_andi_tl(EA, EA, ~0xf); \
+ if (ctx->le_mode) { \
+ gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ gen_qemu_st64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+ } else { \
+ gen_qemu_st64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+ } \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_VR_LVE(name, opc2, opc3) \
+static void gen_lve##name(DisasContext *ctx) \
+ { \
+ TCGv EA; \
+ TCGv_ptr rs; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ rs = gen_avr_ptr(rS(ctx->opcode)); \
+ gen_helper_lve##name (rs, EA); \
+ tcg_temp_free(EA); \
+ tcg_temp_free_ptr(rs); \
+ }
+
+#define GEN_VR_STVE(name, opc2, opc3) \
+static void gen_stve##name(DisasContext *ctx) \
+ { \
+ TCGv EA; \
+ TCGv_ptr rs; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ rs = gen_avr_ptr(rS(ctx->opcode)); \
+ gen_helper_stve##name (rs, EA); \
+ tcg_temp_free(EA); \
+ tcg_temp_free_ptr(rs); \
+ }
+
+GEN_VR_LDX(lvx, 0x07, 0x03);
+/* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
+GEN_VR_LDX(lvxl, 0x07, 0x0B);
+
+GEN_VR_LVE(bx, 0x07, 0x00);
+GEN_VR_LVE(hx, 0x07, 0x01);
+GEN_VR_LVE(wx, 0x07, 0x02);
+
+GEN_VR_STX(svx, 0x07, 0x07);
+/* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
+GEN_VR_STX(svxl, 0x07, 0x0F);
+
+GEN_VR_STVE(bx, 0x07, 0x04);
+GEN_VR_STVE(hx, 0x07, 0x05);
+GEN_VR_STVE(wx, 0x07, 0x06);
+
+static void gen_lvsl(DisasContext *ctx)
+{
+ TCGv_ptr rd;
+ TCGv EA;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ gen_helper_lvsl(rd, EA);
+ tcg_temp_free(EA);
+ tcg_temp_free_ptr(rd);
+}
+
+static void gen_lvsr(DisasContext *ctx)
+{
+ TCGv_ptr rd;
+ TCGv EA;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ gen_helper_lvsr(rd, EA);
+ tcg_temp_free(EA);
+ tcg_temp_free_ptr(rd);
+}
+
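+/* mfvscr: VSCR is a 32-bit register, so the upper half of the destination
+ * vector register is cleared and the lower half receives the zero-extended
+ * VSCR value.
+ */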
+static void gen_mfvscr(DisasContext *ctx)
+{
+ TCGv_i32 t;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ tcg_gen_movi_i64(cpu_avrh[rD(ctx->opcode)], 0);
+ t = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t, cpu_env, offsetof(CPUState, vscr));
+ tcg_gen_extu_i32_i64(cpu_avrl[rD(ctx->opcode)], t);
+ tcg_temp_free_i32(t);
+}
+
+static void gen_mtvscr(DisasContext *ctx)
+{
+ TCGv_ptr p;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ p = gen_avr_ptr(rD(ctx->opcode));
+ gen_helper_mtvscr(p);
+ tcg_temp_free_ptr(p);
+}
+
+/* Logical operations */
+#define GEN_VX_LOGICAL(name, tcg_op, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ tcg_op(cpu_avrh[rD(ctx->opcode)], cpu_avrh[rA(ctx->opcode)], cpu_avrh[rB(ctx->opcode)]); \
+ tcg_op(cpu_avrl[rD(ctx->opcode)], cpu_avrl[rA(ctx->opcode)], cpu_avrl[rB(ctx->opcode)]); \
+}
+
+GEN_VX_LOGICAL(vand, tcg_gen_and_i64, 2, 16);
+GEN_VX_LOGICAL(vandc, tcg_gen_andc_i64, 2, 17);
+GEN_VX_LOGICAL(vor, tcg_gen_or_i64, 2, 18);
+GEN_VX_LOGICAL(vxor, tcg_gen_xor_i64, 2, 19);
+GEN_VX_LOGICAL(vnor, tcg_gen_nor_i64, 2, 20);
+
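+/* Most VX-form instructions are implemented in helpers that take pointers
+ * to the source and destination vector registers rather than TCG values.
+ */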
+#define GEN_VXFORM(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra, rb, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name (rd, ra, rb); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+}
+
+GEN_VXFORM(vaddubm, 0, 0);
+GEN_VXFORM(vadduhm, 0, 1);
+GEN_VXFORM(vadduwm, 0, 2);
+GEN_VXFORM(vsububm, 0, 16);
+GEN_VXFORM(vsubuhm, 0, 17);
+GEN_VXFORM(vsubuwm, 0, 18);
+GEN_VXFORM(vmaxub, 1, 0);
+GEN_VXFORM(vmaxuh, 1, 1);
+GEN_VXFORM(vmaxuw, 1, 2);
+GEN_VXFORM(vmaxsb, 1, 4);
+GEN_VXFORM(vmaxsh, 1, 5);
+GEN_VXFORM(vmaxsw, 1, 6);
+GEN_VXFORM(vminub, 1, 8);
+GEN_VXFORM(vminuh, 1, 9);
+GEN_VXFORM(vminuw, 1, 10);
+GEN_VXFORM(vminsb, 1, 12);
+GEN_VXFORM(vminsh, 1, 13);
+GEN_VXFORM(vminsw, 1, 14);
+GEN_VXFORM(vavgub, 1, 16);
+GEN_VXFORM(vavguh, 1, 17);
+GEN_VXFORM(vavguw, 1, 18);
+GEN_VXFORM(vavgsb, 1, 20);
+GEN_VXFORM(vavgsh, 1, 21);
+GEN_VXFORM(vavgsw, 1, 22);
+GEN_VXFORM(vmrghb, 6, 0);
+GEN_VXFORM(vmrghh, 6, 1);
+GEN_VXFORM(vmrghw, 6, 2);
+GEN_VXFORM(vmrglb, 6, 4);
+GEN_VXFORM(vmrglh, 6, 5);
+GEN_VXFORM(vmrglw, 6, 6);
+GEN_VXFORM(vmuloub, 4, 0);
+GEN_VXFORM(vmulouh, 4, 1);
+GEN_VXFORM(vmulosb, 4, 4);
+GEN_VXFORM(vmulosh, 4, 5);
+GEN_VXFORM(vmuleub, 4, 8);
+GEN_VXFORM(vmuleuh, 4, 9);
+GEN_VXFORM(vmulesb, 4, 12);
+GEN_VXFORM(vmulesh, 4, 13);
+GEN_VXFORM(vslb, 2, 4);
+GEN_VXFORM(vslh, 2, 5);
+GEN_VXFORM(vslw, 2, 6);
+GEN_VXFORM(vsrb, 2, 8);
+GEN_VXFORM(vsrh, 2, 9);
+GEN_VXFORM(vsrw, 2, 10);
+GEN_VXFORM(vsrab, 2, 12);
+GEN_VXFORM(vsrah, 2, 13);
+GEN_VXFORM(vsraw, 2, 14);
+GEN_VXFORM(vslo, 6, 16);
+GEN_VXFORM(vsro, 6, 17);
+GEN_VXFORM(vaddcuw, 0, 6);
+GEN_VXFORM(vsubcuw, 0, 22);
+GEN_VXFORM(vaddubs, 0, 8);
+GEN_VXFORM(vadduhs, 0, 9);
+GEN_VXFORM(vadduws, 0, 10);
+GEN_VXFORM(vaddsbs, 0, 12);
+GEN_VXFORM(vaddshs, 0, 13);
+GEN_VXFORM(vaddsws, 0, 14);
+GEN_VXFORM(vsububs, 0, 24);
+GEN_VXFORM(vsubuhs, 0, 25);
+GEN_VXFORM(vsubuws, 0, 26);
+GEN_VXFORM(vsubsbs, 0, 28);
+GEN_VXFORM(vsubshs, 0, 29);
+GEN_VXFORM(vsubsws, 0, 30);
+GEN_VXFORM(vrlb, 2, 0);
+GEN_VXFORM(vrlh, 2, 1);
+GEN_VXFORM(vrlw, 2, 2);
+GEN_VXFORM(vsl, 2, 7);
+GEN_VXFORM(vsr, 2, 11);
+GEN_VXFORM(vpkuhum, 7, 0);
+GEN_VXFORM(vpkuwum, 7, 1);
+GEN_VXFORM(vpkuhus, 7, 2);
+GEN_VXFORM(vpkuwus, 7, 3);
+GEN_VXFORM(vpkshus, 7, 4);
+GEN_VXFORM(vpkswus, 7, 5);
+GEN_VXFORM(vpkshss, 7, 6);
+GEN_VXFORM(vpkswss, 7, 7);
+GEN_VXFORM(vpkpx, 7, 12);
+GEN_VXFORM(vsum4ubs, 4, 24);
+GEN_VXFORM(vsum4sbs, 4, 28);
+GEN_VXFORM(vsum4shs, 4, 25);
+GEN_VXFORM(vsum2sws, 4, 26);
+GEN_VXFORM(vsumsws, 4, 30);
+GEN_VXFORM(vaddfp, 5, 0);
+GEN_VXFORM(vsubfp, 5, 1);
+GEN_VXFORM(vmaxfp, 5, 16);
+GEN_VXFORM(vminfp, 5, 17);
+
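+/* VXR-form (vector compare) instructions come in two flavours: the plain
+ * form and the record form (mnemonic suffixed with '.', opc3 bit 4 set),
+ * each dispatched to its own helper.
+ */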
+#define GEN_VXRFORM1(opname, name, str, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr ra, rb, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##opname (rd, ra, rb); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+#define GEN_VXRFORM(name, opc2, opc3) \
+ GEN_VXRFORM1(name, name, #name, opc2, opc3) \
+ GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))
+
+GEN_VXRFORM(vcmpequb, 3, 0)
+GEN_VXRFORM(vcmpequh, 3, 1)
+GEN_VXRFORM(vcmpequw, 3, 2)
+GEN_VXRFORM(vcmpgtsb, 3, 12)
+GEN_VXRFORM(vcmpgtsh, 3, 13)
+GEN_VXRFORM(vcmpgtsw, 3, 14)
+GEN_VXRFORM(vcmpgtub, 3, 8)
+GEN_VXRFORM(vcmpgtuh, 3, 9)
+GEN_VXRFORM(vcmpgtuw, 3, 10)
+GEN_VXRFORM(vcmpeqfp, 3, 3)
+GEN_VXRFORM(vcmpgefp, 3, 7)
+GEN_VXRFORM(vcmpgtfp, 3, 11)
+GEN_VXRFORM(vcmpbfp, 3, 15)
+
+#define GEN_VXFORM_SIMM(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rd; \
+ TCGv_i32 simm; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ simm = tcg_const_i32(SIMM5(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name (rd, simm); \
+ tcg_temp_free_i32(simm); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+GEN_VXFORM_SIMM(vspltisb, 6, 12);
+GEN_VXFORM_SIMM(vspltish, 6, 13);
+GEN_VXFORM_SIMM(vspltisw, 6, 14);
+
+#define GEN_VXFORM_NOA(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name (rd, rb); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+GEN_VXFORM_NOA(vupkhsb, 7, 8);
+GEN_VXFORM_NOA(vupkhsh, 7, 9);
+GEN_VXFORM_NOA(vupklsb, 7, 10);
+GEN_VXFORM_NOA(vupklsh, 7, 11);
+GEN_VXFORM_NOA(vupkhpx, 7, 13);
+GEN_VXFORM_NOA(vupklpx, 7, 15);
+GEN_VXFORM_NOA(vrefp, 5, 4);
+GEN_VXFORM_NOA(vrsqrtefp, 5, 5);
+GEN_VXFORM_NOA(vexptefp, 5, 6);
+GEN_VXFORM_NOA(vlogefp, 5, 7);
+GEN_VXFORM_NOA(vrfim, 5, 8);
+GEN_VXFORM_NOA(vrfin, 5, 9);
+GEN_VXFORM_NOA(vrfip, 5, 10);
+GEN_VXFORM_NOA(vrfiz, 5, 11);
+
+#define GEN_VXFORM_UIMM(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ TCGv_i32 uimm; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ uimm = tcg_const_i32(UIMM5(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name (rd, rb, uimm); \
+ tcg_temp_free_i32(uimm); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+GEN_VXFORM_UIMM(vspltb, 6, 8);
+GEN_VXFORM_UIMM(vsplth, 6, 9);
+GEN_VXFORM_UIMM(vspltw, 6, 10);
+GEN_VXFORM_UIMM(vcfux, 5, 12);
+GEN_VXFORM_UIMM(vcfsx, 5, 13);
+GEN_VXFORM_UIMM(vctuxs, 5, 14);
+GEN_VXFORM_UIMM(vctsxs, 5, 15);
+
+static void gen_vsldoi(DisasContext *ctx)
+{
+ TCGv_ptr ra, rb, rd;
+ TCGv_i32 sh;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ ra = gen_avr_ptr(rA(ctx->opcode));
+ rb = gen_avr_ptr(rB(ctx->opcode));
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ sh = tcg_const_i32(VSH(ctx->opcode));
+ gen_helper_vsldoi (rd, ra, rb, sh);
+ tcg_temp_free_ptr(ra);
+ tcg_temp_free_ptr(rb);
+ tcg_temp_free_ptr(rd);
+ tcg_temp_free_i32(sh);
+}
+
+#define GEN_VAFORM_PAIRED(name0, name1, opc2) \
+static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+ { \
+ TCGv_ptr ra, rb, rc, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rc = gen_avr_ptr(rC(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ if (Rc(ctx->opcode)) { \
+ gen_helper_##name1 (rd, ra, rb, rc); \
+ } else { \
+ gen_helper_##name0 (rd, ra, rb, rc); \
+ } \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rc); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16)
+
+static void gen_vmladduhm(DisasContext *ctx)
+{
+ TCGv_ptr ra, rb, rc, rd;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ ra = gen_avr_ptr(rA(ctx->opcode));
+ rb = gen_avr_ptr(rB(ctx->opcode));
+ rc = gen_avr_ptr(rC(ctx->opcode));
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ gen_helper_vmladduhm(rd, ra, rb, rc);
+ tcg_temp_free_ptr(ra);
+ tcg_temp_free_ptr(rb);
+ tcg_temp_free_ptr(rc);
+ tcg_temp_free_ptr(rd);
+}
+
+GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18)
+GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19)
+GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20)
+GEN_VAFORM_PAIRED(vsel, vperm, 21)
+GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23)
+
+/*** SPE extension ***/
+/* Register moves */
+
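+/* On 32-bit targets a 64-bit SPE value is split across two TCG globals:
+ * cpu_gpr holds the low word and cpu_gprh the high word; on 64-bit targets
+ * the whole value fits in the GPR itself.
+ */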
+static inline void gen_load_gpr64(TCGv_i64 t, int reg)
+{
+#if defined(TARGET_PPC64)
+ tcg_gen_mov_i64(t, cpu_gpr[reg]);
+#else
+ tcg_gen_concat_i32_i64(t, cpu_gpr[reg], cpu_gprh[reg]);
+#endif
+}
+
+static inline void gen_store_gpr64(int reg, TCGv_i64 t)
+{
+#if defined(TARGET_PPC64)
+ tcg_gen_mov_i64(cpu_gpr[reg], t);
+#else
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_trunc_i64_i32(cpu_gpr[reg], t);
+ tcg_gen_shri_i64(tmp, t, 32);
+ tcg_gen_trunc_i64_i32(cpu_gprh[reg], tmp);
+ tcg_temp_free_i64(tmp);
+#endif
+}
+
+#define GEN_SPE(name0, name1, opc2, opc3, inval, type) \
+static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+{ \
+ if (Rc(ctx->opcode)) \
+ gen_##name1(ctx); \
+ else \
+ gen_##name0(ctx); \
+}
+
+/* Handler for undefined SPE opcodes */
+static inline void gen_speundef(DisasContext *ctx)
+{
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+}
+
+/* SPE logic */
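+/* Two variants of each SPE operation are generated: on 64-bit targets the
+ * operation is applied once to the full register, while on 32-bit targets
+ * it is applied separately to the low (cpu_gpr) and high (cpu_gprh) words.
+ */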
+#if defined(TARGET_PPC64)
+#define GEN_SPEOP_LOGIC2(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ tcg_op(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], \
+ cpu_gpr[rB(ctx->opcode)]); \
+}
+#else
+#define GEN_SPEOP_LOGIC2(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ tcg_op(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], \
+ cpu_gpr[rB(ctx->opcode)]); \
+ tcg_op(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], \
+ cpu_gprh[rB(ctx->opcode)]); \
+}
+#endif
+
+GEN_SPEOP_LOGIC2(evand, tcg_gen_and_tl);
+GEN_SPEOP_LOGIC2(evandc, tcg_gen_andc_tl);
+GEN_SPEOP_LOGIC2(evxor, tcg_gen_xor_tl);
+GEN_SPEOP_LOGIC2(evor, tcg_gen_or_tl);
+GEN_SPEOP_LOGIC2(evnor, tcg_gen_nor_tl);
+GEN_SPEOP_LOGIC2(eveqv, tcg_gen_eqv_tl);
+GEN_SPEOP_LOGIC2(evorc, tcg_gen_orc_tl);
+GEN_SPEOP_LOGIC2(evnand, tcg_gen_nand_tl);
+
+/* SPE logic immediate */
+#if defined(TARGET_PPC64)
+#define GEN_SPEOP_TCG_LOGIC_IMM2(name, tcg_opi) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ TCGv_i32 t0 = tcg_temp_local_new_i32(); \
+ TCGv_i32 t1 = tcg_temp_local_new_i32(); \
+ TCGv_i64 t2 = tcg_temp_local_new_i64(); \
+ tcg_gen_trunc_i64_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_opi(t0, t0, rB(ctx->opcode)); \
+ tcg_gen_shri_i64(t2, cpu_gpr[rA(ctx->opcode)], 32); \
+ tcg_gen_trunc_i64_i32(t1, t2); \
+ tcg_temp_free_i64(t2); \
+ tcg_opi(t1, t1, rB(ctx->opcode)); \
+ tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1); \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i32(t1); \
+}
+#else
+#define GEN_SPEOP_TCG_LOGIC_IMM2(name, tcg_opi) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ tcg_opi(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], \
+ rB(ctx->opcode)); \
+ tcg_opi(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], \
+ rB(ctx->opcode)); \
+}
+#endif
+GEN_SPEOP_TCG_LOGIC_IMM2(evslwi, tcg_gen_shli_i32);
+GEN_SPEOP_TCG_LOGIC_IMM2(evsrwiu, tcg_gen_shri_i32);
+GEN_SPEOP_TCG_LOGIC_IMM2(evsrwis, tcg_gen_sari_i32);
+GEN_SPEOP_TCG_LOGIC_IMM2(evrlwi, tcg_gen_rotli_i32);
+
+/* SPE arithmetic */
+#if defined(TARGET_PPC64)
+#define GEN_SPEOP_ARITH1(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ TCGv_i32 t0 = tcg_temp_local_new_i32(); \
+ TCGv_i32 t1 = tcg_temp_local_new_i32(); \
+ TCGv_i64 t2 = tcg_temp_local_new_i64(); \
+ tcg_gen_trunc_i64_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_op(t0, t0); \
+ tcg_gen_shri_i64(t2, cpu_gpr[rA(ctx->opcode)], 32); \
+ tcg_gen_trunc_i64_i32(t1, t2); \
+ tcg_temp_free_i64(t2); \
+ tcg_op(t1, t1); \
+ tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1); \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i32(t1); \
+}
+#else
+#define GEN_SPEOP_ARITH1(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ tcg_op(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); \
+ tcg_op(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]); \
+}
+#endif
+
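+/* Per-word absolute value used by evabs: copy the operand when it is
+ * non-negative, negate it otherwise.
+ */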
+static inline void gen_op_evabs(TCGv_i32 ret, TCGv_i32 arg1)
+{
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+
+ tcg_gen_brcondi_i32(TCG_COND_GE, arg1, 0, l1);
+ tcg_gen_neg_i32(ret, arg1);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_mov_i32(ret, arg1);
+ gen_set_label(l2);
+}
+GEN_SPEOP_ARITH1(evabs, gen_op_evabs);
+GEN_SPEOP_ARITH1(evneg, tcg_gen_neg_i32);
+GEN_SPEOP_ARITH1(evextsb, tcg_gen_ext8s_i32);
+GEN_SPEOP_ARITH1(evextsh, tcg_gen_ext16s_i32);
+static inline void gen_op_evrndw(TCGv_i32 ret, TCGv_i32 arg1)
+{
+ tcg_gen_addi_i32(ret, arg1, 0x8000);
+ tcg_gen_ext16u_i32(ret, ret);
+}
+GEN_SPEOP_ARITH1(evrndw, gen_op_evrndw);
+GEN_SPEOP_ARITH1(evcntlsw, gen_helper_cntlsw32);
+GEN_SPEOP_ARITH1(evcntlzw, gen_helper_cntlzw32);
+
+#if defined(TARGET_PPC64)
+#define GEN_SPEOP_ARITH2(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ TCGv_i32 t0 = tcg_temp_local_new_i32(); \
+ TCGv_i32 t1 = tcg_temp_local_new_i32(); \
+ TCGv_i32 t2 = tcg_temp_local_new_i32(); \
+ TCGv_i64 t3 = tcg_temp_local_new_i64(); \
+ tcg_gen_trunc_i64_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_gen_trunc_i64_i32(t2, cpu_gpr[rB(ctx->opcode)]); \
+ tcg_op(t0, t0, t2); \
+ tcg_gen_shri_i64(t3, cpu_gpr[rA(ctx->opcode)], 32); \
+ tcg_gen_trunc_i64_i32(t1, t3); \
+ tcg_gen_shri_i64(t3, cpu_gpr[rB(ctx->opcode)], 32); \
+ tcg_gen_trunc_i64_i32(t2, t3); \
+ tcg_temp_free_i64(t3); \
+ tcg_op(t1, t1, t2); \
+ tcg_temp_free_i32(t2); \
+ tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1); \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i32(t1); \
+}
+#else
+#define GEN_SPEOP_ARITH2(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ tcg_op(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], \
+ cpu_gpr[rB(ctx->opcode)]); \
+ tcg_op(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], \
+ cpu_gprh[rB(ctx->opcode)]); \
+}
+#endif
+
+static inline void gen_op_evsrwu(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ TCGv_i32 t0;
+ int l1, l2;
+
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+ t0 = tcg_temp_local_new_i32();
+    /* No error here: the architecture uses 6 bits of the shift count */
+ tcg_gen_andi_i32(t0, arg2, 0x3F);
+ tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);
+ tcg_gen_shr_i32(ret, arg1, t0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_i32(ret, 0);
+ gen_set_label(l2);
+ tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evsrwu, gen_op_evsrwu);
+static inline void gen_op_evsrws(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ TCGv_i32 t0;
+ int l1, l2;
+
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+ t0 = tcg_temp_local_new_i32();
+    /* No error here: the architecture uses 6 bits of the shift count */
+ tcg_gen_andi_i32(t0, arg2, 0x3F);
+ tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);
+ tcg_gen_sar_i32(ret, arg1, t0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_i32(ret, 0);
+ gen_set_label(l2);
+ tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evsrws, gen_op_evsrws);
+static inline void gen_op_evslw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ TCGv_i32 t0;
+ int l1, l2;
+
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+ t0 = tcg_temp_local_new_i32();
+    /* No error here: the architecture uses 6 bits of the shift count */
+ tcg_gen_andi_i32(t0, arg2, 0x3F);
+ tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);
+ tcg_gen_shl_i32(ret, arg1, t0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_i32(ret, 0);
+ gen_set_label(l2);
+ tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evslw, gen_op_evslw);
+static inline void gen_op_evrlw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_andi_i32(t0, arg2, 0x1F);
+ tcg_gen_rotl_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evrlw, gen_op_evrlw);
+static inline void gen_evmergehi(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_APU);
+ return;
+ }
+#if defined(TARGET_PPC64)
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 32);
+    tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], 0xFFFFFFFF00000000ULL);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+#else
+ tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
+ tcg_gen_mov_i32(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+#endif
+}
+GEN_SPEOP_ARITH2(evaddw, tcg_gen_add_i32);
+static inline void gen_op_evsubf(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ tcg_gen_sub_i32(ret, arg2, arg1);
+}
+GEN_SPEOP_ARITH2(evsubfw, gen_op_evsubf);
+
+/* SPE arithmetic immediate */
+#if defined(TARGET_PPC64)
+#define GEN_SPEOP_ARITH_IMM2(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ TCGv_i32 t0 = tcg_temp_local_new_i32(); \
+ TCGv_i32 t1 = tcg_temp_local_new_i32(); \
+ TCGv_i64 t2 = tcg_temp_local_new_i64(); \
+ tcg_gen_trunc_i64_i32(t0, cpu_gpr[rB(ctx->opcode)]); \
+ tcg_op(t0, t0, rA(ctx->opcode)); \
+ tcg_gen_shri_i64(t2, cpu_gpr[rB(ctx->opcode)], 32); \
+ tcg_gen_trunc_i64_i32(t1, t2); \
+ tcg_temp_free_i64(t2); \
+ tcg_op(t1, t1, rA(ctx->opcode)); \
+ tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1); \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i32(t1); \
+}
+#else
+#define GEN_SPEOP_ARITH_IMM2(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ tcg_op(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
+ rA(ctx->opcode)); \
+ tcg_op(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)], \
+ rA(ctx->opcode)); \
+}
+#endif
+GEN_SPEOP_ARITH_IMM2(evaddiw, tcg_gen_addi_i32);
+GEN_SPEOP_ARITH_IMM2(evsubifw, tcg_gen_subi_i32);
+
+/* SPE comparison */
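+/* SPE comparisons set the target CR field from both halves of the operands:
+ * one bit for each half plus the OR and AND summaries of the two results
+ * (CRF_CH, CRF_CL, CRF_CH_OR_CL, CRF_CH_AND_CL).
+ */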
+#if defined(TARGET_PPC64)
+#define GEN_SPEOP_COMP(name, tcg_cond) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ int l1 = gen_new_label(); \
+ int l2 = gen_new_label(); \
+ int l3 = gen_new_label(); \
+ int l4 = gen_new_label(); \
+ TCGv_i32 t0 = tcg_temp_local_new_i32(); \
+ TCGv_i32 t1 = tcg_temp_local_new_i32(); \
+ TCGv_i64 t2 = tcg_temp_local_new_i64(); \
+ tcg_gen_trunc_i64_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_gen_trunc_i64_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
+ tcg_gen_brcond_i32(tcg_cond, t0, t1, l1); \
+ tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], 0); \
+ tcg_gen_br(l2); \
+ gen_set_label(l1); \
+ tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], \
+ CRF_CL | CRF_CH_OR_CL | CRF_CH_AND_CL); \
+ gen_set_label(l2); \
+ tcg_gen_shri_i64(t2, cpu_gpr[rA(ctx->opcode)], 32); \
+ tcg_gen_trunc_i64_i32(t0, t2); \
+ tcg_gen_shri_i64(t2, cpu_gpr[rB(ctx->opcode)], 32); \
+ tcg_gen_trunc_i64_i32(t1, t2); \
+ tcg_temp_free_i64(t2); \
+ tcg_gen_brcond_i32(tcg_cond, t0, t1, l3); \
+ tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], \
+ ~(CRF_CH | CRF_CH_AND_CL)); \
+ tcg_gen_br(l4); \
+ gen_set_label(l3); \
+ tcg_gen_ori_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], \
+ CRF_CH | CRF_CH_OR_CL); \
+ gen_set_label(l4); \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i32(t1); \
+}
+#else
+#define GEN_SPEOP_COMP(name, tcg_cond) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ int l1 = gen_new_label(); \
+ int l2 = gen_new_label(); \
+ int l3 = gen_new_label(); \
+ int l4 = gen_new_label(); \
+ \
+ tcg_gen_brcond_i32(tcg_cond, cpu_gpr[rA(ctx->opcode)], \
+ cpu_gpr[rB(ctx->opcode)], l1); \
+    tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], 0);                         \
+ tcg_gen_br(l2); \
+ gen_set_label(l1); \
+ tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], \
+ CRF_CL | CRF_CH_OR_CL | CRF_CH_AND_CL); \
+ gen_set_label(l2); \
+ tcg_gen_brcond_i32(tcg_cond, cpu_gprh[rA(ctx->opcode)], \
+ cpu_gprh[rB(ctx->opcode)], l3); \
+ tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], \
+ ~(CRF_CH | CRF_CH_AND_CL)); \
+ tcg_gen_br(l4); \
+ gen_set_label(l3); \
+ tcg_gen_ori_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], \
+ CRF_CH | CRF_CH_OR_CL); \
+ gen_set_label(l4); \
+}
+#endif
+GEN_SPEOP_COMP(evcmpgtu, TCG_COND_GTU);
+GEN_SPEOP_COMP(evcmpgts, TCG_COND_GT);
+GEN_SPEOP_COMP(evcmpltu, TCG_COND_LTU);
+GEN_SPEOP_COMP(evcmplts, TCG_COND_LT);
+GEN_SPEOP_COMP(evcmpeq, TCG_COND_EQ);
+
+/* SPE misc */
+static inline void gen_brinc(DisasContext *ctx)
+{
+ /* Note: brinc is usable even if SPE is disabled */
+ gen_helper_brinc(cpu_gpr[rD(ctx->opcode)],
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+}
+static inline void gen_evmergelo(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_APU);
+ return;
+ }
+#if defined(TARGET_PPC64)
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x00000000FFFFFFFFLL);
+ tcg_gen_shli_tl(t1, cpu_gpr[rA(ctx->opcode)], 32);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+#else
+ tcg_gen_mov_i32(cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+#endif
+}
+static inline void gen_evmergehilo(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_APU);
+ return;
+ }
+#if defined(TARGET_PPC64)
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x00000000FFFFFFFFLL);
+    tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], 0xFFFFFFFF00000000ULL);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+#else
+ tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mov_i32(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+#endif
+}
+static inline void gen_evmergelohi(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_APU);
+ return;
+ }
+#if defined(TARGET_PPC64)
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 32);
+ tcg_gen_shli_tl(t1, cpu_gpr[rA(ctx->opcode)], 32);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+#else
+ if (rD(ctx->opcode) == rA(ctx->opcode)) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_mov_i32(tmp, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
+ tcg_gen_mov_i32(cpu_gprh[rD(ctx->opcode)], tmp);
+ tcg_temp_free_i32(tmp);
+ } else {
+ tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
+ tcg_gen_mov_i32(cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ }
+#endif
+}
+static inline void gen_evsplati(DisasContext *ctx)
+{
+ uint64_t imm = ((int32_t)(rA(ctx->opcode) << 27)) >> 27;
+
+#if defined(TARGET_PPC64)
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], (imm << 32) | imm);
+#else
+ tcg_gen_movi_i32(cpu_gpr[rD(ctx->opcode)], imm);
+ tcg_gen_movi_i32(cpu_gprh[rD(ctx->opcode)], imm);
+#endif
+}
+static inline void gen_evsplatfi(DisasContext *ctx)
+{
+ uint64_t imm = rA(ctx->opcode) << 27;
+
+#if defined(TARGET_PPC64)
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], (imm << 32) | imm);
+#else
+ tcg_gen_movi_i32(cpu_gpr[rD(ctx->opcode)], imm);
+ tcg_gen_movi_i32(cpu_gprh[rD(ctx->opcode)], imm);
+#endif
+}
+
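+/* evsel: the high and low words of rD are selected independently from rA
+ * or rB according to two bits of the CR field designated by the low three
+ * bits of the opcode.
+ */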
+static inline void gen_evsel(DisasContext *ctx)
+{
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ int l3 = gen_new_label();
+ int l4 = gen_new_label();
+ TCGv_i32 t0 = tcg_temp_local_new_i32();
+#if defined(TARGET_PPC64)
+ TCGv t1 = tcg_temp_local_new();
+ TCGv t2 = tcg_temp_local_new();
+#endif
+ tcg_gen_andi_i32(t0, cpu_crf[ctx->opcode & 0x07], 1 << 3);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
+#if defined(TARGET_PPC64)
+ tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], 0xFFFFFFFF00000000ULL);
+#else
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+#endif
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+#if defined(TARGET_PPC64)
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0xFFFFFFFF00000000ULL);
+#else
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
+#endif
+ gen_set_label(l2);
+ tcg_gen_andi_i32(t0, cpu_crf[ctx->opcode & 0x07], 1 << 2);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l3);
+#if defined(TARGET_PPC64)
+ tcg_gen_andi_tl(t2, cpu_gpr[rA(ctx->opcode)], 0x00000000FFFFFFFFULL);
+#else
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+#endif
+ tcg_gen_br(l4);
+ gen_set_label(l3);
+#if defined(TARGET_PPC64)
+ tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x00000000FFFFFFFFULL);
+#else
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+#endif
+ gen_set_label(l4);
+ tcg_temp_free_i32(t0);
+#if defined(TARGET_PPC64)
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], t1, t2);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+#endif
+}
+
+static void gen_evsel0(DisasContext *ctx)
+{
+ gen_evsel(ctx);
+}
+
+static void gen_evsel1(DisasContext *ctx)
+{
+ gen_evsel(ctx);
+}
+
+static void gen_evsel2(DisasContext *ctx)
+{
+ gen_evsel(ctx);
+}
+
+static void gen_evsel3(DisasContext *ctx)
+{
+ gen_evsel(ctx);
+}
+
+GEN_SPE(evaddw, speundef, 0x00, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evaddiw, speundef, 0x01, 0x08, 0x00000000, PPC_SPE);
+GEN_SPE(evsubfw, speundef, 0x02, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evsubifw, speundef, 0x03, 0x08, 0x00000000, PPC_SPE);
+GEN_SPE(evabs, evneg, 0x04, 0x08, 0x0000F800, PPC_SPE); ////
+GEN_SPE(evextsb, evextsh, 0x05, 0x08, 0x0000F800, PPC_SPE); ////
+GEN_SPE(evrndw, evcntlzw, 0x06, 0x08, 0x0000F800, PPC_SPE); ////
+GEN_SPE(evcntlsw, brinc, 0x07, 0x08, 0x00000000, PPC_SPE); //
+GEN_SPE(speundef, evand, 0x08, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evandc, speundef, 0x09, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evxor, evor, 0x0B, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evnor, eveqv, 0x0C, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(speundef, evorc, 0x0D, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evnand, speundef, 0x0F, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evsrwu, evsrws, 0x10, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evsrwiu, evsrwis, 0x11, 0x08, 0x00000000, PPC_SPE);
+GEN_SPE(evslw, speundef, 0x12, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evslwi, speundef, 0x13, 0x08, 0x00000000, PPC_SPE);
+GEN_SPE(evrlw, evsplati, 0x14, 0x08, 0x00000000, PPC_SPE); //
+GEN_SPE(evrlwi, evsplatfi, 0x15, 0x08, 0x00000000, PPC_SPE);
+GEN_SPE(evmergehi, evmergelo, 0x16, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evmergehilo, evmergelohi, 0x17, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evcmpgtu, evcmpgts, 0x18, 0x08, 0x00600000, PPC_SPE); ////
+GEN_SPE(evcmpltu, evcmplts, 0x19, 0x08, 0x00600000, PPC_SPE); ////
+GEN_SPE(evcmpeq, speundef, 0x1A, 0x08, 0x00600000, PPC_SPE); ////
+
+/* SPE load and stores */
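+/* SPE load/store addressing: the 5-bit immediate held in the rB field is
+ * scaled by the access size (shifted left by 'sh') and added to (rA|0).
+ */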
+static inline void gen_addr_spe_imm_index(DisasContext *ctx, TCGv EA, int sh)
+{
+ target_ulong uimm = rB(ctx->opcode);
+
+ if (rA(ctx->opcode) == 0) {
+ tcg_gen_movi_tl(EA, uimm << sh);
+ } else {
+ tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], uimm << sh);
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(EA, EA);
+ }
+#endif
+ }
+}
+
+static inline void gen_op_evldd(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+ gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], addr);
+#else
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ gen_qemu_ld64(ctx, t0, addr);
+ tcg_gen_trunc_i64_i32(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_trunc_i64_i32(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_temp_free_i64(t0);
+#endif
+}
+
+static inline void gen_op_evldw(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld32u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
+ gen_addr_add(ctx, addr, addr, 4);
+ gen_qemu_ld32u(ctx, t0, addr);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+#else
+ gen_qemu_ld32u(ctx, cpu_gprh[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 4);
+ gen_qemu_ld32u(ctx, cpu_gpr[rD(ctx->opcode)], addr);
+#endif
+}
+
+static inline void gen_op_evldh(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+#if defined(TARGET_PPC64)
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(t0, t0, 32);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(t0, t0, 16);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#else
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_or_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+    tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#endif
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlhhesplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(ctx, t0, addr);
+#if defined(TARGET_PPC64)
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
+ tcg_gen_shli_tl(t0, t0, 16);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#else
+ tcg_gen_shli_tl(t0, t0, 16);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+#endif
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlhhousplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(ctx, t0, addr);
+#if defined(TARGET_PPC64)
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#else
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+#endif
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlhhossplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16s(ctx, t0, addr);
+#if defined(TARGET_PPC64)
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#else
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+#endif
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlwhe(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+#if defined(TARGET_PPC64)
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(t0, t0, 16);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#else
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
+#endif
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlwhou(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(ctx, cpu_gpr[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(t0, t0, 32);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+#else
+ gen_qemu_ld16u(ctx, cpu_gprh[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, cpu_gpr[rD(ctx->opcode)], addr);
+#endif
+}
+
+static inline void gen_op_evlwhos(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16s(ctx, t0, addr);
+ tcg_gen_ext32u_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16s(ctx, t0, addr);
+ tcg_gen_shli_tl(t0, t0, 32);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+#else
+ gen_qemu_ld16s(ctx, cpu_gprh[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16s(ctx, cpu_gpr[rD(ctx->opcode)], addr);
+#endif
+}
+
+static inline void gen_op_evlwwsplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld32u(ctx, t0, addr);
+#if defined(TARGET_PPC64)
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#else
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+#endif
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlwhsplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+#if defined(TARGET_PPC64)
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
+ tcg_gen_shli_tl(t0, t0, 32);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_gen_shli_tl(t0, t0, 16);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#else
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
+ tcg_gen_or_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#endif
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evstdd(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+ gen_qemu_st64(ctx, cpu_gpr[rS(ctx->opcode)], addr);
+#else
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(t0, cpu_gpr[rS(ctx->opcode)], cpu_gprh[rS(ctx->opcode)]);
+ gen_qemu_st64(ctx, t0, addr);
+ tcg_temp_free_i64(t0);
+#endif
+}
+
+static inline void gen_op_evstdw(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
+ gen_qemu_st32(ctx, t0, addr);
+ tcg_temp_free(t0);
+#else
+ gen_qemu_st32(ctx, cpu_gprh[rS(ctx->opcode)], addr);
+#endif
+ gen_addr_add(ctx, addr, addr, 4);
+ gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evstdh(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+#if defined(TARGET_PPC64)
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 48);
+#else
+ tcg_gen_shri_tl(t0, cpu_gprh[rS(ctx->opcode)], 16);
+#endif
+ gen_qemu_st16(ctx, t0, addr);
+ gen_addr_add(ctx, addr, addr, 2);
+#if defined(TARGET_PPC64)
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
+ gen_qemu_st16(ctx, t0, addr);
+#else
+ gen_qemu_st16(ctx, cpu_gprh[rS(ctx->opcode)], addr);
+#endif
+ gen_addr_add(ctx, addr, addr, 2);
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
+ gen_qemu_st16(ctx, t0, addr);
+ tcg_temp_free(t0);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evstwhe(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+#if defined(TARGET_PPC64)
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 48);
+#else
+ tcg_gen_shri_tl(t0, cpu_gprh[rS(ctx->opcode)], 16);
+#endif
+ gen_qemu_st16(ctx, t0, addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
+ gen_qemu_st16(ctx, t0, addr);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evstwho(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
+ gen_qemu_st16(ctx, t0, addr);
+ tcg_temp_free(t0);
+#else
+ gen_qemu_st16(ctx, cpu_gprh[rS(ctx->opcode)], addr);
+#endif
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evstwwe(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
+ gen_qemu_st32(ctx, t0, addr);
+ tcg_temp_free(t0);
+#else
+ gen_qemu_st32(ctx, cpu_gprh[rS(ctx->opcode)], addr);
+#endif
+}
+
+static inline void gen_op_evstwwo(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], addr);
+}
+
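+/* Common wrapper for SPE loads and stores: the Rc bit selects between
+ * immediate-indexed and register-indexed addressing, and the per-insn
+ * gen_op_* callback performs the actual access.
+ */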
+#define GEN_SPEOP_LDST(name, opc2, sh) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv t0; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ t0 = tcg_temp_new(); \
+ if (Rc(ctx->opcode)) { \
+ gen_addr_spe_imm_index(ctx, t0, sh); \
+ } else { \
+ gen_addr_reg_index(ctx, t0); \
+ } \
+ gen_op_##name(ctx, t0); \
+ tcg_temp_free(t0); \
+}
+
+GEN_SPEOP_LDST(evldd, 0x00, 3);
+GEN_SPEOP_LDST(evldw, 0x01, 3);
+GEN_SPEOP_LDST(evldh, 0x02, 3);
+GEN_SPEOP_LDST(evlhhesplat, 0x04, 1);
+GEN_SPEOP_LDST(evlhhousplat, 0x06, 1);
+GEN_SPEOP_LDST(evlhhossplat, 0x07, 1);
+GEN_SPEOP_LDST(evlwhe, 0x08, 2);
+GEN_SPEOP_LDST(evlwhou, 0x0A, 2);
+GEN_SPEOP_LDST(evlwhos, 0x0B, 2);
+GEN_SPEOP_LDST(evlwwsplat, 0x0C, 2);
+GEN_SPEOP_LDST(evlwhsplat, 0x0E, 2);
+
+GEN_SPEOP_LDST(evstdd, 0x10, 3);
+GEN_SPEOP_LDST(evstdw, 0x11, 3);
+GEN_SPEOP_LDST(evstdh, 0x12, 3);
+GEN_SPEOP_LDST(evstwhe, 0x18, 2);
+GEN_SPEOP_LDST(evstwho, 0x1A, 2);
+GEN_SPEOP_LDST(evstwwe, 0x1C, 2);
+GEN_SPEOP_LDST(evstwwo, 0x1E, 2);
+
+/* Multiply and add - TODO */
+#if 0
+GEN_SPE(speundef, evmhessf, 0x01, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhossf, 0x03, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumi, evmhesmi, 0x04, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmf, 0x05, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumi, evmhosmi, 0x06, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmf, 0x07, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhessfa, 0x11, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhossfa, 0x13, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumia, evmhesmia, 0x14, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmfa, 0x15, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumia, evmhosmia, 0x16, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmfa, 0x17, 0x10, 0x00000000, PPC_SPE);
+
+GEN_SPE(speundef, evmwhssf, 0x03, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumi, speundef, 0x04, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(evmwhumi, evmwhsmi, 0x06, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwhsmf, 0x07, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssf, 0x09, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(evmwumi, evmwsmi, 0x0C, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmf, 0x0D, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwhssfa, 0x13, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumia, speundef, 0x14, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(evmwhumia, evmwhsmia, 0x16, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwhsmfa, 0x17, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssfa, 0x19, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(evmwumia, evmwsmia, 0x1C, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmfa, 0x1D, 0x11, 0x00000000, PPC_SPE);
+
+GEN_SPE(evadduiaaw, evaddsiaaw, 0x00, 0x13, 0x0000F800, PPC_SPE);
+GEN_SPE(evsubfusiaaw, evsubfssiaaw, 0x01, 0x13, 0x0000F800, PPC_SPE);
+GEN_SPE(evaddumiaaw, evaddsmiaaw, 0x04, 0x13, 0x0000F800, PPC_SPE);
+GEN_SPE(evsubfumiaaw, evsubfsmiaaw, 0x05, 0x13, 0x0000F800, PPC_SPE);
+GEN_SPE(evdivws, evdivwu, 0x06, 0x13, 0x00000000, PPC_SPE);
+GEN_SPE(evmra, speundef, 0x07, 0x13, 0x0000F800, PPC_SPE);
+
+GEN_SPE(evmheusiaaw, evmhessiaaw, 0x00, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhessfaaw, 0x01, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(evmhousiaaw, evmhossiaaw, 0x02, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhossfaaw, 0x03, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumiaaw, evmhesmiaaw, 0x04, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmfaaw, 0x05, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumiaaw, evmhosmiaaw, 0x06, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmfaaw, 0x07, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(evmhegumiaa, evmhegsmiaa, 0x14, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhegsmfaa, 0x15, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(evmhogumiaa, evmhogsmiaa, 0x16, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhogsmfaa, 0x17, 0x14, 0x00000000, PPC_SPE);
+
+GEN_SPE(evmwlusiaaw, evmwlssiaaw, 0x00, 0x15, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumiaaw, evmwlsmiaaw, 0x04, 0x15, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssfaa, 0x09, 0x15, 0x00000000, PPC_SPE);
+GEN_SPE(evmwumiaa, evmwsmiaa, 0x0C, 0x15, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmfaa, 0x0D, 0x15, 0x00000000, PPC_SPE);
+
+GEN_SPE(evmheusianw, evmhessianw, 0x00, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhessfanw, 0x01, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(evmhousianw, evmhossianw, 0x02, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhossfanw, 0x03, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumianw, evmhesmianw, 0x04, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmfanw, 0x05, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumianw, evmhosmianw, 0x06, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmfanw, 0x07, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(evmhegumian, evmhegsmian, 0x14, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhegsmfan, 0x15, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(evmhigumian, evmhigsmian, 0x16, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhogsmfan, 0x17, 0x16, 0x00000000, PPC_SPE);
+
+GEN_SPE(evmwlusianw, evmwlssianw, 0x00, 0x17, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumianw, evmwlsmianw, 0x04, 0x17, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssfan, 0x09, 0x17, 0x00000000, PPC_SPE);
+GEN_SPE(evmwumian, evmwsmian, 0x0C, 0x17, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmfan, 0x0D, 0x17, 0x00000000, PPC_SPE);
+#endif
+
+/*** SPE floating-point extension ***/
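+/* The SPE floating-point macros are named after their operand widths:
+ * for example CONV_32_64 produces a 32-bit result from a 64-bit source and
+ * COMP_* write into a CR field.  On 64-bit targets, 32-bit results are
+ * merged into the low word of the destination GPR, preserving its upper half.
+ */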
+#if defined(TARGET_PPC64)
+#define GEN_SPEFPUOP_CONV_32_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0; \
+ TCGv t1; \
+ t0 = tcg_temp_new_i32(); \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(t0, t0); \
+ t1 = tcg_temp_new(); \
+ tcg_gen_extu_i32_tl(t1, t0); \
+ tcg_temp_free_i32(t0); \
+ tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], \
+ 0xFFFFFFFF00000000ULL); \
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t1); \
+ tcg_temp_free(t1); \
+}
+#define GEN_SPEFPUOP_CONV_32_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0; \
+ TCGv t1; \
+ t0 = tcg_temp_new_i32(); \
+ gen_helper_##name(t0, cpu_gpr[rB(ctx->opcode)]); \
+ t1 = tcg_temp_new(); \
+ tcg_gen_extu_i32_tl(t1, t0); \
+ tcg_temp_free_i32(t0); \
+ tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], \
+ 0xFFFFFFFF00000000ULL); \
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t1); \
+ tcg_temp_free(t1); \
+}
+#define GEN_SPEFPUOP_CONV_64_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0 = tcg_temp_new_i32(); \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], t0); \
+ tcg_temp_free_i32(t0); \
+}
+#define GEN_SPEFPUOP_CONV_64_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \
+}
+#define GEN_SPEFPUOP_ARITH2_32_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0, t1; \
+ TCGv_i64 t2; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ t1 = tcg_temp_new_i32(); \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(t0, t0, t1); \
+ tcg_temp_free_i32(t1); \
+ t2 = tcg_temp_new(); \
+ tcg_gen_extu_i32_tl(t2, t0); \
+ tcg_temp_free_i32(t0); \
+ tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], \
+ 0xFFFFFFFF00000000ULL); \
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t2); \
+ tcg_temp_free(t2); \
+}
+#define GEN_SPEFPUOP_ARITH2_64_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], \
+ cpu_gpr[rB(ctx->opcode)]); \
+}
+#define GEN_SPEFPUOP_COMP_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0, t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ t1 = tcg_temp_new_i32(); \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], t0, t1); \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i32(t1); \
+}
+#define GEN_SPEFPUOP_COMP_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \
+}
+#else
+#define GEN_SPEFPUOP_CONV_32_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \
+}
+#define GEN_SPEFPUOP_CONV_32_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0 = tcg_temp_new_i64(); \
+ gen_load_gpr64(t0, rB(ctx->opcode)); \
+ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], t0); \
+ tcg_temp_free_i64(t0); \
+}
+#define GEN_SPEFPUOP_CONV_64_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0 = tcg_temp_new_i64(); \
+ gen_helper_##name(t0, cpu_gpr[rB(ctx->opcode)]); \
+ gen_store_gpr64(rD(ctx->opcode), t0); \
+ tcg_temp_free_i64(t0); \
+}
+#define GEN_SPEFPUOP_CONV_64_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0 = tcg_temp_new_i64(); \
+ gen_load_gpr64(t0, rB(ctx->opcode)); \
+ gen_helper_##name(t0, t0); \
+ gen_store_gpr64(rD(ctx->opcode), t0); \
+ tcg_temp_free_i64(t0); \
+}
+#define GEN_SPEFPUOP_ARITH2_32_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \
+}
+#define GEN_SPEFPUOP_ARITH2_64_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0, t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i64(); \
+ gen_load_gpr64(t0, rA(ctx->opcode)); \
+ gen_load_gpr64(t1, rB(ctx->opcode)); \
+ gen_helper_##name(t0, t0, t1); \
+ gen_store_gpr64(rD(ctx->opcode), t0); \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i64(t1); \
+}
+#define GEN_SPEFPUOP_COMP_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \
+}
+#define GEN_SPEFPUOP_COMP_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0, t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i64(); \
+ gen_load_gpr64(t0, rA(ctx->opcode)); \
+ gen_load_gpr64(t1, rB(ctx->opcode)); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], t0, t1); \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i64(t1); \
+}
+#endif
+
+/* Single precision floating-point vectors operations */
+/* Arithmetic */
+GEN_SPEFPUOP_ARITH2_64_64(evfsadd);
+GEN_SPEFPUOP_ARITH2_64_64(evfssub);
+GEN_SPEFPUOP_ARITH2_64_64(evfsmul);
+GEN_SPEFPUOP_ARITH2_64_64(evfsdiv);
+static inline void gen_evfsabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_APU);
+ return;
+ }
+#if defined(TARGET_PPC64)
+ tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], ~0x8000000080000000LL);
+#else
+ tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], ~0x80000000);
+ tcg_gen_andi_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], ~0x80000000);
+#endif
+}
+static inline void gen_evfsnabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_APU);
+ return;
+ }
+#if defined(TARGET_PPC64)
+ tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x8000000080000000LL);
+#else
+ tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000);
+ tcg_gen_ori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], 0x80000000);
+#endif
+}
+static inline void gen_evfsneg(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_APU);
+ return;
+ }
+#if defined(TARGET_PPC64)
+ tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x8000000080000000LL);
+#else
+ tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000);
+ tcg_gen_xori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], 0x80000000);
+#endif
+}
+
+/* Conversion */
+GEN_SPEFPUOP_CONV_64_64(evfscfui);
+GEN_SPEFPUOP_CONV_64_64(evfscfsi);
+GEN_SPEFPUOP_CONV_64_64(evfscfuf);
+GEN_SPEFPUOP_CONV_64_64(evfscfsf);
+GEN_SPEFPUOP_CONV_64_64(evfsctui);
+GEN_SPEFPUOP_CONV_64_64(evfsctsi);
+GEN_SPEFPUOP_CONV_64_64(evfsctuf);
+GEN_SPEFPUOP_CONV_64_64(evfsctsf);
+GEN_SPEFPUOP_CONV_64_64(evfsctuiz);
+GEN_SPEFPUOP_CONV_64_64(evfsctsiz);
+
+/* Comparison */
+GEN_SPEFPUOP_COMP_64(evfscmpgt);
+GEN_SPEFPUOP_COMP_64(evfscmplt);
+GEN_SPEFPUOP_COMP_64(evfscmpeq);
+GEN_SPEFPUOP_COMP_64(evfststgt);
+GEN_SPEFPUOP_COMP_64(evfststlt);
+GEN_SPEFPUOP_COMP_64(evfststeq);
+
+/* Opcodes definitions */
+GEN_SPE(evfsadd, evfssub, 0x00, 0x0A, 0x00000000, PPC_SPE_SINGLE); //
+GEN_SPE(evfsabs, evfsnabs, 0x02, 0x0A, 0x0000F800, PPC_SPE_SINGLE); //
+GEN_SPE(evfsneg, speundef, 0x03, 0x0A, 0x0000F800, PPC_SPE_SINGLE); //
+GEN_SPE(evfsmul, evfsdiv, 0x04, 0x0A, 0x00000000, PPC_SPE_SINGLE); //
+GEN_SPE(evfscmpgt, evfscmplt, 0x06, 0x0A, 0x00600000, PPC_SPE_SINGLE); //
+GEN_SPE(evfscmpeq, speundef, 0x07, 0x0A, 0x00600000, PPC_SPE_SINGLE); //
+GEN_SPE(evfscfui, evfscfsi, 0x08, 0x0A, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(evfscfuf, evfscfsf, 0x09, 0x0A, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(evfsctui, evfsctsi, 0x0A, 0x0A, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(evfsctuf, evfsctsf, 0x0B, 0x0A, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(evfsctuiz, speundef, 0x0C, 0x0A, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(evfsctsiz, speundef, 0x0D, 0x0A, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(evfststgt, evfststlt, 0x0E, 0x0A, 0x00600000, PPC_SPE_SINGLE); //
+GEN_SPE(evfststeq, speundef, 0x0F, 0x0A, 0x00600000, PPC_SPE_SINGLE); //
+
+/* Single precision floating-point operations */
+/* Arithmetic */
+GEN_SPEFPUOP_ARITH2_32_32(efsadd);
+GEN_SPEFPUOP_ARITH2_32_32(efssub);
+GEN_SPEFPUOP_ARITH2_32_32(efsmul);
+GEN_SPEFPUOP_ARITH2_32_32(efsdiv);
+static inline void gen_efsabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_APU);
+ return;
+ }
+ tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], (target_long)~0x80000000LL);
+}
+static inline void gen_efsnabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_APU);
+ return;
+ }
+ tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000);
+}
+static inline void gen_efsneg(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_APU);
+ return;
+ }
+ tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000);
+}
+
+/* Conversion */
+GEN_SPEFPUOP_CONV_32_32(efscfui);
+GEN_SPEFPUOP_CONV_32_32(efscfsi);
+GEN_SPEFPUOP_CONV_32_32(efscfuf);
+GEN_SPEFPUOP_CONV_32_32(efscfsf);
+GEN_SPEFPUOP_CONV_32_32(efsctui);
+GEN_SPEFPUOP_CONV_32_32(efsctsi);
+GEN_SPEFPUOP_CONV_32_32(efsctuf);
+GEN_SPEFPUOP_CONV_32_32(efsctsf);
+GEN_SPEFPUOP_CONV_32_32(efsctuiz);
+GEN_SPEFPUOP_CONV_32_32(efsctsiz);
+GEN_SPEFPUOP_CONV_32_64(efscfd);
+
+/* Comparison */
+GEN_SPEFPUOP_COMP_32(efscmpgt);
+GEN_SPEFPUOP_COMP_32(efscmplt);
+GEN_SPEFPUOP_COMP_32(efscmpeq);
+GEN_SPEFPUOP_COMP_32(efststgt);
+GEN_SPEFPUOP_COMP_32(efststlt);
+GEN_SPEFPUOP_COMP_32(efststeq);
+
+/* Opcodes definitions */
+GEN_SPE(efsadd, efssub, 0x00, 0x0B, 0x00000000, PPC_SPE_SINGLE); //
+GEN_SPE(efsabs, efsnabs, 0x02, 0x0B, 0x0000F800, PPC_SPE_SINGLE); //
+GEN_SPE(efsneg, speundef, 0x03, 0x0B, 0x0000F800, PPC_SPE_SINGLE); //
+GEN_SPE(efsmul, efsdiv, 0x04, 0x0B, 0x00000000, PPC_SPE_SINGLE); //
+GEN_SPE(efscmpgt, efscmplt, 0x06, 0x0B, 0x00600000, PPC_SPE_SINGLE); //
+GEN_SPE(efscmpeq, efscfd, 0x07, 0x0B, 0x00600000, PPC_SPE_SINGLE); //
+GEN_SPE(efscfui, efscfsi, 0x08, 0x0B, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efscfuf, efscfsf, 0x09, 0x0B, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efsctui, efsctsi, 0x0A, 0x0B, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efsctuf, efsctsf, 0x0B, 0x0B, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efsctuiz, speundef, 0x0C, 0x0B, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efsctsiz, speundef, 0x0D, 0x0B, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efststgt, efststlt, 0x0E, 0x0B, 0x00600000, PPC_SPE_SINGLE); //
+GEN_SPE(efststeq, speundef, 0x0F, 0x0B, 0x00600000, PPC_SPE_SINGLE); //
+
+/* Double precision floating-point operations */
+/* Arithmetic */
+GEN_SPEFPUOP_ARITH2_64_64(efdadd);
+GEN_SPEFPUOP_ARITH2_64_64(efdsub);
+GEN_SPEFPUOP_ARITH2_64_64(efdmul);
+GEN_SPEFPUOP_ARITH2_64_64(efddiv);
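+/* For the double-precision efdabs/efdnabs/efdneg only the sign bit of the
+ * upper 32-bit word changes, which is why the 32-bit code path below copies
+ * the low GPR unchanged and adjusts cpu_gprh alone. */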
+static inline void gen_efdabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_APU);
+ return;
+ }
+#if defined(TARGET_PPC64)
+ tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], ~0x8000000000000000LL);
+#else
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_andi_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], ~0x80000000);
+#endif
+}
+static inline void gen_efdnabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_APU);
+ return;
+ }
+#if defined(TARGET_PPC64)
+ tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x8000000000000000LL);
+#else
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], 0x80000000);
+#endif
+}
+static inline void gen_efdneg(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_APU);
+ return;
+ }
+#if defined(TARGET_PPC64)
+ tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x8000000000000000LL);
+#else
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_xori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], 0x80000000);
+#endif
+}
+
+/* Conversion */
+GEN_SPEFPUOP_CONV_64_32(efdcfui);
+GEN_SPEFPUOP_CONV_64_32(efdcfsi);
+GEN_SPEFPUOP_CONV_64_32(efdcfuf);
+GEN_SPEFPUOP_CONV_64_32(efdcfsf);
+GEN_SPEFPUOP_CONV_32_64(efdctui);
+GEN_SPEFPUOP_CONV_32_64(efdctsi);
+GEN_SPEFPUOP_CONV_32_64(efdctuf);
+GEN_SPEFPUOP_CONV_32_64(efdctsf);
+GEN_SPEFPUOP_CONV_32_64(efdctuiz);
+GEN_SPEFPUOP_CONV_32_64(efdctsiz);
+GEN_SPEFPUOP_CONV_64_32(efdcfs);
+GEN_SPEFPUOP_CONV_64_64(efdcfuid);
+GEN_SPEFPUOP_CONV_64_64(efdcfsid);
+GEN_SPEFPUOP_CONV_64_64(efdctuidz);
+GEN_SPEFPUOP_CONV_64_64(efdctsidz);
+
+/* Comparison */
+GEN_SPEFPUOP_COMP_64(efdcmpgt);
+GEN_SPEFPUOP_COMP_64(efdcmplt);
+GEN_SPEFPUOP_COMP_64(efdcmpeq);
+GEN_SPEFPUOP_COMP_64(efdtstgt);
+GEN_SPEFPUOP_COMP_64(efdtstlt);
+GEN_SPEFPUOP_COMP_64(efdtsteq);
+
+/* Opcodes definitions */
+GEN_SPE(efdadd, efdsub, 0x10, 0x0B, 0x00000000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcfuid, efdcfsid, 0x11, 0x0B, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdabs, efdnabs, 0x12, 0x0B, 0x0000F800, PPC_SPE_DOUBLE); //
+GEN_SPE(efdneg, speundef, 0x13, 0x0B, 0x0000F800, PPC_SPE_DOUBLE); //
+GEN_SPE(efdmul, efddiv, 0x14, 0x0B, 0x00000000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctuidz, efdctsidz, 0x15, 0x0B, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcmpgt, efdcmplt, 0x16, 0x0B, 0x00600000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcmpeq, efdcfs, 0x17, 0x0B, 0x00600000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcfui, efdcfsi, 0x18, 0x0B, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcfuf, efdcfsf, 0x19, 0x0B, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctui, efdctsi, 0x1A, 0x0B, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctuf, efdctsf, 0x1B, 0x0B, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctuiz, speundef, 0x1C, 0x0B, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctsiz, speundef, 0x1D, 0x0B, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdtstgt, efdtstlt, 0x1E, 0x0B, 0x00600000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdtsteq, speundef, 0x1F, 0x0B, 0x00600000, PPC_SPE_DOUBLE); //
+
+static opcode_t opcodes[] = {
+GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE),
+GEN_HANDLER(cmp, 0x1F, 0x00, 0x00, 0x00400000, PPC_INTEGER),
+GEN_HANDLER(cmpi, 0x0B, 0xFF, 0xFF, 0x00400000, PPC_INTEGER),
+GEN_HANDLER(cmpl, 0x1F, 0x00, 0x01, 0x00400000, PPC_INTEGER),
+GEN_HANDLER(cmpli, 0x0A, 0xFF, 0xFF, 0x00400000, PPC_INTEGER),
+GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL),
+GEN_HANDLER(addi, 0x0E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(addis, 0x0F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER),
+GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER),
+GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(mullwo, 0x1F, 0x0B, 0x17, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(mulli, 0x07, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(mulld, 0x1F, 0x09, 0x07, 0x00000000, PPC_64B),
+#endif
+GEN_HANDLER(neg, 0x1F, 0x08, 0x03, 0x0000F800, PPC_INTEGER),
+GEN_HANDLER(nego, 0x1F, 0x08, 0x13, 0x0000F800, PPC_INTEGER),
+GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(oris, 0x19, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(xori, 0x1A, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(xoris, 0x1B, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(popcntb, 0x1F, 0x03, 0x03, 0x0000F801, PPC_POPCNTB),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B),
+#endif
+GEN_HANDLER(rlwimi, 0x14, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(rlwinm, 0x15, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(rlwnm, 0x17, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(slw, 0x1F, 0x18, 0x00, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(sraw, 0x1F, 0x18, 0x18, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(srawi, 0x1F, 0x18, 0x19, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(srw, 0x1F, 0x18, 0x10, 0x00000000, PPC_INTEGER),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(sld, 0x1F, 0x1B, 0x00, 0x00000000, PPC_64B),
+GEN_HANDLER(srad, 0x1F, 0x1A, 0x18, 0x00000000, PPC_64B),
+GEN_HANDLER2(sradi0, "sradi", 0x1F, 0x1A, 0x19, 0x00000000, PPC_64B),
+GEN_HANDLER2(sradi1, "sradi", 0x1F, 0x1B, 0x19, 0x00000000, PPC_64B),
+GEN_HANDLER(srd, 0x1F, 0x1B, 0x10, 0x00000000, PPC_64B),
+#endif
+GEN_HANDLER(frsqrtes, 0x3B, 0x1A, 0xFF, 0x001F07C0, PPC_FLOAT_FRSQRTES),
+GEN_HANDLER(fsqrt, 0x3F, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT),
+GEN_HANDLER(fsqrts, 0x3B, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT),
+GEN_HANDLER(fcmpo, 0x3F, 0x00, 0x01, 0x00600001, PPC_FLOAT),
+GEN_HANDLER(fcmpu, 0x3F, 0x00, 0x00, 0x00600001, PPC_FLOAT),
+GEN_HANDLER(fmr, 0x3F, 0x08, 0x02, 0x001F0000, PPC_FLOAT),
+GEN_HANDLER(mcrfs, 0x3F, 0x00, 0x02, 0x0063F801, PPC_FLOAT),
+GEN_HANDLER(mffs, 0x3F, 0x07, 0x12, 0x001FF800, PPC_FLOAT),
+GEN_HANDLER(mtfsb0, 0x3F, 0x06, 0x02, 0x001FF800, PPC_FLOAT),
+GEN_HANDLER(mtfsb1, 0x3F, 0x06, 0x01, 0x001FF800, PPC_FLOAT),
+GEN_HANDLER(mtfsf, 0x3F, 0x07, 0x16, 0x00010000, PPC_FLOAT),
+GEN_HANDLER(mtfsfi, 0x3F, 0x06, 0x04, 0x006f0800, PPC_FLOAT),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(ld, 0x3A, 0xFF, 0xFF, 0x00000000, PPC_64B),
+GEN_HANDLER(lq, 0x38, 0xFF, 0xFF, 0x00000000, PPC_64BX),
+GEN_HANDLER(std, 0x3E, 0xFF, 0xFF, 0x00000000, PPC_64B),
+#endif
+GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING),
+GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING),
+GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING),
+GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING),
+GEN_HANDLER(eieio, 0x1F, 0x16, 0x1A, 0x03FFF801, PPC_MEM_EIEIO),
+GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM),
+GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000000, PPC_RES),
+GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000000, PPC_64B),
+GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B),
+#endif
+GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC),
+GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x03FFF801, PPC_WAIT),
+GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
+GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
+GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW),
+GEN_HANDLER(bclr, 0x13, 0x10, 0x00, 0x00000000, PPC_FLOW),
+GEN_HANDLER(mcrf, 0x13, 0x00, 0xFF, 0x00000001, PPC_INTEGER),
+GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(rfid, 0x13, 0x12, 0x00, 0x03FF8001, PPC_64B),
+GEN_HANDLER(hrfid, 0x13, 0x12, 0x08, 0x03FF8001, PPC_64H),
+#endif
+GEN_HANDLER(sc, 0x11, 0xFF, 0xFF, 0x03FFF01D, PPC_FLOW),
+GEN_HANDLER(tw, 0x1F, 0x04, 0x00, 0x00000001, PPC_FLOW),
+GEN_HANDLER(twi, 0x03, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(td, 0x1F, 0x04, 0x02, 0x00000001, PPC_64B),
+GEN_HANDLER(tdi, 0x02, 0xFF, 0xFF, 0x00000000, PPC_64B),
+#endif
+GEN_HANDLER(mcrxr, 0x1F, 0x00, 0x10, 0x007FF801, PPC_MISC),
+GEN_HANDLER(mfcr, 0x1F, 0x13, 0x00, 0x00000801, PPC_MISC),
+GEN_HANDLER(mfmsr, 0x1F, 0x13, 0x02, 0x001FF801, PPC_MISC),
+GEN_HANDLER(mfspr, 0x1F, 0x13, 0x0A, 0x00000001, PPC_MISC),
+GEN_HANDLER(mftb, 0x1F, 0x13, 0x0B, 0x00000001, PPC_MFTB),
+GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B),
+#endif
+GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001FF801, PPC_MISC),
+GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000001, PPC_MISC),
+GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE),
+GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE),
+GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE),
+GEN_HANDLER(dcbt, 0x1F, 0x16, 0x08, 0x02000001, PPC_CACHE),
+GEN_HANDLER(dcbtst, 0x1F, 0x16, 0x07, 0x02000001, PPC_CACHE),
+GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03E00001, PPC_CACHE_DCBZ),
+GEN_HANDLER2(dcbz_970, "dcbz", 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZT),
+GEN_HANDLER(dst, 0x1F, 0x16, 0x0A, 0x01800001, PPC_ALTIVEC),
+GEN_HANDLER(dstst, 0x1F, 0x16, 0x0B, 0x02000001, PPC_ALTIVEC),
+GEN_HANDLER(dss, 0x1F, 0x16, 0x19, 0x019FF801, PPC_ALTIVEC),
+GEN_HANDLER(icbi, 0x1F, 0x16, 0x1E, 0x03E00001, PPC_CACHE_ICBI),
+GEN_HANDLER(dcba, 0x1F, 0x16, 0x17, 0x03E00001, PPC_CACHE_DCBA),
+GEN_HANDLER(mfsr, 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT),
+GEN_HANDLER(mfsrin, 0x1F, 0x13, 0x14, 0x001F0001, PPC_SEGMENT),
+GEN_HANDLER(mtsr, 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT),
+GEN_HANDLER(mtsrin, 0x1F, 0x12, 0x07, 0x001F0001, PPC_SEGMENT),
+#if defined(TARGET_PPC64)
+GEN_HANDLER2(mfsr_64b, "mfsr", 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT_64B),
+GEN_HANDLER2(mfsrin_64b, "mfsrin", 0x1F, 0x13, 0x14, 0x001F0001,
+ PPC_SEGMENT_64B),
+GEN_HANDLER2(mtsr_64b, "mtsr", 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT_64B),
+GEN_HANDLER2(mtsrin_64b, "mtsrin", 0x1F, 0x12, 0x07, 0x001F0001,
+ PPC_SEGMENT_64B),
+GEN_HANDLER2(slbmte, "slbmte", 0x1F, 0x12, 0x0C, 0x00000000, PPC_SEGMENT_64B),
+#endif
+GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA),
+GEN_HANDLER(tlbiel, 0x1F, 0x12, 0x08, 0x03FF0001, PPC_MEM_TLBIE),
+GEN_HANDLER(tlbie, 0x1F, 0x12, 0x09, 0x03FF0001, PPC_MEM_TLBIE),
+GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(slbia, 0x1F, 0x12, 0x0F, 0x03FFFC01, PPC_SLBI),
+GEN_HANDLER(slbie, 0x1F, 0x12, 0x0D, 0x03FF0001, PPC_SLBI),
+#endif
+GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN),
+GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN),
+GEN_HANDLER(abs, 0x1F, 0x08, 0x0B, 0x0000F800, PPC_POWER_BR),
+GEN_HANDLER(abso, 0x1F, 0x08, 0x1B, 0x0000F800, PPC_POWER_BR),
+GEN_HANDLER(clcs, 0x1F, 0x10, 0x13, 0x0000F800, PPC_POWER_BR),
+GEN_HANDLER(div, 0x1F, 0x0B, 0x0A, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(divo, 0x1F, 0x0B, 0x1A, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(divs, 0x1F, 0x0B, 0x0B, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(divso, 0x1F, 0x0B, 0x1B, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(doz, 0x1F, 0x08, 0x08, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(dozo, 0x1F, 0x08, 0x18, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(dozi, 0x09, 0xFF, 0xFF, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(lscbx, 0x1F, 0x15, 0x08, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(maskg, 0x1F, 0x1D, 0x00, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(maskir, 0x1F, 0x1D, 0x10, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(mul, 0x1F, 0x0B, 0x03, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(mulo, 0x1F, 0x0B, 0x13, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(nabs, 0x1F, 0x08, 0x0F, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(nabso, 0x1F, 0x08, 0x1F, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(rlmi, 0x16, 0xFF, 0xFF, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(rrib, 0x1F, 0x19, 0x10, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sle, 0x1F, 0x19, 0x04, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sleq, 0x1F, 0x19, 0x06, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sliq, 0x1F, 0x18, 0x05, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(slliq, 0x1F, 0x18, 0x07, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sllq, 0x1F, 0x18, 0x06, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(slq, 0x1F, 0x18, 0x04, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sraiq, 0x1F, 0x18, 0x1D, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sraq, 0x1F, 0x18, 0x1C, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sre, 0x1F, 0x19, 0x14, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(srea, 0x1F, 0x19, 0x1C, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sreq, 0x1F, 0x19, 0x16, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sriq, 0x1F, 0x18, 0x15, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(srliq, 0x1F, 0x18, 0x17, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(srlq, 0x1F, 0x18, 0x16, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(srq, 0x1F, 0x18, 0x14, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(dsa, 0x1F, 0x14, 0x13, 0x03FFF801, PPC_602_SPEC),
+GEN_HANDLER(esa, 0x1F, 0x14, 0x12, 0x03FFF801, PPC_602_SPEC),
+GEN_HANDLER(mfrom, 0x1F, 0x09, 0x08, 0x03E0F801, PPC_602_SPEC),
+GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB),
+GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB),
+GEN_HANDLER2(tlbld_74xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_74xx_TLB),
+GEN_HANDLER2(tlbli_74xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_74xx_TLB),
+GEN_HANDLER(clf, 0x1F, 0x16, 0x03, 0x03E00000, PPC_POWER),
+GEN_HANDLER(cli, 0x1F, 0x16, 0x0F, 0x03E00000, PPC_POWER),
+GEN_HANDLER(dclst, 0x1F, 0x16, 0x13, 0x03E00000, PPC_POWER),
+GEN_HANDLER(mfsri, 0x1F, 0x13, 0x13, 0x00000001, PPC_POWER),
+GEN_HANDLER(rac, 0x1F, 0x12, 0x19, 0x00000001, PPC_POWER),
+GEN_HANDLER(rfsvc, 0x13, 0x12, 0x02, 0x03FFF0001, PPC_POWER),
+GEN_HANDLER(lfq, 0x38, 0xFF, 0xFF, 0x00000003, PPC_POWER2),
+GEN_HANDLER(lfqu, 0x39, 0xFF, 0xFF, 0x00000003, PPC_POWER2),
+GEN_HANDLER(lfqux, 0x1F, 0x17, 0x19, 0x00000001, PPC_POWER2),
+GEN_HANDLER(lfqx, 0x1F, 0x17, 0x18, 0x00000001, PPC_POWER2),
+GEN_HANDLER(stfq, 0x3C, 0xFF, 0xFF, 0x00000003, PPC_POWER2),
+GEN_HANDLER(stfqu, 0x3D, 0xFF, 0xFF, 0x00000003, PPC_POWER2),
+GEN_HANDLER(stfqux, 0x1F, 0x17, 0x1D, 0x00000001, PPC_POWER2),
+GEN_HANDLER(stfqx, 0x1F, 0x17, 0x1C, 0x00000001, PPC_POWER2),
+GEN_HANDLER(mfapidi, 0x1F, 0x13, 0x08, 0x0000F801, PPC_MFAPIDI),
+GEN_HANDLER(tlbiva, 0x1F, 0x12, 0x18, 0x03FFF801, PPC_TLBIVA),
+GEN_HANDLER(mfdcr, 0x1F, 0x03, 0x0A, 0x00000001, PPC_DCR),
+GEN_HANDLER(mtdcr, 0x1F, 0x03, 0x0E, 0x00000001, PPC_DCR),
+GEN_HANDLER(mfdcrx, 0x1F, 0x03, 0x08, 0x00000000, PPC_DCRX),
+GEN_HANDLER(mtdcrx, 0x1F, 0x03, 0x0C, 0x00000000, PPC_DCRX),
+GEN_HANDLER(mfdcrux, 0x1F, 0x03, 0x09, 0x00000000, PPC_DCRUX),
+GEN_HANDLER(mtdcrux, 0x1F, 0x03, 0x0D, 0x00000000, PPC_DCRUX),
+GEN_HANDLER(dccci, 0x1F, 0x06, 0x0E, 0x03E00001, PPC_4xx_COMMON),
+GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON),
+GEN_HANDLER2(icbt_40x, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_40x_ICBT),
+GEN_HANDLER(iccci, 0x1F, 0x06, 0x1E, 0x00000001, PPC_4xx_COMMON),
+GEN_HANDLER(icread, 0x1F, 0x06, 0x1F, 0x03E00001, PPC_4xx_COMMON),
+GEN_HANDLER2(rfci_40x, "rfci", 0x13, 0x13, 0x01, 0x03FF8001, PPC_40x_EXCP),
+GEN_HANDLER(rfci, 0x13, 0x13, 0x01, 0x03FF8001, PPC_BOOKE),
+GEN_HANDLER(rfdi, 0x13, 0x07, 0x01, 0x03FF8001, PPC_RFDI),
+GEN_HANDLER(rfmci, 0x13, 0x06, 0x01, 0x03FF8001, PPC_RFMCI),
+GEN_HANDLER2(tlbre_40x, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_40x_TLB),
+GEN_HANDLER2(tlbsx_40x, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_40x_TLB),
+GEN_HANDLER2(tlbwe_40x, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_40x_TLB),
+GEN_HANDLER2(tlbre_440, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_BOOKE),
+GEN_HANDLER2(tlbsx_440, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_BOOKE),
+GEN_HANDLER2(tlbwe_440, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_BOOKE),
+GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE),
+GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000E7C01, PPC_WRTEE),
+GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC),
+GEN_HANDLER(mbar, 0x1F, 0x16, 0x1a, 0x001FF801, PPC_BOOKE),
+GEN_HANDLER(msync, 0x1F, 0x16, 0x12, 0x03FFF801, PPC_BOOKE),
+GEN_HANDLER2(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001, PPC_BOOKE),
+GEN_HANDLER(lvsl, 0x1f, 0x06, 0x00, 0x00000001, PPC_ALTIVEC),
+GEN_HANDLER(lvsr, 0x1f, 0x06, 0x01, 0x00000001, PPC_ALTIVEC),
+GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC),
+GEN_HANDLER(mtvscr, 0x04, 0x2, 0x19, 0x03ff0000, PPC_ALTIVEC),
+GEN_HANDLER(vsldoi, 0x04, 0x16, 0xFF, 0x00000400, PPC_ALTIVEC),
+GEN_HANDLER(vmladduhm, 0x04, 0x11, 0xFF, 0x00000000, PPC_ALTIVEC),
+GEN_HANDLER2(evsel0, "evsel", 0x04, 0x1c, 0x09, 0x00000000, PPC_SPE),
+GEN_HANDLER2(evsel1, "evsel", 0x04, 0x1d, 0x09, 0x00000000, PPC_SPE),
+GEN_HANDLER2(evsel2, "evsel", 0x04, 0x1e, 0x09, 0x00000000, PPC_SPE),
+GEN_HANDLER2(evsel3, "evsel", 0x04, 0x1f, 0x09, 0x00000000, PPC_SPE),
+
+#undef GEN_INT_ARITH_ADD
+#undef GEN_INT_ARITH_ADD_CONST
+#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \
+GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x00000000, PPC_INTEGER),
+#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \
+ add_ca, compute_ca, compute_ov) \
+GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x0000F800, PPC_INTEGER),
+GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
+GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
+GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
+GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
+GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
+GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
+GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
+GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
+GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
+GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
+
+#undef GEN_INT_ARITH_DIVW
+#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \
+GEN_HANDLER(name, 0x1F, 0x0B, opc3, 0x00000000, PPC_INTEGER)
+GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0),
+GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1),
+GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0),
+GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1),
+
+#if defined(TARGET_PPC64)
+#undef GEN_INT_ARITH_DIVD
+#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \
+GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
+GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0),
+GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1),
+GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0),
+GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1),
+
+#undef GEN_INT_ARITH_MUL_HELPER
+#define GEN_INT_ARITH_MUL_HELPER(name, opc3) \
+GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
+GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00),
+GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02),
+GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17),
+#endif
+
+#undef GEN_INT_ARITH_SUBF
+#undef GEN_INT_ARITH_SUBF_CONST
+#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \
+GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x00000000, PPC_INTEGER),
+#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \
+ add_ca, compute_ca, compute_ov) \
+GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x0000F800, PPC_INTEGER),
+GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
+GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
+GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
+GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
+GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
+GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
+GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
+GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
+GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
+GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
+
+#undef GEN_LOGICAL1
+#undef GEN_LOGICAL2
+#define GEN_LOGICAL2(name, tcg_op, opc, type) \
+GEN_HANDLER(name, 0x1F, 0x1C, opc, 0x00000000, type)
+#define GEN_LOGICAL1(name, tcg_op, opc, type) \
+GEN_HANDLER(name, 0x1F, 0x1A, opc, 0x00000000, type)
+GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER),
+GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER),
+GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER),
+GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER),
+GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER),
+GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER),
+GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER),
+GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER),
+#if defined(TARGET_PPC64)
+GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B),
+#endif
+
+#if defined(TARGET_PPC64)
+#undef GEN_PPC64_R2
+#undef GEN_PPC64_R4
+#define GEN_PPC64_R2(name, opc1, opc2) \
+GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
+GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \
+ PPC_64B)
+#define GEN_PPC64_R4(name, opc1, opc2) \
+GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
+GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x01, 0xFF, 0x00000000, \
+ PPC_64B), \
+GEN_HANDLER2(name##2, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \
+ PPC_64B), \
+GEN_HANDLER2(name##3, stringify(name), opc1, opc2 | 0x11, 0xFF, 0x00000000, \
+ PPC_64B)
+GEN_PPC64_R4(rldicl, 0x1E, 0x00),
+GEN_PPC64_R4(rldicr, 0x1E, 0x02),
+GEN_PPC64_R4(rldic, 0x1E, 0x04),
+GEN_PPC64_R2(rldcl, 0x1E, 0x08),
+GEN_PPC64_R2(rldcr, 0x1E, 0x09),
+GEN_PPC64_R4(rldimi, 0x1E, 0x06),
+#endif
+
+#undef _GEN_FLOAT_ACB
+#undef GEN_FLOAT_ACB
+#undef _GEN_FLOAT_AB
+#undef GEN_FLOAT_AB
+#undef _GEN_FLOAT_AC
+#undef GEN_FLOAT_AC
+#undef GEN_FLOAT_B
+#undef GEN_FLOAT_BS
+#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \
+GEN_HANDLER(f##name, op1, op2, 0xFF, 0x00000000, type)
+#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
+_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type), \
+_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type)
+#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
+GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type)
+#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
+_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type), \
+_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type)
+#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
+GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type)
+#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
+_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type), \
+_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type)
+#define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
+GEN_HANDLER(f##name, 0x3F, op2, op3, 0x001F0000, type)
+#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
+GEN_HANDLER(f##name, op1, op2, 0xFF, 0x001F07C0, type)
+
+GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT),
+GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT),
+GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT),
+GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT),
+GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES),
+GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE),
+_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL),
+GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT),
+GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT),
+GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT),
+GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT),
+GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT),
+GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT),
+GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT),
+GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT),
+#if defined(TARGET_PPC64)
+GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC_64B),
+GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC_64B),
+GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC_64B),
+#endif
+GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT),
+GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT),
+GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT),
+GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT),
+GEN_FLOAT_B(abs, 0x08, 0x08, 0, PPC_FLOAT),
+GEN_FLOAT_B(nabs, 0x08, 0x04, 0, PPC_FLOAT),
+GEN_FLOAT_B(neg, 0x08, 0x01, 0, PPC_FLOAT),
+
+#undef GEN_LD
+#undef GEN_LDU
+#undef GEN_LDUX
+#undef GEN_LDX
+#undef GEN_LDS
+#define GEN_LD(name, ldop, opc, type) \
+GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_LDU(name, ldop, opc, type) \
+GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_LDUX(name, ldop, opc2, opc3, type) \
+GEN_HANDLER(name##ux, 0x1F, opc2, opc3, 0x00000001, type),
+#define GEN_LDX(name, ldop, opc2, opc3, type) \
+GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type),
+#define GEN_LDS(name, ldop, op, type) \
+GEN_LD(name, ldop, op | 0x20, type) \
+GEN_LDU(name, ldop, op | 0x21, type) \
+GEN_LDUX(name, ldop, 0x17, op | 0x01, type) \
+GEN_LDX(name, ldop, 0x17, op | 0x00, type)
+
+GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER)
+GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER)
+GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER)
+GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER)
+#if defined(TARGET_PPC64)
+GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B)
+GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B)
+GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B)
+GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B)
+#endif
+GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER)
+GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER)
+
+#undef GEN_ST
+#undef GEN_STU
+#undef GEN_STUX
+#undef GEN_STX
+#undef GEN_STS
+#define GEN_ST(name, stop, opc, type) \
+GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_STU(name, stop, opc, type) \
+GEN_HANDLER(stop##u, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_STUX(name, stop, opc2, opc3, type) \
+GEN_HANDLER(name##ux, 0x1F, opc2, opc3, 0x00000001, type),
+#define GEN_STX(name, stop, opc2, opc3, type) \
+GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type),
+#define GEN_STS(name, stop, op, type) \
+GEN_ST(name, stop, op | 0x20, type) \
+GEN_STU(name, stop, op | 0x21, type) \
+GEN_STUX(name, stop, 0x17, op | 0x01, type) \
+GEN_STX(name, stop, 0x17, op | 0x00, type)
+
+GEN_STS(stb, st8, 0x06, PPC_INTEGER)
+GEN_STS(sth, st16, 0x0C, PPC_INTEGER)
+GEN_STS(stw, st32, 0x04, PPC_INTEGER)
+#if defined(TARGET_PPC64)
+GEN_STUX(std, st64, 0x15, 0x05, PPC_64B)
+GEN_STX(std, st64, 0x15, 0x04, PPC_64B)
+#endif
+GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER)
+GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER)
+
+#undef GEN_LDF
+#undef GEN_LDUF
+#undef GEN_LDUXF
+#undef GEN_LDXF
+#undef GEN_LDFS
+#define GEN_LDF(name, ldop, opc, type) \
+GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_LDUF(name, ldop, opc, type) \
+GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_LDUXF(name, ldop, opc, type) \
+GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type),
+#define GEN_LDXF(name, ldop, opc2, opc3, type) \
+GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type),
+#define GEN_LDFS(name, ldop, op, type) \
+GEN_LDF(name, ldop, op | 0x20, type) \
+GEN_LDUF(name, ldop, op | 0x21, type) \
+GEN_LDUXF(name, ldop, op | 0x01, type) \
+GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
+
+GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT)
+GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT)
+
+#undef GEN_STF
+#undef GEN_STUF
+#undef GEN_STUXF
+#undef GEN_STXF
+#undef GEN_STFS
+#define GEN_STF(name, stop, opc, type) \
+GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_STUF(name, stop, opc, type) \
+GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_STUXF(name, stop, opc, type) \
+GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type),
+#define GEN_STXF(name, stop, opc2, opc3, type) \
+GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type),
+#define GEN_STFS(name, stop, op, type) \
+GEN_STF(name, stop, op | 0x20, type) \
+GEN_STUF(name, stop, op | 0x21, type) \
+GEN_STUXF(name, stop, op | 0x01, type) \
+GEN_STXF(name, stop, 0x17, op | 0x00, type)
+
+GEN_STFS(stfd, st64, 0x16, PPC_FLOAT)
+GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT)
+GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX)
+
+#undef GEN_CRLOGIC
+#define GEN_CRLOGIC(name, tcg_op, opc) \
+GEN_HANDLER(name, 0x13, 0x01, opc, 0x00000001, PPC_INTEGER)
+GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08),
+GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04),
+GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09),
+GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07),
+GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01),
+GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E),
+GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D),
+GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06),
+
+#undef GEN_MAC_HANDLER
+#define GEN_MAC_HANDLER(name, opc2, opc3) \
+GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_405_MAC)
+GEN_MAC_HANDLER(macchw, 0x0C, 0x05),
+GEN_MAC_HANDLER(macchwo, 0x0C, 0x15),
+GEN_MAC_HANDLER(macchws, 0x0C, 0x07),
+GEN_MAC_HANDLER(macchwso, 0x0C, 0x17),
+GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06),
+GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16),
+GEN_MAC_HANDLER(macchwu, 0x0C, 0x04),
+GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14),
+GEN_MAC_HANDLER(machhw, 0x0C, 0x01),
+GEN_MAC_HANDLER(machhwo, 0x0C, 0x11),
+GEN_MAC_HANDLER(machhws, 0x0C, 0x03),
+GEN_MAC_HANDLER(machhwso, 0x0C, 0x13),
+GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02),
+GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12),
+GEN_MAC_HANDLER(machhwu, 0x0C, 0x00),
+GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10),
+GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D),
+GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D),
+GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F),
+GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F),
+GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C),
+GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C),
+GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E),
+GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E),
+GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05),
+GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15),
+GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07),
+GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17),
+GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01),
+GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11),
+GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03),
+GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13),
+GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D),
+GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D),
+GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F),
+GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F),
+GEN_MAC_HANDLER(mulchw, 0x08, 0x05),
+GEN_MAC_HANDLER(mulchwu, 0x08, 0x04),
+GEN_MAC_HANDLER(mulhhw, 0x08, 0x01),
+GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00),
+GEN_MAC_HANDLER(mullhw, 0x08, 0x0D),
+GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C),
+
+#undef GEN_VR_LDX
+#undef GEN_VR_STX
+#undef GEN_VR_LVE
+#undef GEN_VR_STVE
+#define GEN_VR_LDX(name, opc2, opc3) \
+GEN_HANDLER(name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
+#define GEN_VR_STX(name, opc2, opc3) \
+GEN_HANDLER(st##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
+#define GEN_VR_LVE(name, opc2, opc3) \
+ GEN_HANDLER(lve##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
+#define GEN_VR_STVE(name, opc2, opc3) \
+ GEN_HANDLER(stve##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
+GEN_VR_LDX(lvx, 0x07, 0x03),
+GEN_VR_LDX(lvxl, 0x07, 0x0B),
+GEN_VR_LVE(bx, 0x07, 0x00),
+GEN_VR_LVE(hx, 0x07, 0x01),
+GEN_VR_LVE(wx, 0x07, 0x02),
+GEN_VR_STX(svx, 0x07, 0x07),
+GEN_VR_STX(svxl, 0x07, 0x0F),
+GEN_VR_STVE(bx, 0x07, 0x04),
+GEN_VR_STVE(hx, 0x07, 0x05),
+GEN_VR_STVE(wx, 0x07, 0x06),
+
+#undef GEN_VX_LOGICAL
+#define GEN_VX_LOGICAL(name, tcg_op, opc2, opc3) \
+GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
+GEN_VX_LOGICAL(vand, tcg_gen_and_i64, 2, 16),
+GEN_VX_LOGICAL(vandc, tcg_gen_andc_i64, 2, 17),
+GEN_VX_LOGICAL(vor, tcg_gen_or_i64, 2, 18),
+GEN_VX_LOGICAL(vxor, tcg_gen_xor_i64, 2, 19),
+GEN_VX_LOGICAL(vnor, tcg_gen_nor_i64, 2, 20),
+
+#undef GEN_VXFORM
+#define GEN_VXFORM(name, opc2, opc3) \
+GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
+GEN_VXFORM(vaddubm, 0, 0),
+GEN_VXFORM(vadduhm, 0, 1),
+GEN_VXFORM(vadduwm, 0, 2),
+GEN_VXFORM(vsububm, 0, 16),
+GEN_VXFORM(vsubuhm, 0, 17),
+GEN_VXFORM(vsubuwm, 0, 18),
+GEN_VXFORM(vmaxub, 1, 0),
+GEN_VXFORM(vmaxuh, 1, 1),
+GEN_VXFORM(vmaxuw, 1, 2),
+GEN_VXFORM(vmaxsb, 1, 4),
+GEN_VXFORM(vmaxsh, 1, 5),
+GEN_VXFORM(vmaxsw, 1, 6),
+GEN_VXFORM(vminub, 1, 8),
+GEN_VXFORM(vminuh, 1, 9),
+GEN_VXFORM(vminuw, 1, 10),
+GEN_VXFORM(vminsb, 1, 12),
+GEN_VXFORM(vminsh, 1, 13),
+GEN_VXFORM(vminsw, 1, 14),
+GEN_VXFORM(vavgub, 1, 16),
+GEN_VXFORM(vavguh, 1, 17),
+GEN_VXFORM(vavguw, 1, 18),
+GEN_VXFORM(vavgsb, 1, 20),
+GEN_VXFORM(vavgsh, 1, 21),
+GEN_VXFORM(vavgsw, 1, 22),
+GEN_VXFORM(vmrghb, 6, 0),
+GEN_VXFORM(vmrghh, 6, 1),
+GEN_VXFORM(vmrghw, 6, 2),
+GEN_VXFORM(vmrglb, 6, 4),
+GEN_VXFORM(vmrglh, 6, 5),
+GEN_VXFORM(vmrglw, 6, 6),
+GEN_VXFORM(vmuloub, 4, 0),
+GEN_VXFORM(vmulouh, 4, 1),
+GEN_VXFORM(vmulosb, 4, 4),
+GEN_VXFORM(vmulosh, 4, 5),
+GEN_VXFORM(vmuleub, 4, 8),
+GEN_VXFORM(vmuleuh, 4, 9),
+GEN_VXFORM(vmulesb, 4, 12),
+GEN_VXFORM(vmulesh, 4, 13),
+GEN_VXFORM(vslb, 2, 4),
+GEN_VXFORM(vslh, 2, 5),
+GEN_VXFORM(vslw, 2, 6),
+GEN_VXFORM(vsrb, 2, 8),
+GEN_VXFORM(vsrh, 2, 9),
+GEN_VXFORM(vsrw, 2, 10),
+GEN_VXFORM(vsrab, 2, 12),
+GEN_VXFORM(vsrah, 2, 13),
+GEN_VXFORM(vsraw, 2, 14),
+GEN_VXFORM(vslo, 6, 16),
+GEN_VXFORM(vsro, 6, 17),
+GEN_VXFORM(vaddcuw, 0, 6),
+GEN_VXFORM(vsubcuw, 0, 22),
+GEN_VXFORM(vaddubs, 0, 8),
+GEN_VXFORM(vadduhs, 0, 9),
+GEN_VXFORM(vadduws, 0, 10),
+GEN_VXFORM(vaddsbs, 0, 12),
+GEN_VXFORM(vaddshs, 0, 13),
+GEN_VXFORM(vaddsws, 0, 14),
+GEN_VXFORM(vsububs, 0, 24),
+GEN_VXFORM(vsubuhs, 0, 25),
+GEN_VXFORM(vsubuws, 0, 26),
+GEN_VXFORM(vsubsbs, 0, 28),
+GEN_VXFORM(vsubshs, 0, 29),
+GEN_VXFORM(vsubsws, 0, 30),
+GEN_VXFORM(vrlb, 2, 0),
+GEN_VXFORM(vrlh, 2, 1),
+GEN_VXFORM(vrlw, 2, 2),
+GEN_VXFORM(vsl, 2, 7),
+GEN_VXFORM(vsr, 2, 11),
+GEN_VXFORM(vpkuhum, 7, 0),
+GEN_VXFORM(vpkuwum, 7, 1),
+GEN_VXFORM(vpkuhus, 7, 2),
+GEN_VXFORM(vpkuwus, 7, 3),
+GEN_VXFORM(vpkshus, 7, 4),
+GEN_VXFORM(vpkswus, 7, 5),
+GEN_VXFORM(vpkshss, 7, 6),
+GEN_VXFORM(vpkswss, 7, 7),
+GEN_VXFORM(vpkpx, 7, 12),
+GEN_VXFORM(vsum4ubs, 4, 24),
+GEN_VXFORM(vsum4sbs, 4, 28),
+GEN_VXFORM(vsum4shs, 4, 25),
+GEN_VXFORM(vsum2sws, 4, 26),
+GEN_VXFORM(vsumsws, 4, 30),
+GEN_VXFORM(vaddfp, 5, 0),
+GEN_VXFORM(vsubfp, 5, 1),
+GEN_VXFORM(vmaxfp, 5, 16),
+GEN_VXFORM(vminfp, 5, 17),
+
+#undef GEN_VXRFORM1
+#undef GEN_VXRFORM
+#define GEN_VXRFORM1(opname, name, str, opc2, opc3) \
+ GEN_HANDLER2(name, str, 0x4, opc2, opc3, 0x00000000, PPC_ALTIVEC),
+#define GEN_VXRFORM(name, opc2, opc3) \
+ GEN_VXRFORM1(name, name, #name, opc2, opc3) \
+ GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))
+GEN_VXRFORM(vcmpequb, 3, 0)
+GEN_VXRFORM(vcmpequh, 3, 1)
+GEN_VXRFORM(vcmpequw, 3, 2)
+GEN_VXRFORM(vcmpgtsb, 3, 12)
+GEN_VXRFORM(vcmpgtsh, 3, 13)
+GEN_VXRFORM(vcmpgtsw, 3, 14)
+GEN_VXRFORM(vcmpgtub, 3, 8)
+GEN_VXRFORM(vcmpgtuh, 3, 9)
+GEN_VXRFORM(vcmpgtuw, 3, 10)
+GEN_VXRFORM(vcmpeqfp, 3, 3)
+GEN_VXRFORM(vcmpgefp, 3, 7)
+GEN_VXRFORM(vcmpgtfp, 3, 11)
+GEN_VXRFORM(vcmpbfp, 3, 15)
+
+#undef GEN_VXFORM_SIMM
+#define GEN_VXFORM_SIMM(name, opc2, opc3) \
+ GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
+GEN_VXFORM_SIMM(vspltisb, 6, 12),
+GEN_VXFORM_SIMM(vspltish, 6, 13),
+GEN_VXFORM_SIMM(vspltisw, 6, 14),
+
+#undef GEN_VXFORM_NOA
+#define GEN_VXFORM_NOA(name, opc2, opc3) \
+ GEN_HANDLER(name, 0x04, opc2, opc3, 0x001f0000, PPC_ALTIVEC)
+GEN_VXFORM_NOA(vupkhsb, 7, 8),
+GEN_VXFORM_NOA(vupkhsh, 7, 9),
+GEN_VXFORM_NOA(vupklsb, 7, 10),
+GEN_VXFORM_NOA(vupklsh, 7, 11),
+GEN_VXFORM_NOA(vupkhpx, 7, 13),
+GEN_VXFORM_NOA(vupklpx, 7, 15),
+GEN_VXFORM_NOA(vrefp, 5, 4),
+GEN_VXFORM_NOA(vrsqrtefp, 5, 5),
+GEN_VXFORM_NOA(vexptefp, 5, 6),
+GEN_VXFORM_NOA(vlogefp, 5, 7),
+GEN_VXFORM_NOA(vrfim, 5, 8),
+GEN_VXFORM_NOA(vrfin, 5, 9),
+GEN_VXFORM_NOA(vrfip, 5, 10),
+GEN_VXFORM_NOA(vrfiz, 5, 11),
+
+#undef GEN_VXFORM_UIMM
+#define GEN_VXFORM_UIMM(name, opc2, opc3) \
+ GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
+GEN_VXFORM_UIMM(vspltb, 6, 8),
+GEN_VXFORM_UIMM(vsplth, 6, 9),
+GEN_VXFORM_UIMM(vspltw, 6, 10),
+GEN_VXFORM_UIMM(vcfux, 5, 12),
+GEN_VXFORM_UIMM(vcfsx, 5, 13),
+GEN_VXFORM_UIMM(vctuxs, 5, 14),
+GEN_VXFORM_UIMM(vctsxs, 5, 15),
+
+#undef GEN_VAFORM_PAIRED
+#define GEN_VAFORM_PAIRED(name0, name1, opc2) \
+ GEN_HANDLER(name0##_##name1, 0x04, opc2, 0xFF, 0x00000000, PPC_ALTIVEC)
+GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16),
+GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18),
+GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19),
+GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20),
+GEN_VAFORM_PAIRED(vsel, vperm, 21),
+GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23),
+
+#undef GEN_SPE
+#define GEN_SPE(name0, name1, opc2, opc3, inval, type) \
+GEN_HANDLER(name0##_##name1, 0x04, opc2, opc3, inval, type)
+GEN_SPE(evaddw, speundef, 0x00, 0x08, 0x00000000, PPC_SPE),
+GEN_SPE(evaddiw, speundef, 0x01, 0x08, 0x00000000, PPC_SPE),
+GEN_SPE(evsubfw, speundef, 0x02, 0x08, 0x00000000, PPC_SPE),
+GEN_SPE(evsubifw, speundef, 0x03, 0x08, 0x00000000, PPC_SPE),
+GEN_SPE(evabs, evneg, 0x04, 0x08, 0x0000F800, PPC_SPE),
+GEN_SPE(evextsb, evextsh, 0x05, 0x08, 0x0000F800, PPC_SPE),
+GEN_SPE(evrndw, evcntlzw, 0x06, 0x08, 0x0000F800, PPC_SPE),
+GEN_SPE(evcntlsw, brinc, 0x07, 0x08, 0x00000000, PPC_SPE),
+GEN_SPE(speundef, evand, 0x08, 0x08, 0x00000000, PPC_SPE),
+GEN_SPE(evandc, speundef, 0x09, 0x08, 0x00000000, PPC_SPE),
+GEN_SPE(evxor, evor, 0x0B, 0x08, 0x00000000, PPC_SPE),
+GEN_SPE(evnor, eveqv, 0x0C, 0x08, 0x00000000, PPC_SPE),
+GEN_SPE(speundef, evorc, 0x0D, 0x08, 0x00000000, PPC_SPE),
+GEN_SPE(evnand, speundef, 0x0F, 0x08, 0x00000000, PPC_SPE),
+GEN_SPE(evsrwu, evsrws, 0x10, 0x08, 0x00000000, PPC_SPE),
+GEN_SPE(evsrwiu, evsrwis, 0x11, 0x08, 0x00000000, PPC_SPE),
+GEN_SPE(evslw, speundef, 0x12, 0x08, 0x00000000, PPC_SPE),
+GEN_SPE(evslwi, speundef, 0x13, 0x08, 0x00000000, PPC_SPE),
+GEN_SPE(evrlw, evsplati, 0x14, 0x08, 0x00000000, PPC_SPE),
+GEN_SPE(evrlwi, evsplatfi, 0x15, 0x08, 0x00000000, PPC_SPE),
+GEN_SPE(evmergehi, evmergelo, 0x16, 0x08, 0x00000000, PPC_SPE),
+GEN_SPE(evmergehilo, evmergelohi, 0x17, 0x08, 0x00000000, PPC_SPE),
+GEN_SPE(evcmpgtu, evcmpgts, 0x18, 0x08, 0x00600000, PPC_SPE),
+GEN_SPE(evcmpltu, evcmplts, 0x19, 0x08, 0x00600000, PPC_SPE),
+GEN_SPE(evcmpeq, speundef, 0x1A, 0x08, 0x00600000, PPC_SPE),
+
+GEN_SPE(evfsadd, evfssub, 0x00, 0x0A, 0x00000000, PPC_SPE_SINGLE),
+GEN_SPE(evfsabs, evfsnabs, 0x02, 0x0A, 0x0000F800, PPC_SPE_SINGLE),
+GEN_SPE(evfsneg, speundef, 0x03, 0x0A, 0x0000F800, PPC_SPE_SINGLE),
+GEN_SPE(evfsmul, evfsdiv, 0x04, 0x0A, 0x00000000, PPC_SPE_SINGLE),
+GEN_SPE(evfscmpgt, evfscmplt, 0x06, 0x0A, 0x00600000, PPC_SPE_SINGLE),
+GEN_SPE(evfscmpeq, speundef, 0x07, 0x0A, 0x00600000, PPC_SPE_SINGLE),
+GEN_SPE(evfscfui, evfscfsi, 0x08, 0x0A, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(evfscfuf, evfscfsf, 0x09, 0x0A, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(evfsctui, evfsctsi, 0x0A, 0x0A, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(evfsctuf, evfsctsf, 0x0B, 0x0A, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(evfsctuiz, speundef, 0x0C, 0x0A, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(evfsctsiz, speundef, 0x0D, 0x0A, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(evfststgt, evfststlt, 0x0E, 0x0A, 0x00600000, PPC_SPE_SINGLE),
+GEN_SPE(evfststeq, speundef, 0x0F, 0x0A, 0x00600000, PPC_SPE_SINGLE),
+
+GEN_SPE(efsadd, efssub, 0x00, 0x0B, 0x00000000, PPC_SPE_SINGLE),
+GEN_SPE(efsabs, efsnabs, 0x02, 0x0B, 0x0000F800, PPC_SPE_SINGLE),
+GEN_SPE(efsneg, speundef, 0x03, 0x0B, 0x0000F800, PPC_SPE_SINGLE),
+GEN_SPE(efsmul, efsdiv, 0x04, 0x0B, 0x00000000, PPC_SPE_SINGLE),
+GEN_SPE(efscmpgt, efscmplt, 0x06, 0x0B, 0x00600000, PPC_SPE_SINGLE),
+GEN_SPE(efscmpeq, efscfd, 0x07, 0x0B, 0x00600000, PPC_SPE_SINGLE),
+GEN_SPE(efscfui, efscfsi, 0x08, 0x0B, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efscfuf, efscfsf, 0x09, 0x0B, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efsctui, efsctsi, 0x0A, 0x0B, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efsctuf, efsctsf, 0x0B, 0x0B, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efsctuiz, speundef, 0x0C, 0x0B, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efsctsiz, speundef, 0x0D, 0x0B, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efststgt, efststlt, 0x0E, 0x0B, 0x00600000, PPC_SPE_SINGLE),
+GEN_SPE(efststeq, speundef, 0x0F, 0x0B, 0x00600000, PPC_SPE_SINGLE),
+
+GEN_SPE(efdadd, efdsub, 0x10, 0x0B, 0x00000000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcfuid, efdcfsid, 0x11, 0x0B, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdabs, efdnabs, 0x12, 0x0B, 0x0000F800, PPC_SPE_DOUBLE),
+GEN_SPE(efdneg, speundef, 0x13, 0x0B, 0x0000F800, PPC_SPE_DOUBLE),
+GEN_SPE(efdmul, efddiv, 0x14, 0x0B, 0x00000000, PPC_SPE_DOUBLE),
+GEN_SPE(efdctuidz, efdctsidz, 0x15, 0x0B, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcmpgt, efdcmplt, 0x16, 0x0B, 0x00600000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcmpeq, efdcfs, 0x17, 0x0B, 0x00600000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcfui, efdcfsi, 0x18, 0x0B, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcfuf, efdcfsf, 0x19, 0x0B, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdctui, efdctsi, 0x1A, 0x0B, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdctuf, efdctsf, 0x1B, 0x0B, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdctuiz, speundef, 0x1C, 0x0B, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdctsiz, speundef, 0x1D, 0x0B, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdtstgt, efdtstlt, 0x1E, 0x0B, 0x00600000, PPC_SPE_DOUBLE),
+GEN_SPE(efdtsteq, speundef, 0x1F, 0x0B, 0x00600000, PPC_SPE_DOUBLE),
+
+#undef GEN_SPEOP_LDST
+#define GEN_SPEOP_LDST(name, opc2, sh) \
+GEN_HANDLER(name, 0x04, opc2, 0x0C, 0x00000000, PPC_SPE)
+GEN_SPEOP_LDST(evldd, 0x00, 3),
+GEN_SPEOP_LDST(evldw, 0x01, 3),
+GEN_SPEOP_LDST(evldh, 0x02, 3),
+GEN_SPEOP_LDST(evlhhesplat, 0x04, 1),
+GEN_SPEOP_LDST(evlhhousplat, 0x06, 1),
+GEN_SPEOP_LDST(evlhhossplat, 0x07, 1),
+GEN_SPEOP_LDST(evlwhe, 0x08, 2),
+GEN_SPEOP_LDST(evlwhou, 0x0A, 2),
+GEN_SPEOP_LDST(evlwhos, 0x0B, 2),
+GEN_SPEOP_LDST(evlwwsplat, 0x0C, 2),
+GEN_SPEOP_LDST(evlwhsplat, 0x0E, 2),
+
+GEN_SPEOP_LDST(evstdd, 0x10, 3),
+GEN_SPEOP_LDST(evstdw, 0x11, 3),
+GEN_SPEOP_LDST(evstdh, 0x12, 3),
+GEN_SPEOP_LDST(evstwhe, 0x18, 2),
+GEN_SPEOP_LDST(evstwho, 0x1A, 2),
+GEN_SPEOP_LDST(evstwwe, 0x1C, 2),
+GEN_SPEOP_LDST(evstwwo, 0x1E, 2),
+};
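+
+/* Rough guide to the table above: each GEN_HANDLER(name, opc1, opc2, opc3,
+ * inval, type) entry is keyed by the primary opcode opc1 and, where a
+ * sub-table exists, by the extended opcodes opc2/opc3 (0xFF marking "no
+ * further sub-decoding at this level").  The inval mask flags opcode bits
+ * that must be zero: the translation loop below raises the invalid
+ * instruction exception when (opcode & inval) != 0.  The type flag
+ * (PPC_INTEGER, PPC_64B, ...) is matched against the instruction-set flags
+ * of the selected CPU model when the dispatch tables are built in
+ * translate_init.c. */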
+
+#include "translate_init.c"
+#include "helper_regs.h"
+
+/*****************************************************************************/
+/* Misc PowerPC helpers */
+void cpu_dump_state (CPUState *env, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
+{
+#define RGPL 4
+#define RFPL 4
+
+ int i;
+
+ cpu_fprintf(f, "NIP " TARGET_FMT_lx " LR " TARGET_FMT_lx " CTR "
+ TARGET_FMT_lx " XER " TARGET_FMT_lx "\n",
+ env->nip, env->lr, env->ctr, env->xer);
+ cpu_fprintf(f, "MSR " TARGET_FMT_lx " HID0 " TARGET_FMT_lx " HF "
+ TARGET_FMT_lx " idx %d\n", env->msr, env->spr[SPR_HID0],
+ env->hflags, env->mmu_idx);
+#if !defined(NO_TIMER_DUMP)
+ cpu_fprintf(f, "TB %08" PRIu32 " %08" PRIu64
+#if !defined(CONFIG_USER_ONLY)
+ " DECR %08" PRIu32
+#endif
+ "\n",
+ cpu_ppc_load_tbu(env), cpu_ppc_load_tbl(env)
+#if !defined(CONFIG_USER_ONLY)
+ , cpu_ppc_load_decr(env)
+#endif
+ );
+#endif
+ for (i = 0; i < 32; i++) {
+ if ((i & (RGPL - 1)) == 0)
+ cpu_fprintf(f, "GPR%02d", i);
+ cpu_fprintf(f, " %016" PRIx64, ppc_dump_gpr(env, i));
+ if ((i & (RGPL - 1)) == (RGPL - 1))
+ cpu_fprintf(f, "\n");
+ }
+ cpu_fprintf(f, "CR ");
+ for (i = 0; i < 8; i++)
+ cpu_fprintf(f, "%01x", env->crf[i]);
+ cpu_fprintf(f, " [");
+ for (i = 0; i < 8; i++) {
+ char a = '-';
+ if (env->crf[i] & 0x08)
+ a = 'L';
+ else if (env->crf[i] & 0x04)
+ a = 'G';
+ else if (env->crf[i] & 0x02)
+ a = 'E';
+ cpu_fprintf(f, " %c%c", a, env->crf[i] & 0x01 ? 'O' : ' ');
+ }
+ cpu_fprintf(f, " ] RES " TARGET_FMT_lx "\n",
+ env->reserve_addr);
+ for (i = 0; i < 32; i++) {
+ if ((i & (RFPL - 1)) == 0)
+ cpu_fprintf(f, "FPR%02d", i);
+ cpu_fprintf(f, " %016" PRIx64, *((uint64_t *)&env->fpr[i]));
+ if ((i & (RFPL - 1)) == (RFPL - 1))
+ cpu_fprintf(f, "\n");
+ }
+ cpu_fprintf(f, "FPSCR %08x\n", env->fpscr);
+#if !defined(CONFIG_USER_ONLY)
+ cpu_fprintf(f, "SRR0 " TARGET_FMT_lx " SRR1 " TARGET_FMT_lx " SDR1 "
+ TARGET_FMT_lx "\n", env->spr[SPR_SRR0], env->spr[SPR_SRR1],
+ env->sdr1);
+#endif
+
+#undef RGPL
+#undef RFPL
+}
+
+void cpu_dump_statistics (CPUState *env, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
+{
+#if defined(DO_PPC_STATISTICS)
+ opc_handler_t **t1, **t2, **t3, *handler;
+ int op1, op2, op3;
+
+ t1 = env->opcodes;
+ for (op1 = 0; op1 < 64; op1++) {
+ handler = t1[op1];
+ if (is_indirect_opcode(handler)) {
+ t2 = ind_table(handler);
+ for (op2 = 0; op2 < 32; op2++) {
+ handler = t2[op2];
+ if (is_indirect_opcode(handler)) {
+ t3 = ind_table(handler);
+ for (op3 = 0; op3 < 32; op3++) {
+ handler = t3[op3];
+ if (handler->count == 0)
+ continue;
+ cpu_fprintf(f, "%02x %02x %02x (%02x %04d) %16s: "
+ "%016" PRIx64 " %" PRId64 "\n",
+ op1, op2, op3, op1, (op3 << 5) | op2,
+ handler->oname,
+ handler->count, handler->count);
+ }
+ } else {
+ if (handler->count == 0)
+ continue;
+ cpu_fprintf(f, "%02x %02x (%02x %04d) %16s: "
+ "%016" PRIx64 " %" PRId64 "\n",
+ op1, op2, op1, op2, handler->oname,
+ handler->count, handler->count);
+ }
+ }
+ } else {
+ if (handler->count == 0)
+ continue;
+ cpu_fprintf(f, "%02x (%02x ) %16s: %016" PRIx64
+ " %" PRId64 "\n",
+ op1, op1, handler->oname,
+ handler->count, handler->count);
+ }
+ }
+#endif
+}
+
+/*****************************************************************************/
+static inline void gen_intermediate_code_internal(CPUState *env,
+ TranslationBlock *tb,
+ int search_pc)
+{
+ DisasContext ctx, *ctxp = &ctx;
+ opc_handler_t **table, *handler;
+ target_ulong pc_start;
+ uint16_t *gen_opc_end;
+ CPUBreakpoint *bp;
+ int j, lj = -1;
+ int num_insns;
+ int max_insns;
+
+ pc_start = tb->pc;
+ gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
+ ctx.nip = pc_start;
+ ctx.tb = tb;
+ ctx.exception = POWERPC_EXCP_NONE;
+ ctx.spr_cb = env->spr_cb;
+ ctx.mem_idx = env->mmu_idx;
+ ctx.access_type = -1;
+ ctx.le_mode = env->hflags & (1 << MSR_LE) ? 1 : 0;
+#if defined(TARGET_PPC64)
+ ctx.sf_mode = msr_sf;
+#endif
+ ctx.fpu_enabled = msr_fp;
+ if ((env->flags & POWERPC_FLAG_SPE) && msr_spe)
+ ctx.spe_enabled = msr_spe;
+ else
+ ctx.spe_enabled = 0;
+ if ((env->flags & POWERPC_FLAG_VRE) && msr_vr)
+ ctx.altivec_enabled = msr_vr;
+ else
+ ctx.altivec_enabled = 0;
+ if ((env->flags & POWERPC_FLAG_SE) && msr_se)
+ ctx.singlestep_enabled = CPU_SINGLE_STEP;
+ else
+ ctx.singlestep_enabled = 0;
+ if ((env->flags & POWERPC_FLAG_BE) && msr_be)
+ ctx.singlestep_enabled |= CPU_BRANCH_STEP;
+ if (unlikely(env->singlestep_enabled))
+ ctx.singlestep_enabled |= GDBSTUB_SINGLE_STEP;
+#if defined (DO_SINGLE_STEP) && 0
+ /* Single step trace mode */
+ msr_se = 1;
+#endif
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0)
+ max_insns = CF_COUNT_MASK;
+
+ gen_icount_start();
+ /* Set env in case of segfault during code fetch */
+ while (ctx.exception == POWERPC_EXCP_NONE && gen_opc_ptr < gen_opc_end) {
+ if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
+ QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (bp->pc == ctx.nip) {
+ gen_debug_exception(ctxp);
+ break;
+ }
+ }
+ }
+ if (unlikely(search_pc)) {
+ j = gen_opc_ptr - gen_opc_buf;
+ if (lj < j) {
+ lj++;
+ while (lj < j)
+ gen_opc_instr_start[lj++] = 0;
+ }
+ gen_opc_pc[lj] = ctx.nip;
+ gen_opc_instr_start[lj] = 1;
+ gen_opc_icount[lj] = num_insns;
+ }
+ LOG_DISAS("----------------\n");
+ LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
+ ctx.nip, ctx.mem_idx, (int)msr_ir);
+ if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
+ gen_io_start();
+ if (unlikely(ctx.le_mode)) {
+ ctx.opcode = bswap32(ldl_code(ctx.nip));
+ } else {
+ ctx.opcode = ldl_code(ctx.nip);
+ }
+ LOG_DISAS("translate opcode %08x (%02x %02x %02x) (%s)\n",
+ ctx.opcode, opc1(ctx.opcode), opc2(ctx.opcode),
+ opc3(ctx.opcode), ctx.le_mode ? "little" : "big");
+ if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
+ tcg_gen_debug_insn_start(ctx.nip);
+ ctx.nip += 4;
+ table = env->opcodes;
+ num_insns++;
+ handler = table[opc1(ctx.opcode)];
+ if (is_indirect_opcode(handler)) {
+ table = ind_table(handler);
+ handler = table[opc2(ctx.opcode)];
+ if (is_indirect_opcode(handler)) {
+ table = ind_table(handler);
+ handler = table[opc3(ctx.opcode)];
+ }
+ }
+ /* Is opcode *REALLY* valid ? */
+ if (unlikely(handler->handler == &gen_invalid)) {
+ if (qemu_log_enabled()) {
+ qemu_log("invalid/unsupported opcode: "
+ "%02x - %02x - %02x (%08x) " TARGET_FMT_lx " %d\n",
+ opc1(ctx.opcode), opc2(ctx.opcode),
+ opc3(ctx.opcode), ctx.opcode, ctx.nip - 4, (int)msr_ir);
+ }
+ } else {
+ if (unlikely((ctx.opcode & handler->inval) != 0)) {
+ if (qemu_log_enabled()) {
+ qemu_log("invalid bits: %08x for opcode: "
+ "%02x - %02x - %02x (%08x) " TARGET_FMT_lx "\n",
+ ctx.opcode & handler->inval, opc1(ctx.opcode),
+ opc2(ctx.opcode), opc3(ctx.opcode),
+ ctx.opcode, ctx.nip - 4);
+ }
+ gen_inval_exception(ctxp, POWERPC_EXCP_INVAL_INVAL);
+ break;
+ }
+ }
+ (*(handler->handler))(&ctx);
+#if defined(DO_PPC_STATISTICS)
+ handler->count++;
+#endif
+ /* Check trace mode exceptions */
+ if (unlikely(ctx.singlestep_enabled & CPU_SINGLE_STEP &&
+ (ctx.nip <= 0x100 || ctx.nip > 0xF00) &&
+ ctx.exception != POWERPC_SYSCALL &&
+ ctx.exception != POWERPC_EXCP_TRAP &&
+ ctx.exception != POWERPC_EXCP_BRANCH)) {
+ gen_exception(ctxp, POWERPC_EXCP_TRACE);
+ } else if (unlikely(((ctx.nip & (TARGET_PAGE_SIZE - 1)) == 0) ||
+ (env->singlestep_enabled) ||
+ singlestep ||
+ num_insns >= max_insns)) {
+ /* if we reach a page boundary or are single stepping, stop
+ * generation
+ */
+ break;
+ }
+ }
+ if (tb->cflags & CF_LAST_IO)
+ gen_io_end();
+ if (ctx.exception == POWERPC_EXCP_NONE) {
+ gen_goto_tb(&ctx, 0, ctx.nip);
+ } else if (ctx.exception != POWERPC_EXCP_BRANCH) {
+ if (unlikely(env->singlestep_enabled)) {
+ gen_debug_exception(ctxp);
+ }
+ /* Generate the return instruction */
+ tcg_gen_exit_tb(0);
+ }
+ gen_icount_end(tb, num_insns);
+ *gen_opc_ptr = INDEX_op_end;
+ if (unlikely(search_pc)) {
+ j = gen_opc_ptr - gen_opc_buf;
+ lj++;
+ while (lj <= j)
+ gen_opc_instr_start[lj++] = 0;
+ } else {
+ tb->size = ctx.nip - pc_start;
+ tb->icount = num_insns;
+ }
+#if defined(DEBUG_DISAS)
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ int flags;
+ flags = env->bfd_mach;
+ flags |= ctx.le_mode << 16;
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(pc_start, ctx.nip - pc_start, flags);
+ qemu_log("\n");
+ }
+#endif
+}
+
+void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
+{
+ gen_intermediate_code_internal(env, tb, 0);
+}
+
+void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
+{
+ gen_intermediate_code_internal(env, tb, 1);
+}
+
+void gen_pc_load(CPUState *env, TranslationBlock *tb,
+ unsigned long searched_pc, int pc_pos, void *puc)
+{
+ env->nip = gen_opc_pc[pc_pos];
+}
diff --git a/target-ppc/translate_init.c b/target-ppc/translate_init.c
new file mode 100644
index 0000000..dfcd949
--- /dev/null
+++ b/target-ppc/translate_init.c
@@ -0,0 +1,9768 @@
+/*
+ * PowerPC CPU initialization for qemu.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* A lot of PowerPC definitions have been included here.
+ * Most of them are not usable for now but have been kept
+ * inside "#if defined(TODO) ... #endif" statements to make tests easier.
+ */
+
+#include "dis-asm.h"
+#include "gdbstub.h"
+
+//#define PPC_DUMP_CPU
+//#define PPC_DEBUG_SPR
+//#define PPC_DUMP_SPR_ACCESSES
+#if defined(CONFIG_USER_ONLY)
+#define TODO_USER_ONLY 1
+#endif
+
+struct ppc_def_t {
+ const char *name;
+ uint32_t pvr;
+ uint32_t svr;
+ uint64_t insns_flags;
+ uint64_t msr_mask;
+ powerpc_mmu_t mmu_model;
+ powerpc_excp_t excp_model;
+ powerpc_input_t bus_model;
+ uint32_t flags;
+ int bfd_mach;
+ void (*init_proc)(CPUPPCState *env);
+ int (*check_pow)(CPUPPCState *env);
+};
+
+/* For user-mode emulation, we don't emulate any IRQ controller */
+#if defined(CONFIG_USER_ONLY)
+#define PPC_IRQ_INIT_FN(name) \
+static inline void glue(glue(ppc, name),_irq_init) (CPUPPCState *env) \
+{ \
+}
+#else
+#define PPC_IRQ_INIT_FN(name) \
+void glue(glue(ppc, name),_irq_init) (CPUPPCState *env);
+#endif
+
+PPC_IRQ_INIT_FN(40x);
+PPC_IRQ_INIT_FN(6xx);
+PPC_IRQ_INIT_FN(970);
+PPC_IRQ_INIT_FN(e500);
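+
+/* For example, PPC_IRQ_INIT_FN(40x) expands via glue() to a declaration of
+ * ppc40x_irq_init(CPUPPCState *env) for system emulation, or to an empty
+ * inline stub for CONFIG_USER_ONLY builds, where no interrupt controller is
+ * modelled. */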
+
+/* Generic callbacks:
+ * do nothing but store/retrieve spr value
+ */
+static void spr_read_generic (void *opaque, int gprn, int sprn)
+{
+ gen_load_spr(cpu_gpr[gprn], sprn);
+#ifdef PPC_DUMP_SPR_ACCESSES
+ {
+ TCGv t0 = tcg_const_i32(sprn);
+ gen_helper_load_dump_spr(t0);
+ tcg_temp_free_i32(t0);
+ }
+#endif
+}
+
+static void spr_write_generic (void *opaque, int sprn, int gprn)
+{
+ gen_store_spr(sprn, cpu_gpr[gprn]);
+#ifdef PPC_DUMP_SPR_ACCESSES
+ {
+ TCGv t0 = tcg_const_i32(sprn);
+ gen_helper_store_dump_spr(t0);
+ tcg_temp_free_i32(t0);
+ }
+#endif
+}
+
+#if !defined(CONFIG_USER_ONLY)
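+/* SPR write with "write one to clear" semantics: bits set in the source GPR
+ * are cleared in the SPR (used below for status registers such as DBSR).
+ */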
+static void spr_write_clear (void *opaque, int sprn, int gprn)
+{
+    TCGv t0 = tcg_temp_new();
+    TCGv t1 = tcg_temp_new();
+    gen_load_spr(t0, sprn);
+    tcg_gen_not_tl(t1, cpu_gpr[gprn]);
+    tcg_gen_and_tl(t0, t0, t1);
+    gen_store_spr(sprn, t0);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+}
+#endif
+
+/* SPR common to all PowerPC */
+/* XER */
+static void spr_read_xer (void *opaque, int gprn, int sprn)
+{
+ tcg_gen_mov_tl(cpu_gpr[gprn], cpu_xer);
+}
+
+static void spr_write_xer (void *opaque, int sprn, int gprn)
+{
+ tcg_gen_mov_tl(cpu_xer, cpu_gpr[gprn]);
+}
+
+/* LR */
+static void spr_read_lr (void *opaque, int gprn, int sprn)
+{
+ tcg_gen_mov_tl(cpu_gpr[gprn], cpu_lr);
+}
+
+static void spr_write_lr (void *opaque, int sprn, int gprn)
+{
+ tcg_gen_mov_tl(cpu_lr, cpu_gpr[gprn]);
+}
+
+/* CTR */
+static void spr_read_ctr (void *opaque, int gprn, int sprn)
+{
+ tcg_gen_mov_tl(cpu_gpr[gprn], cpu_ctr);
+}
+
+static void spr_write_ctr (void *opaque, int sprn, int gprn)
+{
+ tcg_gen_mov_tl(cpu_ctr, cpu_gpr[gprn]);
+}
+
+/* User read access to SPR */
+/* USPRx */
+/* UMMCRx */
+/* UPMCx */
+/* USIA */
+/* UDECR */
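+/* The user-mode aliases sit 0x10 SPR numbers below their privileged
+ * counterparts, so reading sprn + 0x10 fetches the real register
+ * (e.g. UMMCR0 at SPR 936 maps to MMCR0 at SPR 952).
+ */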
+static void spr_read_ureg (void *opaque, int gprn, int sprn)
+{
+ gen_load_spr(cpu_gpr[gprn], sprn + 0x10);
+}
+
+/* SPR common to all non-embedded PowerPC */
+/* DECR */
+#if !defined(CONFIG_USER_ONLY)
+static void spr_read_decr (void *opaque, int gprn, int sprn)
+{
+ gen_helper_load_decr(cpu_gpr[gprn]);
+}
+
+static void spr_write_decr (void *opaque, int sprn, int gprn)
+{
+ gen_helper_store_decr(cpu_gpr[gprn]);
+}
+#endif
+
+/* SPR common to all non-embedded PowerPC, except 601 */
+/* Time base */
+static void spr_read_tbl (void *opaque, int gprn, int sprn)
+{
+ gen_helper_load_tbl(cpu_gpr[gprn]);
+}
+
+static void spr_read_tbu (void *opaque, int gprn, int sprn)
+{
+ gen_helper_load_tbu(cpu_gpr[gprn]);
+}
+
+__attribute__ (( unused ))
+static void spr_read_atbl (void *opaque, int gprn, int sprn)
+{
+ gen_helper_load_atbl(cpu_gpr[gprn]);
+}
+
+__attribute__ (( unused ))
+static void spr_read_atbu (void *opaque, int gprn, int sprn)
+{
+ gen_helper_load_atbu(cpu_gpr[gprn]);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+static void spr_write_tbl (void *opaque, int sprn, int gprn)
+{
+ gen_helper_store_tbl(cpu_gpr[gprn]);
+}
+
+static void spr_write_tbu (void *opaque, int sprn, int gprn)
+{
+ gen_helper_store_tbu(cpu_gpr[gprn]);
+}
+
+__attribute__ (( unused ))
+static void spr_write_atbl (void *opaque, int sprn, int gprn)
+{
+ gen_helper_store_atbl(cpu_gpr[gprn]);
+}
+
+__attribute__ (( unused ))
+static void spr_write_atbu (void *opaque, int sprn, int gprn)
+{
+ gen_helper_store_atbu(cpu_gpr[gprn]);
+}
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+/* IBAT0U...IBAT7U */
+/* IBAT0L...IBAT7L */
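+/* The low bit of the SPR number selects the upper or lower half,
+ * the remaining bits select the BAT pair.
+ */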
+static void spr_read_ibat (void *opaque, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUState, IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
+}
+
+static void spr_read_ibat_h (void *opaque, int gprn, int sprn)
+{
+    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUState, IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
+}
+
+static void spr_write_ibatu (void *opaque, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2);
+ gen_helper_store_ibatu(t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+static void spr_write_ibatu_h (void *opaque, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4U) / 2) + 4);
+ gen_helper_store_ibatu(t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+static void spr_write_ibatl (void *opaque, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0L) / 2);
+ gen_helper_store_ibatl(t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+static void spr_write_ibatl_h (void *opaque, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4L) / 2) + 4);
+ gen_helper_store_ibatl(t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+/* DBAT0U...DBAT7U */
+/* DBAT0L...DBAT7L */
+static void spr_read_dbat (void *opaque, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUState, DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
+}
+
+static void spr_read_dbat_h (void *opaque, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUState, DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
+}
+
+static void spr_write_dbatu (void *opaque, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0U) / 2);
+ gen_helper_store_dbatu(t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+static void spr_write_dbatu_h (void *opaque, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4U) / 2) + 4);
+ gen_helper_store_dbatu(t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+static void spr_write_dbatl (void *opaque, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0L) / 2);
+ gen_helper_store_dbatl(t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+static void spr_write_dbatl_h (void *opaque, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4L) / 2) + 4);
+ gen_helper_store_dbatl(t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+/* SDR1 */
+static void spr_read_sdr1 (void *opaque, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUState, sdr1));
+}
+
+static void spr_write_sdr1 (void *opaque, int sprn, int gprn)
+{
+ gen_helper_store_sdr1(cpu_gpr[gprn]);
+}
+
+/* 64-bit PowerPC specific SPRs */
+/* ASR */
+#if defined(TARGET_PPC64)
+static void spr_read_hior (void *opaque, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUState, excp_prefix));
+}
+
+static void spr_write_hior (void *opaque, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
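+    /* The mask keeps the exception prefix 1 MiB aligned and below 4 TiB */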
+ tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL);
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, excp_prefix));
+ tcg_temp_free(t0);
+}
+
+static void spr_read_asr (void *opaque, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUState, asr));
+}
+
+static void spr_write_asr (void *opaque, int sprn, int gprn)
+{
+ gen_helper_store_asr(cpu_gpr[gprn]);
+}
+#endif
+#endif
+
+/* PowerPC 601 specific registers */
+/* RTC */
+static void spr_read_601_rtcl (void *opaque, int gprn, int sprn)
+{
+ gen_helper_load_601_rtcl(cpu_gpr[gprn]);
+}
+
+static void spr_read_601_rtcu (void *opaque, int gprn, int sprn)
+{
+ gen_helper_load_601_rtcu(cpu_gpr[gprn]);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+static void spr_write_601_rtcu (void *opaque, int sprn, int gprn)
+{
+ gen_helper_store_601_rtcu(cpu_gpr[gprn]);
+}
+
+static void spr_write_601_rtcl (void *opaque, int sprn, int gprn)
+{
+ gen_helper_store_601_rtcl(cpu_gpr[gprn]);
+}
+
+static void spr_write_hid0_601 (void *opaque, int sprn, int gprn)
+{
+ DisasContext *ctx = opaque;
+
+ gen_helper_store_hid0_601(cpu_gpr[gprn]);
+ /* Must stop the translation as endianness may have changed */
+ gen_stop_exception(ctx);
+}
+#endif
+
+/* Unified BATs */
+#if !defined(CONFIG_USER_ONLY)
+static void spr_read_601_ubat (void *opaque, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUState, IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
+}
+
+static void spr_write_601_ubatu (void *opaque, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2);
+ gen_helper_store_601_batl(t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+static void spr_write_601_ubatl (void *opaque, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2);
+ gen_helper_store_601_batu(t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+#endif
+
+/* PowerPC 40x specific registers */
+#if !defined(CONFIG_USER_ONLY)
+static void spr_read_40x_pit (void *opaque, int gprn, int sprn)
+{
+ gen_helper_load_40x_pit(cpu_gpr[gprn]);
+}
+
+static void spr_write_40x_pit (void *opaque, int sprn, int gprn)
+{
+ gen_helper_store_40x_pit(cpu_gpr[gprn]);
+}
+
+static void spr_write_40x_dbcr0 (void *opaque, int sprn, int gprn)
+{
+ DisasContext *ctx = opaque;
+
+ gen_helper_store_40x_dbcr0(cpu_gpr[gprn]);
+ /* We must stop translation as we may have rebooted */
+ gen_stop_exception(ctx);
+}
+
+static void spr_write_40x_sler (void *opaque, int sprn, int gprn)
+{
+ gen_helper_store_40x_sler(cpu_gpr[gprn]);
+}
+
+static void spr_write_booke_tcr (void *opaque, int sprn, int gprn)
+{
+ gen_helper_store_booke_tcr(cpu_gpr[gprn]);
+}
+
+static void spr_write_booke_tsr (void *opaque, int sprn, int gprn)
+{
+ gen_helper_store_booke_tsr(cpu_gpr[gprn]);
+}
+#endif
+
+/* PowerPC 403 specific registers */
+/* PBL1 / PBU1 / PBL2 / PBU2 */
+#if !defined(CONFIG_USER_ONLY)
+static void spr_read_403_pbr (void *opaque, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUState, pb[sprn - SPR_403_PBL1]));
+}
+
+static void spr_write_403_pbr (void *opaque, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32(sprn - SPR_403_PBL1);
+ gen_helper_store_403_pbr(t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+static void spr_write_pir (void *opaque, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xF);
+ gen_store_spr(SPR_PIR, t0);
+ tcg_temp_free(t0);
+}
+#endif
+
+/* SPE specific registers */
+static void spr_read_spefscr (void *opaque, int gprn, int sprn)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t0, cpu_env, offsetof(CPUState, spe_fscr));
+ tcg_gen_extu_i32_tl(cpu_gpr[gprn], t0);
+ tcg_temp_free_i32(t0);
+}
+
+static void spr_write_spefscr (void *opaque, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[gprn]);
+ tcg_gen_st_i32(t0, cpu_env, offsetof(CPUState, spe_fscr));
+ tcg_temp_free_i32(t0);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+/* Callback used to write the exception vector base */
+static void spr_write_excp_prefix (void *opaque, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, ivpr_mask));
+ tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, excp_prefix));
+ gen_store_spr(sprn, t0);
+ tcg_temp_free(t0);
+}
+
+static void spr_write_excp_vector (void *opaque, int sprn, int gprn)
+{
+ DisasContext *ctx = opaque;
+
+ if (sprn >= SPR_BOOKE_IVOR0 && sprn <= SPR_BOOKE_IVOR15) {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, ivor_mask));
+ tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, excp_vectors[sprn - SPR_BOOKE_IVOR0]));
+ gen_store_spr(sprn, t0);
+ tcg_temp_free(t0);
+ } else if (sprn >= SPR_BOOKE_IVOR32 && sprn <= SPR_BOOKE_IVOR37) {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, ivor_mask));
+ tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, excp_vectors[sprn - SPR_BOOKE_IVOR32 + 32]));
+ gen_store_spr(sprn, t0);
+ tcg_temp_free(t0);
+ } else {
+ printf("Trying to write an unknown exception vector %d %03x\n",
+ sprn, sprn);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ }
+}
+#endif
+
+static inline void vscr_init (CPUPPCState *env, uint32_t val)
+{
+ env->vscr = val;
+ /* Altivec always uses round-to-nearest */
+ set_float_rounding_mode(float_round_nearest_even, &env->vec_status);
+ set_flush_to_zero(vscr_nj, &env->vec_status);
+}
+
+#if defined(CONFIG_USER_ONLY)
+#define spr_register(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, initial_value) \
+do { \
+ _spr_register(env, num, name, uea_read, uea_write, initial_value); \
+} while (0)
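+/* In user-mode emulation the oea_* callbacks are simply dropped, as
+ * privileged accesses cannot happen there.
+ */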
+static inline void _spr_register (CPUPPCState *env, int num,
+ const char *name,
+ void (*uea_read)(void *opaque, int gprn, int sprn),
+ void (*uea_write)(void *opaque, int sprn, int gprn),
+ target_ulong initial_value)
+#else
+static inline void spr_register (CPUPPCState *env, int num,
+ const char *name,
+ void (*uea_read)(void *opaque, int gprn, int sprn),
+ void (*uea_write)(void *opaque, int sprn, int gprn),
+ void (*oea_read)(void *opaque, int gprn, int sprn),
+ void (*oea_write)(void *opaque, int sprn, int gprn),
+ target_ulong initial_value)
+#endif
+{
+ ppc_spr_t *spr;
+
+ spr = &env->spr_cb[num];
+    if (spr->name != NULL || env->spr[num] != 0x00000000 ||
+#if !defined(CONFIG_USER_ONLY)
+ spr->oea_read != NULL || spr->oea_write != NULL ||
+#endif
+ spr->uea_read != NULL || spr->uea_write != NULL) {
+        printf("Error: Trying to register SPR %d (%03x) twice!\n", num, num);
+ exit(1);
+ }
+#if defined(PPC_DEBUG_SPR)
+ printf("*** register spr %d (%03x) %s val " TARGET_FMT_lx "\n", num, num,
+ name, initial_value);
+#endif
+ spr->name = name;
+ spr->uea_read = uea_read;
+ spr->uea_write = uea_write;
+#if !defined(CONFIG_USER_ONLY)
+ spr->oea_read = oea_read;
+ spr->oea_write = oea_write;
+#endif
+ env->spr[num] = initial_value;
+}
+
+/* Generic PowerPC SPRs */
+static void gen_spr_generic (CPUPPCState *env)
+{
+ /* Integer processing */
+ spr_register(env, SPR_XER, "XER",
+ &spr_read_xer, &spr_write_xer,
+ &spr_read_xer, &spr_write_xer,
+ 0x00000000);
+    /* Branch control */
+ spr_register(env, SPR_LR, "LR",
+ &spr_read_lr, &spr_write_lr,
+ &spr_read_lr, &spr_write_lr,
+ 0x00000000);
+ spr_register(env, SPR_CTR, "CTR",
+ &spr_read_ctr, &spr_write_ctr,
+ &spr_read_ctr, &spr_write_ctr,
+ 0x00000000);
+ /* Interrupt processing */
+ spr_register(env, SPR_SRR0, "SRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SRR1, "SRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Processor control */
+ spr_register(env, SPR_SPRG0, "SPRG0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG1, "SPRG1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG2, "SPRG2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG3, "SPRG3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR common to all non-embedded PowerPC, including 601 */
+static void gen_spr_ne_601 (CPUPPCState *env)
+{
+ /* Exception processing */
+ spr_register(env, SPR_DSISR, "DSISR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_DAR, "DAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Timer */
+ spr_register(env, SPR_DECR, "DECR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_decr, &spr_write_decr,
+ 0x00000000);
+ /* Memory management */
+ spr_register(env, SPR_SDR1, "SDR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_sdr1, &spr_write_sdr1,
+ 0x00000000);
+}
+
+/* BATs 0-3 */
+static void gen_low_BATs (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ spr_register(env, SPR_IBAT0U, "IBAT0U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT0L, "IBAT0L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT1U, "IBAT1U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT1L, "IBAT1L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT2U, "IBAT2U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT2L, "IBAT2L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT3U, "IBAT3U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT3L, "IBAT3L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatl,
+ 0x00000000);
+ spr_register(env, SPR_DBAT0U, "DBAT0U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatu,
+ 0x00000000);
+ spr_register(env, SPR_DBAT0L, "DBAT0L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatl,
+ 0x00000000);
+ spr_register(env, SPR_DBAT1U, "DBAT1U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatu,
+ 0x00000000);
+ spr_register(env, SPR_DBAT1L, "DBAT1L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatl,
+ 0x00000000);
+ spr_register(env, SPR_DBAT2U, "DBAT2U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatu,
+ 0x00000000);
+ spr_register(env, SPR_DBAT2L, "DBAT2L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatl,
+ 0x00000000);
+ spr_register(env, SPR_DBAT3U, "DBAT3U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatu,
+ 0x00000000);
+ spr_register(env, SPR_DBAT3L, "DBAT3L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatl,
+ 0x00000000);
+ env->nb_BATs += 4;
+#endif
+}
+
+/* BATs 4-7 */
+static void gen_high_BATs (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ spr_register(env, SPR_IBAT4U, "IBAT4U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatu_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT4L, "IBAT4L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatl_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT5U, "IBAT5U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatu_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT5L, "IBAT5L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatl_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT6U, "IBAT6U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatu_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT6L, "IBAT6L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatl_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT7U, "IBAT7U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatu_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT7L, "IBAT7L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatl_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT4U, "DBAT4U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatu_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT4L, "DBAT4L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatl_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT5U, "DBAT5U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatu_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT5L, "DBAT5L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatl_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT6U, "DBAT6U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatu_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT6L, "DBAT6L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatl_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT7U, "DBAT7U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatu_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT7L, "DBAT7L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatl_h,
+ 0x00000000);
+ env->nb_BATs += 4;
+#endif
+}
+
+/* Generic PowerPC time base */
+static void gen_tbl (CPUPPCState *env)
+{
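+    /* SPR_VTBL/SPR_VTBU are the read-only aliases used by mftb, while
+     * SPR_TBL/SPR_TBU are the privileged encodings written with mtspr.
+     */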
+ spr_register(env, SPR_VTBL, "TBL",
+ &spr_read_tbl, SPR_NOACCESS,
+ &spr_read_tbl, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_TBL, "TBL",
+ &spr_read_tbl, SPR_NOACCESS,
+ &spr_read_tbl, &spr_write_tbl,
+ 0x00000000);
+ spr_register(env, SPR_VTBU, "TBU",
+ &spr_read_tbu, SPR_NOACCESS,
+ &spr_read_tbu, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_TBU, "TBU",
+ &spr_read_tbu, SPR_NOACCESS,
+ &spr_read_tbu, &spr_write_tbu,
+ 0x00000000);
+}
+
+/* Software table search registers */
+static void gen_6xx_7xx_soft_tlb (CPUPPCState *env, int nb_tlbs, int nb_ways)
+{
+#if !defined(CONFIG_USER_ONLY)
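+    /* Software-managed TLBs: nb_tlbs entries in nb_ways ways; id_tlbs = 1
+     * marks separate instruction and data TLBs.
+     */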
+ env->nb_tlb = nb_tlbs;
+ env->nb_ways = nb_ways;
+ env->id_tlbs = 1;
+ spr_register(env, SPR_DMISS, "DMISS",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_DCMP, "DCMP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_HASH1, "HASH1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_HASH2, "HASH2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_IMISS, "IMISS",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_ICMP, "ICMP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_RPA, "RPA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+#endif
+}
+
+/* SPR common to MPC755 and G2 */
+static void gen_spr_G2_755 (CPUPPCState *env)
+{
+    /* SPRGs */
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG5, "SPRG5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG6, "SPRG6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG7, "SPRG7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR common to all 7xx PowerPC implementations */
+static void gen_spr_7xx (CPUPPCState *env)
+{
+ /* Breakpoints */
+ /* XXX : not implemented */
+ spr_register(env, SPR_DABR, "DABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR, "IABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Cache management */
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTC, "ICTC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Performance monitors */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MMCR0, "MMCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MMCR1, "MMCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_PMC1, "PMC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_PMC2, "PMC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_PMC3, "PMC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_PMC4, "PMC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_SIAR, "SIAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UMMCR0, "UMMCR0",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UMMCR1, "UMMCR1",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UPMC1, "UPMC1",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UPMC2, "UPMC2",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UPMC3, "UPMC3",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UPMC4, "UPMC4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_USIAR, "USIAR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* External access control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void gen_spr_thrm (CPUPPCState *env)
+{
+ /* Thermal management */
+ /* XXX : not implemented */
+ spr_register(env, SPR_THRM1, "THRM1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_THRM2, "THRM2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_THRM3, "THRM3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 604 implementation */
+static void gen_spr_604 (CPUPPCState *env)
+{
+ /* Processor identification */
+ spr_register(env, SPR_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* Breakpoints */
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR, "IABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_DABR, "DABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Performance counters */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MMCR0, "MMCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_PMC1, "PMC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_PMC2, "PMC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_SIAR, "SIAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_SDA, "SDA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* External access control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 603 implementation */
+static void gen_spr_603 (CPUPPCState *env)
+{
+ /* External access control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC G2 implementation */
+static void gen_spr_G2 (CPUPPCState *env)
+{
+ /* Memory base address */
+ /* MBAR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MBAR, "MBAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Exception processing */
+ spr_register(env, SPR_BOOKE_CSRR0, "CSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_CSRR1, "CSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Breakpoints */
+ /* XXX : not implemented */
+ spr_register(env, SPR_DABR, "DABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_DABR2, "DABR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR, "IABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR2, "IABR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IBCR, "IBCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_DBCR, "DBCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 602 implementation */
+static void gen_spr_602 (CPUPPCState *env)
+{
+ /* ESA registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_SER, "SER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_SEBR, "SEBR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_ESASRR, "ESASRR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Floating point status */
+ /* XXX : not implemented */
+ spr_register(env, SPR_SP, "SP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_LT, "LT",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Watchdog timer */
+ /* XXX : not implemented */
+ spr_register(env, SPR_TCR, "TCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Interrupt base */
+ spr_register(env, SPR_IBR, "IBR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR, "IABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 601 implementation */
+static void gen_spr_601 (CPUPPCState *env)
+{
+ /* Multiplication/division register */
+ /* MQ */
+ spr_register(env, SPR_MQ, "MQ",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* RTC registers */
+ spr_register(env, SPR_601_RTCU, "RTCU",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_601_rtcu,
+ 0x00000000);
+ spr_register(env, SPR_601_VRTCU, "RTCU",
+ &spr_read_601_rtcu, SPR_NOACCESS,
+ &spr_read_601_rtcu, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_601_RTCL, "RTCL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_601_rtcl,
+ 0x00000000);
+ spr_register(env, SPR_601_VRTCL, "RTCL",
+ &spr_read_601_rtcl, SPR_NOACCESS,
+ &spr_read_601_rtcl, SPR_NOACCESS,
+ 0x00000000);
+ /* Timer */
+#if 0 /* ? */
+ spr_register(env, SPR_601_UDECR, "UDECR",
+ &spr_read_decr, SPR_NOACCESS,
+ &spr_read_decr, SPR_NOACCESS,
+ 0x00000000);
+#endif
+ /* External access control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ spr_register(env, SPR_IBAT0U, "IBAT0U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT0L, "IBAT0L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT1U, "IBAT1U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT1L, "IBAT1L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT2U, "IBAT2U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT2L, "IBAT2L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT3U, "IBAT3U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT3L, "IBAT3L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatl,
+ 0x00000000);
+ env->nb_BATs = 4;
+#endif
+}
+
+static void gen_spr_74xx (CPUPPCState *env)
+{
+ /* Processor identification */
+ spr_register(env, SPR_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MMCR2, "MMCR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UMMCR2, "UMMCR2",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX: not implemented */
+ spr_register(env, SPR_BAMR, "BAMR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSCR0, "MSSCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Altivec */
+ spr_register(env, SPR_VRSAVE, "VRSAVE",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Not strictly an SPR */
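+    /* 0x00010000 corresponds to VSCR[NJ] (non-Java mode) being set */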
+ vscr_init(env, 0x00010000);
+}
+
+static void gen_l3_ctrl (CPUPPCState *env)
+{
+ /* L3CR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3CR, "L3CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3ITCR0 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR0, "L3ITCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3PM */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3PM, "L3PM",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void gen_74xx_soft_tlb (CPUPPCState *env, int nb_tlbs, int nb_ways)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = nb_tlbs;
+ env->nb_ways = nb_ways;
+ env->id_tlbs = 1;
+ /* XXX : not implemented */
+ spr_register(env, SPR_PTEHI, "PTEHI",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_PTELO, "PTELO",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_TLBMISS, "TLBMISS",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+#endif
+}
+
+static void gen_spr_usprgh (CPUPPCState *env)
+{
+ spr_register(env, SPR_USPRG4, "USPRG4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_USPRG5, "USPRG5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_USPRG6, "USPRG6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_USPRG7, "USPRG7",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+}
+
+/* PowerPC BookE SPR */
+static void gen_spr_BookE (CPUPPCState *env, uint64_t ivor_mask)
+{
+ const char *ivor_names[64] = {
+ "IVOR0", "IVOR1", "IVOR2", "IVOR3",
+ "IVOR4", "IVOR5", "IVOR6", "IVOR7",
+ "IVOR8", "IVOR9", "IVOR10", "IVOR11",
+ "IVOR12", "IVOR13", "IVOR14", "IVOR15",
+ "IVOR16", "IVOR17", "IVOR18", "IVOR19",
+ "IVOR20", "IVOR21", "IVOR22", "IVOR23",
+ "IVOR24", "IVOR25", "IVOR26", "IVOR27",
+ "IVOR28", "IVOR29", "IVOR30", "IVOR31",
+ "IVOR32", "IVOR33", "IVOR34", "IVOR35",
+ "IVOR36", "IVOR37", "IVOR38", "IVOR39",
+ "IVOR40", "IVOR41", "IVOR42", "IVOR43",
+ "IVOR44", "IVOR45", "IVOR46", "IVOR47",
+ "IVOR48", "IVOR49", "IVOR50", "IVOR51",
+ "IVOR52", "IVOR53", "IVOR54", "IVOR55",
+ "IVOR56", "IVOR57", "IVOR58", "IVOR59",
+ "IVOR60", "IVOR61", "IVOR62", "IVOR63",
+ };
+#define SPR_BOOKE_IVORxx (-1)
+ int ivor_sprn[64] = {
+ SPR_BOOKE_IVOR0, SPR_BOOKE_IVOR1, SPR_BOOKE_IVOR2, SPR_BOOKE_IVOR3,
+ SPR_BOOKE_IVOR4, SPR_BOOKE_IVOR5, SPR_BOOKE_IVOR6, SPR_BOOKE_IVOR7,
+ SPR_BOOKE_IVOR8, SPR_BOOKE_IVOR9, SPR_BOOKE_IVOR10, SPR_BOOKE_IVOR11,
+ SPR_BOOKE_IVOR12, SPR_BOOKE_IVOR13, SPR_BOOKE_IVOR14, SPR_BOOKE_IVOR15,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVOR32, SPR_BOOKE_IVOR33, SPR_BOOKE_IVOR34, SPR_BOOKE_IVOR35,
+ SPR_BOOKE_IVOR36, SPR_BOOKE_IVOR37, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ };
+ int i;
+
+ /* Interrupt processing */
+ spr_register(env, SPR_BOOKE_CSRR0, "CSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_CSRR1, "CSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Debug */
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC1, "IAC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC2, "IAC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DAC1, "DAC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DAC2, "DAC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DBCR0, "DBCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DBCR1, "DBCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DBCR2, "DBCR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DBSR, "DBSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_clear,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_DEAR, "DEAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_ESR, "ESR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_IVPR, "IVPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_excp_prefix,
+ 0x00000000);
+ /* Exception vectors */
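+    /* Bit i of ivor_mask selects whether IVORi exists on this core;
+     * e.g. a mask of 0x0000FFFFULL registers IVOR0..IVOR15 only.
+     */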
+ for (i = 0; i < 64; i++) {
+ if (ivor_mask & (1ULL << i)) {
+ if (ivor_sprn[i] == SPR_BOOKE_IVORxx) {
+ fprintf(stderr, "ERROR: IVOR %d SPR is not defined\n", i);
+ exit(1);
+ }
+ spr_register(env, ivor_sprn[i], ivor_names[i],
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_excp_vector,
+ 0x00000000);
+ }
+ }
+ spr_register(env, SPR_BOOKE_PID, "PID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_TCR, "TCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke_tcr,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_TSR, "TSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke_tsr,
+ 0x00000000);
+ /* Timer */
+ spr_register(env, SPR_DECR, "DECR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_decr, &spr_write_decr,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_DECAR, "DECAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_generic,
+ 0x00000000);
+ /* SPRGs */
+ spr_register(env, SPR_USPRG0, "USPRG0",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG5, "SPRG5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG6, "SPRG6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG7, "SPRG7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* FSL storage control registers */
+static void gen_spr_BookE_FSL (CPUPPCState *env, uint32_t mas_mask)
+{
+#if !defined(CONFIG_USER_ONLY)
+ const char *mas_names[8] = {
+ "MAS0", "MAS1", "MAS2", "MAS3", "MAS4", "MAS5", "MAS6", "MAS7",
+ };
+ int mas_sprn[8] = {
+ SPR_BOOKE_MAS0, SPR_BOOKE_MAS1, SPR_BOOKE_MAS2, SPR_BOOKE_MAS3,
+ SPR_BOOKE_MAS4, SPR_BOOKE_MAS5, SPR_BOOKE_MAS6, SPR_BOOKE_MAS7,
+ };
+ int i;
+
+ /* TLB assist registers */
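+    /* Bit i of mas_mask selects whether MASi is present; e.g. 0x0000003F
+     * would register MAS0..MAS5 but not MAS6/MAS7.
+     */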
+ /* XXX : not implemented */
+ for (i = 0; i < 8; i++) {
+ if (mas_mask & (1 << i)) {
+ spr_register(env, mas_sprn[i], mas_names[i],
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ }
+ }
+ if (env->nb_pids > 1) {
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_PID1, "PID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ }
+ if (env->nb_pids > 2) {
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_PID2, "PID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ }
+ /* XXX : not implemented */
+ spr_register(env, SPR_MMUCFG, "MMUCFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000); /* TOFIX */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MMUCSR0, "MMUCSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000); /* TOFIX */
+ switch (env->nb_ways) {
+ case 4:
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_TLB3CFG, "TLB3CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000); /* TOFIX */
+ /* Fallthru */
+ case 3:
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_TLB2CFG, "TLB2CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000); /* TOFIX */
+ /* Fallthru */
+ case 2:
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_TLB1CFG, "TLB1CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000); /* TOFIX */
+ /* Fallthru */
+ case 1:
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_TLB0CFG, "TLB0CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000); /* TOFIX */
+ /* Fallthru */
+ case 0:
+ default:
+ break;
+ }
+#endif
+}
+
+/* SPR specific to PowerPC 440 implementation */
+static void gen_spr_440 (CPUPPCState *env)
+{
+ /* Cache control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DNV0, "DNV0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DNV1, "DNV1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DNV2, "DNV2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DNV3, "DNV3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DTV0, "DTV0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DTV1, "DTV1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DTV2, "DTV2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DTV3, "DTV3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DVLIM, "DVLIM",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_INV0, "INV0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_INV1, "INV1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_INV2, "INV2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_INV3, "INV3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_ITV0, "ITV0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_ITV1, "ITV1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_ITV2, "ITV2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_ITV3, "ITV3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_IVLIM, "IVLIM",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Cache debug */
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DCDBTRH, "DCDBTRH",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DCDBTRL, "DCDBTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_ICDBDR, "ICDBDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_ICDBTRH, "ICDBTRH",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_ICDBTRL, "ICDBTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DBDR, "DBDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Processor control */
+ spr_register(env, SPR_4xx_CCR0, "CCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_440_RSTCFG, "RSTCFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* Storage control */
+ spr_register(env, SPR_440_MMUCR, "MMUCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR shared between PowerPC 40x implementations */
+static void gen_spr_40x (CPUPPCState *env)
+{
+ /* Cache */
+    /* not emulated, as Qemu does not emulate caches */
+ spr_register(env, SPR_40x_DCCR, "DCCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+    /* not emulated, as Qemu does not emulate caches */
+ spr_register(env, SPR_40x_ICCR, "ICCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+    /* not emulated, as Qemu does not emulate caches */
+ spr_register(env, SPR_BOOKE_ICDBDR, "ICDBDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* Exception */
+ spr_register(env, SPR_40x_DEAR, "DEAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_ESR, "ESR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_EVPR, "EVPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_excp_prefix,
+ 0x00000000);
+ spr_register(env, SPR_40x_SRR2, "SRR2",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_SRR3, "SRR3",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Timers */
+ spr_register(env, SPR_40x_PIT, "PIT",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_40x_pit, &spr_write_40x_pit,
+ 0x00000000);
+ spr_register(env, SPR_40x_TCR, "TCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke_tcr,
+ 0x00000000);
+ spr_register(env, SPR_40x_TSR, "TSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke_tsr,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 405 implementation */
+static void gen_spr_405 (CPUPPCState *env)
+{
+ /* MMU */
+ spr_register(env, SPR_40x_PID, "PID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_4xx_CCR0, "CCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00700000);
+ /* Debug interface */
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DBCR0, "DBCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_40x_dbcr0,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_405_DBCR1, "DBCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DBSR, "DBSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_clear,
+ /* Last reset was system reset */
+ 0x00000300);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DAC1, "DAC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_DAC2, "DAC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_405_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_405_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_IAC1, "IAC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_IAC2, "IAC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_405_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_405_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Storage control */
+ /* XXX: TODO: not implemented */
+ spr_register(env, SPR_405_SLER, "SLER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_40x_sler,
+ 0x00000000);
+ spr_register(env, SPR_40x_ZPR, "ZPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_405_SU0R, "SU0R",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* SPRG */
+ spr_register(env, SPR_USPRG0, "USPRG0",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+    spr_register(env, SPR_SPRG5, "SPRG5",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_generic, &spr_write_generic,
+                 0x00000000);
+    spr_register(env, SPR_SPRG6, "SPRG6",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_generic, &spr_write_generic,
+                 0x00000000);
+    spr_register(env, SPR_SPRG7, "SPRG7",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_generic, &spr_write_generic,
+                 0x00000000);
+ gen_spr_usprgh(env);
+}
+
+/* SPR shared between PowerPC 401 & 403 implementations */
+static void gen_spr_401_403 (CPUPPCState *env)
+{
+ /* Time base */
+ spr_register(env, SPR_403_VTBL, "TBL",
+ &spr_read_tbl, SPR_NOACCESS,
+ &spr_read_tbl, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_403_TBL, "TBL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_tbl,
+ 0x00000000);
+ spr_register(env, SPR_403_VTBU, "TBU",
+ &spr_read_tbu, SPR_NOACCESS,
+ &spr_read_tbu, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_403_TBU, "TBU",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_tbu,
+ 0x00000000);
+ /* Debug */
+    /* not emulated, as Qemu does not emulate caches */
+ spr_register(env, SPR_403_CDBCR, "CDBCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 401 implementation */
+static void gen_spr_401 (CPUPPCState *env)
+{
+ /* Debug interface */
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DBCR0, "DBCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_40x_dbcr0,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DBSR, "DBSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_clear,
+ /* Last reset was system reset */
+ 0x00000300);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DAC1, "DAC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_IAC1, "IAC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Storage control */
+ /* XXX: TODO: not implemented */
+ spr_register(env, SPR_405_SLER, "SLER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_40x_sler,
+ 0x00000000);
+ /* not emulated, as Qemu never does speculative access */
+ spr_register(env, SPR_40x_SGR, "SGR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0xFFFFFFFF);
+    /* not emulated, as Qemu does not emulate caches */
+ spr_register(env, SPR_40x_DCWR, "DCWR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void gen_spr_401x2 (CPUPPCState *env)
+{
+ gen_spr_401(env);
+ spr_register(env, SPR_40x_PID, "PID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_ZPR, "ZPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 403 implementation */
+static void gen_spr_403 (CPUPPCState *env)
+{
+ /* Debug interface */
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DBCR0, "DBCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_40x_dbcr0,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DBSR, "DBSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_clear,
+ /* Last reset was system reset */
+ 0x00000300);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DAC1, "DAC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DAC2, "DAC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_IAC1, "IAC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_IAC2, "IAC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void gen_spr_403_real (CPUPPCState *env)
+{
+ spr_register(env, SPR_403_PBL1, "PBL1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_403_pbr, &spr_write_403_pbr,
+ 0x00000000);
+ spr_register(env, SPR_403_PBU1, "PBU1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_403_pbr, &spr_write_403_pbr,
+ 0x00000000);
+ spr_register(env, SPR_403_PBL2, "PBL2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_403_pbr, &spr_write_403_pbr,
+ 0x00000000);
+ spr_register(env, SPR_403_PBU2, "PBU2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_403_pbr, &spr_write_403_pbr,
+ 0x00000000);
+}
+
+static void gen_spr_403_mmu (CPUPPCState *env)
+{
+ /* MMU */
+ spr_register(env, SPR_40x_PID, "PID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_ZPR, "ZPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC compression coprocessor extension */
+static void gen_spr_compress (CPUPPCState *env)
+{
+ /* XXX : not implemented */
+ spr_register(env, SPR_401_SKR, "SKR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+#if defined (TARGET_PPC64)
+/* SPR specific to PowerPC 620 */
+static void gen_spr_620 (CPUPPCState *env)
+{
+ /* Processor identification */
+ spr_register(env, SPR_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ spr_register(env, SPR_ASR, "ASR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_asr, &spr_write_asr,
+ 0x00000000);
+ /* Breakpoints */
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR, "IABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_DABR, "DABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_SIAR, "SIAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_SDA, "SDA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_PMC1R, "PMC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_620_PMC1W, "PMC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_PMC2R, "PMC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_620_PMC2W, "PMC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_MMCR0R, "MMCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_620_MMCR0W, "MMCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_generic,
+ 0x00000000);
+ /* External access control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+#if 0 // XXX: check this
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_PMR0, "PMR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_PMR1, "PMR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_PMR2, "PMR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_PMR3, "PMR3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_PMR4, "PMR4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_PMR5, "PMR5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_PMR6, "PMR6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_PMR7, "PMR7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_PMR8, "PMR8",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_PMR9, "PMR9",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_PMRA, "PMR10",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_PMRB, "PMR11",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_PMRC, "PMR12",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_PMRD, "PMR13",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_PMRE, "PMR14",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_PMRF, "PMR15",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+#endif
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_BUSCSR, "BUSCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_620_L2SR, "L2SR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+#endif /* defined (TARGET_PPC64) */
+
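+/* SPRs shared by the MPC5xx (RCPU core) and MPC8xx families: exception
+ * state, decrementer and the development support block (CMPx, COUNTx,
+ * LCTRLx, DER, ...). */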
+static void gen_spr_5xx_8xx (CPUPPCState *env)
+{
+ /* Exception processing */
+ spr_register(env, SPR_DSISR, "DSISR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_DAR, "DAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Timer */
+ spr_register(env, SPR_DECR, "DECR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_decr, &spr_write_decr,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_EIE, "EIE",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_EID, "EID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_NRI, "NRI",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPA, "CMPA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPB, "CMPB",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPC, "CMPC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPD, "CMPD",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_ECR, "ECR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_DER, "DER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_COUNTA, "COUNTA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_COUNTB, "COUNTB",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPE, "CMPE",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPF, "CMPF",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPG, "CMPG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPH, "CMPH",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_LCTRL1, "LCTRL1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_LCTRL2, "LCTRL2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_BAR, "BAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_DPDR, "DPDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_IMMR, "IMMR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void gen_spr_5xx (CPUPPCState *env)
+{
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_GRA, "MI_GRA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_GRA, "L2U_GRA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RPCU_BBCMCR, "L2U_BBCMCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_MCR, "L2U_MCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RBA0, "MI_RBA0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RBA1, "MI_RBA1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RBA2, "MI_RBA2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RBA3, "MI_RBA3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RBA0, "L2U_RBA0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RBA1, "L2U_RBA1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RBA2, "L2U_RBA2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RBA3, "L2U_RBA3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RA0, "MI_RA0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RA1, "MI_RA1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RA2, "MI_RA2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RA3, "MI_RA3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RA0, "L2U_RA0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RA1, "L2U_RA1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RA2, "L2U_RA2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RA3, "L2U_RA3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_FPECR, "FPECR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void gen_spr_8xx (CPUPPCState *env)
+{
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_IC_CST, "IC_CST",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_IC_ADR, "IC_ADR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_IC_DAT, "IC_DAT",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_DC_CST, "DC_CST",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_DC_ADR, "DC_ADR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_DC_DAT, "DC_DAT",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_CTR, "MI_CTR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_AP, "MI_AP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_EPN, "MI_EPN",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_TWC, "MI_TWC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_RPN, "MI_RPN",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_DBCAM, "MI_DBCAM",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_DBRAM0, "MI_DBRAM0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_DBRAM1, "MI_DBRAM1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_CTR, "MD_CTR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_CASID, "MD_CASID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_AP, "MD_AP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_EPN, "MD_EPN",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_TWB, "MD_TWB",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_TWC, "MD_TWC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_RPN, "MD_RPN",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_TW, "MD_TW",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_DBCAM, "MD_DBCAM",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_DBRAM0, "MD_DBRAM0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_DBRAM1, "MD_DBRAM1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+// XXX: TODO
+/*
+ * AMR => SPR 29 (Power 2.04)
+ * CTRL => SPR 136 (Power 2.04)
+ * CTRL => SPR 152 (Power 2.04)
+ * SCOMC => SPR 276 (64 bits ?)
+ * SCOMD => SPR 277 (64 bits ?)
+ * TBU40 => SPR 286 (Power 2.04 hypv)
+ * HSPRG0 => SPR 304 (Power 2.04 hypv)
+ * HSPRG1 => SPR 305 (Power 2.04 hypv)
+ * HDSISR => SPR 306 (Power 2.04 hypv)
+ * HDAR => SPR 307 (Power 2.04 hypv)
+ * PURR => SPR 309 (Power 2.04 hypv)
+ * HDEC => SPR 310 (Power 2.04 hypv)
+ * HIOR => SPR 311 (hypv)
+ * RMOR => SPR 312 (970)
+ * HRMOR => SPR 313 (Power 2.04 hypv)
+ * HSRR0 => SPR 314 (Power 2.04 hypv)
+ * HSRR1 => SPR 315 (Power 2.04 hypv)
+ * LPCR => SPR 316 (970)
+ * LPIDR => SPR 317 (970)
+ * EPR => SPR 702 (Power 2.04 emb)
+ * perf => 768-783 (Power 2.04)
+ * perf => 784-799 (Power 2.04)
+ * PPR => SPR 896 (Power 2.04)
+ * EPLC => SPR 947 (Power 2.04 emb)
+ * EPSC => SPR 948 (Power 2.04 emb)
+ * DABRX => 1015 (Power 2.04 hypv)
+ * FPECR => SPR 1022 (?)
+ * ... and more (thermal management, performance counters, ...)
+ */
+
+/*****************************************************************************/
+/* Exception vectors models */
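+/* Each init_excp_xxx() routine below fills env->excp_vectors[] with the
+ * vector offsets of one core family and sets the reset exception prefix
+ * and hardware reset vector; the 4xx/BookE variants also set the
+ * IVOR/IVPR write masks. */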
+static void init_excp_4xx_real (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_PIT] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_FIT] = 0x00001010;
+ env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001020;
+ env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00002000;
+ env->hreset_excp_prefix = 0x00000000UL;
+ env->ivor_mask = 0x0000FFF0UL;
+ env->ivpr_mask = 0xFFFF0000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_4xx_softmmu (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_PIT] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_FIT] = 0x00001010;
+ env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001020;
+ env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00002000;
+ env->hreset_excp_prefix = 0x00000000UL;
+ env->ivor_mask = 0x0000FFF0UL;
+ env->ivpr_mask = 0xFFFF0000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_MPC5xx (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_FPA] = 0x00000E00;
+ env->excp_vectors[POWERPC_EXCP_EMUL] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_DABR] = 0x00001C00;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001C00;
+ env->excp_vectors[POWERPC_EXCP_MEXTBR] = 0x00001E00;
+ env->excp_vectors[POWERPC_EXCP_NMEXTBR] = 0x00001F00;
+ env->hreset_excp_prefix = 0x00000000UL;
+ env->ivor_mask = 0x0000FFF0UL;
+ env->ivpr_mask = 0xFFFF0000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_MPC8xx (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_FPA] = 0x00000E00;
+ env->excp_vectors[POWERPC_EXCP_EMUL] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_ITLBE] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_DTLBE] = 0x00001400;
+ env->excp_vectors[POWERPC_EXCP_DABR] = 0x00001C00;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001C00;
+ env->excp_vectors[POWERPC_EXCP_MEXTBR] = 0x00001E00;
+ env->excp_vectors[POWERPC_EXCP_NMEXTBR] = 0x00001F00;
+ env->hreset_excp_prefix = 0x00000000UL;
+ env->ivor_mask = 0x0000FFF0UL;
+ env->ivpr_mask = 0xFFFF0000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_G2 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000A00;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->hreset_excp_prefix = 0x00000000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_e200 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000FFC;
+ env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_APU] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_FIT] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_WDT] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_SPEU] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_EFPDI] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_EFPRI] = 0x00000000;
+ env->hreset_excp_prefix = 0x00000000UL;
+ env->ivor_mask = 0x0000FFF7UL;
+ env->ivpr_mask = 0xFFFF0000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
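+/* On BookE cores the vector offsets are software-controlled through IVPR
+ * and the IVORn registers, so only reset values are provided here;
+ * ivor_mask and ivpr_mask describe the writable bits of those registers. */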
+static void init_excp_BookE (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_APU] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_FIT] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_WDT] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00000000;
+ env->hreset_excp_prefix = 0x00000000UL;
+ env->ivor_mask = 0x0000FFE0UL;
+ env->ivpr_mask = 0xFFFF0000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_601 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_IO] = 0x00000A00;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_RUNM] = 0x00002000;
+ env->hreset_excp_prefix = 0xFFF00000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_602 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ /* XXX: the exception prefix has special behavior on the 602 */
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001500;
+ env->excp_vectors[POWERPC_EXCP_EMUL] = 0x00001600;
+ env->hreset_excp_prefix = 0xFFF00000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_603 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->hreset_excp_prefix = 0x00000000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_604 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->hreset_excp_prefix = 0xFFF00000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+#if defined(TARGET_PPC64)
+static void init_excp_620 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->hreset_excp_prefix = 0xFFF00000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x0000000000000100ULL;
+#endif
+}
+#endif /* defined(TARGET_PPC64) */
+
+static void init_excp_7x0 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700;
+ env->hreset_excp_prefix = 0x00000000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_750cl (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->hreset_excp_prefix = 0x00000000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_750cx (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700;
+ env->hreset_excp_prefix = 0x00000000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+/* XXX: Check if this is correct */
+static void init_excp_7x5 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700;
+ env->hreset_excp_prefix = 0x00000000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_7400 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->excp_vectors[POWERPC_EXCP_VPUA] = 0x00001600;
+ env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700;
+ env->hreset_excp_prefix = 0x00000000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_7450 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20;
+ env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->excp_vectors[POWERPC_EXCP_VPUA] = 0x00001600;
+ env->hreset_excp_prefix = 0x00000000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+#if defined (TARGET_PPC64)
+static void init_excp_970 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_DSEG] = 0x00000380;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_ISEG] = 0x00000480;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_HDECR] = 0x00000980;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_MAINT] = 0x00001600;
+ env->excp_vectors[POWERPC_EXCP_VPUA] = 0x00001700;
+ env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001800;
+ env->hreset_excp_prefix = 0x00000000FFF00000ULL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x0000000000000100ULL;
+#endif
+}
+#endif
+
+/*****************************************************************************/
+/* Power management enable checks */
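+/* Each check_pow_xxx() callback reports whether the core is currently
+ * allowed to enter a power-saving state: check_pow_none always refuses,
+ * check_pow_nocheck always allows, and the HID0 variants require one of
+ * the HID0 power management enable bits to be set. */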
+static int check_pow_none (CPUPPCState *env)
+{
+ return 0;
+}
+
+static int check_pow_nocheck (CPUPPCState *env)
+{
+ return 1;
+}
+
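+/* The 0x00E00000 mask covers the DOZE, NAP and SLEEP enable bits of HID0
+ * on 6xx/7x0-class cores. */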
+static int check_pow_hid0 (CPUPPCState *env)
+{
+ if (env->spr[SPR_HID0] & 0x00E00000)
+ return 1;
+
+ return 0;
+}
+
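+/* 74xx variant: only the NAP and SLEEP enable bits (0x00600000) are
+ * checked. */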
+static int check_pow_hid0_74xx (CPUPPCState *env)
+{
+ if (env->spr[SPR_HID0] & 0x00600000)
+ return 1;
+
+ return 0;
+}
+
+/*****************************************************************************/
+/* PowerPC implementations definitions */
+
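+/* Each core below is described by a set of POWERPC_xxx_yyy macros
+ * (instruction set, MSR mask, MMU and exception models, input pins, BFD
+ * machine and flags), a check_pow_xxx callback and an init_proc_xxx()
+ * routine that registers its SPRs, exception vectors, cache line sizes
+ * and, when available, its interrupt controller. */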
+/* PowerPC 401 */
+#define POWERPC_INSNS_401 (PPC_INSNS_BASE | PPC_STRING | \
+ PPC_WRTEE | PPC_DCR | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | \
+ PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_4xx_COMMON | PPC_40x_EXCP)
+#define POWERPC_MSRM_401 (0x00000000000FD201ULL)
+#define POWERPC_MMU_401 (POWERPC_MMU_REAL)
+#define POWERPC_EXCP_401 (POWERPC_EXCP_40x)
+#define POWERPC_INPUT_401 (PPC_FLAGS_INPUT_401)
+#define POWERPC_BFDM_401 (bfd_mach_ppc_403)
+#define POWERPC_FLAG_401 (POWERPC_FLAG_CE | POWERPC_FLAG_DE | \
+ POWERPC_FLAG_BUS_CLK)
+#define check_pow_401 check_pow_nocheck
+
+static void init_proc_401 (CPUPPCState *env)
+{
+ gen_spr_40x(env);
+ gen_spr_401_403(env);
+ gen_spr_401(env);
+ init_excp_4xx_real(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(env);
+}
+
+/* PowerPC 401x2 */
+#define POWERPC_INSNS_401x2 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_DCR | PPC_WRTEE | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | \
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | \
+ PPC_4xx_COMMON | PPC_40x_EXCP)
+#define POWERPC_MSRM_401x2 (0x00000000001FD231ULL)
+#define POWERPC_MMU_401x2 (POWERPC_MMU_SOFT_4xx_Z)
+#define POWERPC_EXCP_401x2 (POWERPC_EXCP_40x)
+#define POWERPC_INPUT_401x2 (PPC_FLAGS_INPUT_401)
+#define POWERPC_BFDM_401x2 (bfd_mach_ppc_403)
+#define POWERPC_FLAG_401x2 (POWERPC_FLAG_CE | POWERPC_FLAG_DE | \
+ POWERPC_FLAG_BUS_CLK)
+#define check_pow_401x2 check_pow_nocheck
+
+static void init_proc_401x2 (CPUPPCState *env)
+{
+ gen_spr_40x(env);
+ gen_spr_401_403(env);
+ gen_spr_401x2(env);
+ gen_spr_compress(env);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+#endif
+ init_excp_4xx_softmmu(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(env);
+}
+
+/* PowerPC 401x3 */
+#define POWERPC_INSNS_401x3 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_DCR | PPC_WRTEE | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | \
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | \
+ PPC_4xx_COMMON | PPC_40x_EXCP)
+#define POWERPC_MSRM_401x3 (0x00000000001FD631ULL)
+#define POWERPC_MMU_401x3 (POWERPC_MMU_SOFT_4xx_Z)
+#define POWERPC_EXCP_401x3 (POWERPC_EXCP_40x)
+#define POWERPC_INPUT_401x3 (PPC_FLAGS_INPUT_401)
+#define POWERPC_BFDM_401x3 (bfd_mach_ppc_403)
+#define POWERPC_FLAG_401x3 (POWERPC_FLAG_CE | POWERPC_FLAG_DE | \
+ POWERPC_FLAG_BUS_CLK)
+#define check_pow_401x3 check_pow_nocheck
+
+__attribute__ (( unused ))
+static void init_proc_401x3 (CPUPPCState *env)
+{
+ gen_spr_40x(env);
+ gen_spr_401_403(env);
+ gen_spr_401(env);
+ gen_spr_401x2(env);
+ gen_spr_compress(env);
+ init_excp_4xx_softmmu(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(env);
+}
+
+/* IOP480 */
+#define POWERPC_INSNS_IOP480 (PPC_INSNS_BASE | PPC_STRING | \
+ PPC_DCR | PPC_WRTEE | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | \
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | \
+ PPC_4xx_COMMON | PPC_40x_EXCP)
+#define POWERPC_MSRM_IOP480 (0x00000000001FD231ULL)
+#define POWERPC_MMU_IOP480 (POWERPC_MMU_SOFT_4xx_Z)
+#define POWERPC_EXCP_IOP480 (POWERPC_EXCP_40x)
+#define POWERPC_INPUT_IOP480 (PPC_FLAGS_INPUT_401)
+#define POWERPC_BFDM_IOP480 (bfd_mach_ppc_403)
+#define POWERPC_FLAG_IOP480 (POWERPC_FLAG_CE | POWERPC_FLAG_DE | \
+ POWERPC_FLAG_BUS_CLK)
+#define check_pow_IOP480 check_pow_nocheck
+
+static void init_proc_IOP480 (CPUPPCState *env)
+{
+ gen_spr_40x(env);
+ gen_spr_401_403(env);
+ gen_spr_401x2(env);
+ gen_spr_compress(env);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+#endif
+ init_excp_4xx_softmmu(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(env);
+}
+
+/* PowerPC 403 */
+#define POWERPC_INSNS_403 (PPC_INSNS_BASE | PPC_STRING | \
+ PPC_DCR | PPC_WRTEE | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | \
+ PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_4xx_COMMON | PPC_40x_EXCP)
+#define POWERPC_MSRM_403 (0x000000000007D00DULL)
+#define POWERPC_MMU_403 (POWERPC_MMU_REAL)
+#define POWERPC_EXCP_403 (POWERPC_EXCP_40x)
+#define POWERPC_INPUT_403 (PPC_FLAGS_INPUT_401)
+#define POWERPC_BFDM_403 (bfd_mach_ppc_403)
+#define POWERPC_FLAG_403 (POWERPC_FLAG_CE | POWERPC_FLAG_PX | \
+ POWERPC_FLAG_BUS_CLK)
+#define check_pow_403 check_pow_nocheck
+
+static void init_proc_403 (CPUPPCState *env)
+{
+ gen_spr_40x(env);
+ gen_spr_401_403(env);
+ gen_spr_403(env);
+ gen_spr_403_real(env);
+ init_excp_4xx_real(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(env);
+}
+
+/* PowerPC 403 GCX */
+#define POWERPC_INSNS_403GCX (PPC_INSNS_BASE | PPC_STRING | \
+ PPC_DCR | PPC_WRTEE | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | \
+ PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | \
+ PPC_4xx_COMMON | PPC_40x_EXCP)
+#define POWERPC_MSRM_403GCX (0x000000000007D00DULL)
+#define POWERPC_MMU_403GCX (POWERPC_MMU_SOFT_4xx_Z)
+#define POWERPC_EXCP_403GCX (POWERPC_EXCP_40x)
+#define POWERPC_INPUT_403GCX (PPC_FLAGS_INPUT_401)
+#define POWERPC_BFDM_403GCX (bfd_mach_ppc_403)
+#define POWERPC_FLAG_403GCX (POWERPC_FLAG_CE | POWERPC_FLAG_PX | \
+ POWERPC_FLAG_BUS_CLK)
+#define check_pow_403GCX check_pow_nocheck
+
+static void init_proc_403GCX (CPUPPCState *env)
+{
+ gen_spr_40x(env);
+ gen_spr_401_403(env);
+ gen_spr_403(env);
+ gen_spr_403_real(env);
+ gen_spr_403_mmu(env);
+ /* Bus access control */
+ /* not emulated, as QEMU never performs speculative accesses */
+ spr_register(env, SPR_40x_SGR, "SGR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0xFFFFFFFF);
+ /* not emulated, as QEMU does not emulate caches */
+ spr_register(env, SPR_40x_DCWR, "DCWR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+#endif
+ init_excp_4xx_softmmu(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(env);
+}
+
+/* PowerPC 405 */
+#define POWERPC_INSNS_405 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_DCR | PPC_WRTEE | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | \
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | \
+ PPC_4xx_COMMON | PPC_405_MAC | PPC_40x_EXCP)
+#define POWERPC_MSRM_405 (0x000000000006E630ULL)
+#define POWERPC_MMU_405 (POWERPC_MMU_SOFT_4xx)
+#define POWERPC_EXCP_405 (POWERPC_EXCP_40x)
+#define POWERPC_INPUT_405 (PPC_FLAGS_INPUT_405)
+#define POWERPC_BFDM_405 (bfd_mach_ppc_403)
+#define POWERPC_FLAG_405 (POWERPC_FLAG_CE | POWERPC_FLAG_DWE | \
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK)
+#define check_pow_405 check_pow_nocheck
+
+static void init_proc_405 (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_40x(env);
+ gen_spr_405(env);
+ /* Bus access control */
+ /* not emulated, as QEMU never performs speculative accesses */
+ spr_register(env, SPR_40x_SGR, "SGR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0xFFFFFFFF);
+ /* not emulated, as QEMU does not emulate caches */
+ spr_register(env, SPR_40x_DCWR, "DCWR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+#endif
+ init_excp_4xx_softmmu(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(env);
+}
+
+/* PowerPC 440 EP */
+#define POWERPC_INSNS_440EP (PPC_INSNS_BASE | PPC_STRING | \
+ PPC_DCR | PPC_WRTEE | PPC_RFMCI | \
+ PPC_CACHE | PPC_CACHE_ICBI | \
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA | \
+ PPC_MEM_TLBSYNC | PPC_MFTB | \
+ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | \
+ PPC_440_SPEC)
+#define POWERPC_MSRM_440EP (0x000000000006D630ULL)
+#define POWERPC_MMU_440EP (POWERPC_MMU_BOOKE)
+#define POWERPC_EXCP_440EP (POWERPC_EXCP_BOOKE)
+#define POWERPC_INPUT_440EP (PPC_FLAGS_INPUT_BookE)
+#define POWERPC_BFDM_440EP (bfd_mach_ppc_403)
+#define POWERPC_FLAG_440EP (POWERPC_FLAG_CE | POWERPC_FLAG_DWE | \
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK)
+#define check_pow_440EP check_pow_nocheck
+
+__attribute__ (( unused ))
+static void init_proc_440EP (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_BookE(env, 0x000000000000FFFFULL);
+ gen_spr_440(env);
+ gen_spr_usprgh(env);
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_MCSR, "MCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_CCR1, "CCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+#endif
+ init_excp_BookE(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+}
+
+/* PowerPC 440 GP */
+#define POWERPC_INSNS_440GP (PPC_INSNS_BASE | PPC_STRING | \
+ PPC_DCR | PPC_DCRX | PPC_WRTEE | PPC_MFAPIDI | \
+ PPC_CACHE | PPC_CACHE_ICBI | \
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA | \
+ PPC_MEM_TLBSYNC | PPC_TLBIVA | PPC_MFTB | \
+ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | \
+ PPC_440_SPEC)
+#define POWERPC_MSRM_440GP (0x000000000006FF30ULL)
+#define POWERPC_MMU_440GP (POWERPC_MMU_BOOKE)
+#define POWERPC_EXCP_440GP (POWERPC_EXCP_BOOKE)
+#define POWERPC_INPUT_440GP (PPC_FLAGS_INPUT_BookE)
+#define POWERPC_BFDM_440GP (bfd_mach_ppc_403)
+#define POWERPC_FLAG_440GP (POWERPC_FLAG_CE | POWERPC_FLAG_DWE | \
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK)
+#define check_pow_440GP check_pow_nocheck
+
+__attribute__ (( unused ))
+static void init_proc_440GP (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_BookE(env, 0x000000000000FFFFULL);
+ gen_spr_440(env);
+ gen_spr_usprgh(env);
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+#endif
+ init_excp_BookE(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+}
+
+/* PowerPC 440x4 */
+#define POWERPC_INSNS_440x4 (PPC_INSNS_BASE | PPC_STRING | \
+ PPC_DCR | PPC_WRTEE | \
+ PPC_CACHE | PPC_CACHE_ICBI | \
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA | \
+ PPC_MEM_TLBSYNC | PPC_MFTB | \
+ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | \
+ PPC_440_SPEC)
+#define POWERPC_MSRM_440x4 (0x000000000006FF30ULL)
+#define POWERPC_MMU_440x4 (POWERPC_MMU_BOOKE)
+#define POWERPC_EXCP_440x4 (POWERPC_EXCP_BOOKE)
+#define POWERPC_INPUT_440x4 (PPC_FLAGS_INPUT_BookE)
+#define POWERPC_BFDM_440x4 (bfd_mach_ppc_403)
+#define POWERPC_FLAG_440x4 (POWERPC_FLAG_CE | POWERPC_FLAG_DWE | \
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK)
+#define check_pow_440x4 check_pow_nocheck
+
+__attribute__ (( unused ))
+static void init_proc_440x4 (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_BookE(env, 0x000000000000FFFFULL);
+ gen_spr_440(env);
+ gen_spr_usprgh(env);
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+#endif
+ init_excp_BookE(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+}
+
+/* PowerPC 440x5 */
+#define POWERPC_INSNS_440x5 (PPC_INSNS_BASE | PPC_STRING | \
+ PPC_DCR | PPC_WRTEE | PPC_RFMCI | \
+ PPC_CACHE | PPC_CACHE_ICBI | \
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA | \
+ PPC_MEM_TLBSYNC | PPC_MFTB | \
+ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | \
+ PPC_440_SPEC)
+#define POWERPC_MSRM_440x5 (0x000000000006FF30ULL)
+#define POWERPC_MMU_440x5 (POWERPC_MMU_BOOKE)
+#define POWERPC_EXCP_440x5 (POWERPC_EXCP_BOOKE)
+#define POWERPC_INPUT_440x5 (PPC_FLAGS_INPUT_BookE)
+#define POWERPC_BFDM_440x5 (bfd_mach_ppc_403)
+#define POWERPC_FLAG_440x5 (POWERPC_FLAG_CE | POWERPC_FLAG_DWE | \
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK)
+#define check_pow_440x5 check_pow_nocheck
+
+static void init_proc_440x5 (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_BookE(env, 0x000000000000FFFFULL);
+ gen_spr_440(env);
+ gen_spr_usprgh(env);
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_MCSR, "MCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_CCR1, "CCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+#endif
+ init_excp_BookE(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ ppc40x_irq_init(env);
+}
+
+/* PowerPC 460 (guessed) */
+#define POWERPC_INSNS_460 (PPC_INSNS_BASE | PPC_STRING | \
+ PPC_DCR | PPC_DCRX | PPC_DCRUX | \
+ PPC_WRTEE | PPC_MFAPIDI | PPC_MFTB | \
+ PPC_CACHE | PPC_CACHE_ICBI | \
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA | \
+ PPC_MEM_TLBSYNC | PPC_TLBIVA | \
+ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | \
+ PPC_440_SPEC)
+#define POWERPC_MSRM_460 (0x000000000006FF30ULL)
+#define POWERPC_MMU_460 (POWERPC_MMU_BOOKE)
+#define POWERPC_EXCP_460 (POWERPC_EXCP_BOOKE)
+#define POWERPC_INPUT_460 (PPC_FLAGS_INPUT_BookE)
+#define POWERPC_BFDM_460 (bfd_mach_ppc_403)
+#define POWERPC_FLAG_460 (POWERPC_FLAG_CE | POWERPC_FLAG_DWE | \
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK)
+#define check_pow_460 check_pow_nocheck
+
+__attribute__ (( unused ))
+static void init_proc_460 (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_BookE(env, 0x000000000000FFFFULL);
+ gen_spr_440(env);
+ gen_spr_usprgh(env);
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_MCSR, "MCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_CCR1, "CCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+    spr_register(env, SPR_DCRIPR, "DCRIPR",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+#endif
+ init_excp_BookE(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+}
+
+/* PowerPC 460F (guessed) */
+#define POWERPC_INSNS_460F (PPC_INSNS_BASE | PPC_STRING | \
+ PPC_FLOAT | PPC_FLOAT_FRES | PPC_FLOAT_FSEL | \
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | \
+ PPC_FLOAT_STFIWX | PPC_MFTB | \
+ PPC_DCR | PPC_DCRX | PPC_DCRUX | \
+ PPC_WRTEE | PPC_MFAPIDI | \
+ PPC_CACHE | PPC_CACHE_ICBI | \
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA | \
+ PPC_MEM_TLBSYNC | PPC_TLBIVA | \
+ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | \
+ PPC_440_SPEC)
+#define POWERPC_MSRM_460F    (0x000000000006FF30ULL)
+#define POWERPC_MMU_460F (POWERPC_MMU_BOOKE)
+#define POWERPC_EXCP_460F (POWERPC_EXCP_BOOKE)
+#define POWERPC_INPUT_460F (PPC_FLAGS_INPUT_BookE)
+#define POWERPC_BFDM_460F (bfd_mach_ppc_403)
+#define POWERPC_FLAG_460F (POWERPC_FLAG_CE | POWERPC_FLAG_DWE | \
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK)
+#define check_pow_460F check_pow_nocheck
+
+__attribute__ (( unused ))
+static void init_proc_460F (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_BookE(env, 0x000000000000FFFFULL);
+ gen_spr_440(env);
+ gen_spr_usprgh(env);
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_MCSR, "MCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_CCR1, "CCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+    spr_register(env, SPR_DCRIPR, "DCRIPR",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+#endif
+ init_excp_BookE(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+}
+
+/* Freescale 5xx cores (aka RCPU) */
+#define POWERPC_INSNS_MPC5xx (PPC_INSNS_BASE | PPC_STRING | \
+ PPC_MEM_EIEIO | PPC_MEM_SYNC | \
+ PPC_CACHE_ICBI | PPC_FLOAT | PPC_FLOAT_STFIWX | \
+ PPC_MFTB)
+#define POWERPC_MSRM_MPC5xx (0x000000000001FF43ULL)
+#define POWERPC_MMU_MPC5xx (POWERPC_MMU_REAL)
+#define POWERPC_EXCP_MPC5xx (POWERPC_EXCP_603)
+#define POWERPC_INPUT_MPC5xx (PPC_FLAGS_INPUT_RCPU)
+#define POWERPC_BFDM_MPC5xx (bfd_mach_ppc_505)
+#define POWERPC_FLAG_MPC5xx (POWERPC_FLAG_SE | POWERPC_FLAG_BE | \
+ POWERPC_FLAG_BUS_CLK)
+#define check_pow_MPC5xx check_pow_none
+
+__attribute__ (( unused ))
+static void init_proc_MPC5xx (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_5xx_8xx(env);
+ gen_spr_5xx(env);
+ init_excp_MPC5xx(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+}
+
+/* Freescale 8xx cores (aka PowerQUICC) */
+#define POWERPC_INSNS_MPC8xx (PPC_INSNS_BASE | PPC_STRING | \
+ PPC_MEM_EIEIO | PPC_MEM_SYNC | \
+ PPC_CACHE_ICBI | PPC_MFTB)
+#define POWERPC_MSRM_MPC8xx (0x000000000001F673ULL)
+#define POWERPC_MMU_MPC8xx (POWERPC_MMU_MPC8xx)
+#define POWERPC_EXCP_MPC8xx (POWERPC_EXCP_603)
+#define POWERPC_INPUT_MPC8xx (PPC_FLAGS_INPUT_RCPU)
+#define POWERPC_BFDM_MPC8xx (bfd_mach_ppc_860)
+#define POWERPC_FLAG_MPC8xx (POWERPC_FLAG_SE | POWERPC_FLAG_BE | \
+ POWERPC_FLAG_BUS_CLK)
+#define check_pow_MPC8xx check_pow_none
+
+__attribute__ (( unused ))
+static void init_proc_MPC8xx (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_5xx_8xx(env);
+ gen_spr_8xx(env);
+ init_excp_MPC8xx(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+}
+
+/* Freescale 82xx cores (aka PowerQUICC-II) */
+/* PowerPC G2 */
+#define POWERPC_INSNS_G2 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | \
+ PPC_SEGMENT | PPC_EXTERN)
+#define POWERPC_MSRM_G2 (0x000000000006FFF2ULL)
+#define POWERPC_MMU_G2 (POWERPC_MMU_SOFT_6xx)
+//#define POWERPC_EXCP_G2 (POWERPC_EXCP_G2)
+#define POWERPC_INPUT_G2 (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_G2 (bfd_mach_ppc_ec603e)
+#define POWERPC_FLAG_G2 (POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | \
+ POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK)
+#define check_pow_G2 check_pow_hid0
+
+static void init_proc_G2 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_G2_755(env);
+ gen_spr_G2(env);
+ /* Time base */
+ gen_tbl(env);
+ /* External access control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation register */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_high_BATs(env);
+ gen_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_G2(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC G2LE */
+#define POWERPC_INSNS_G2LE (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | \
+ PPC_SEGMENT | PPC_EXTERN)
+#define POWERPC_MSRM_G2LE (0x000000000007FFF3ULL)
+#define POWERPC_MMU_G2LE (POWERPC_MMU_SOFT_6xx)
+#define POWERPC_EXCP_G2LE (POWERPC_EXCP_G2)
+#define POWERPC_INPUT_G2LE (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_G2LE (bfd_mach_ppc_ec603e)
+#define POWERPC_FLAG_G2LE (POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | \
+ POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK)
+#define check_pow_G2LE check_pow_hid0
+
+static void init_proc_G2LE (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_G2_755(env);
+ gen_spr_G2(env);
+ /* Time base */
+ gen_tbl(env);
+ /* External access control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation register */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_high_BATs(env);
+ gen_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_G2(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* e200 core */
+/* XXX: unimplemented instructions:
+ * dcblc
+ * dcbtlst
+ * dcbtstls
+ * icblc
+ * icbtls
+ * tlbivax
+ * all SPE multiply-accumulate instructions
+ */
+#define POWERPC_INSNS_e200 (PPC_INSNS_BASE | PPC_ISEL | \
+ PPC_SPE | PPC_SPE_SINGLE | \
+ PPC_WRTEE | PPC_RFDI | \
+ PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI | \
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA | \
+ PPC_MEM_TLBSYNC | PPC_TLBIVAX | \
+ PPC_BOOKE)
+#define POWERPC_MSRM_e200 (0x000000000606FF30ULL)
+#define POWERPC_MMU_e200 (POWERPC_MMU_BOOKE_FSL)
+#define POWERPC_EXCP_e200 (POWERPC_EXCP_BOOKE)
+#define POWERPC_INPUT_e200 (PPC_FLAGS_INPUT_BookE)
+#define POWERPC_BFDM_e200 (bfd_mach_ppc_860)
+#define POWERPC_FLAG_e200 (POWERPC_FLAG_SPE | POWERPC_FLAG_CE | \
+ POWERPC_FLAG_UBLE | POWERPC_FLAG_DE | \
+ POWERPC_FLAG_BUS_CLK)
+#define check_pow_e200 check_pow_hid0
+
+__attribute__ (( unused ))
+static void init_proc_e200 (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_BookE(env, 0x000000070000FFFFULL);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_SPEFSCR, "SPEFSCR",
+ &spr_read_spefscr, &spr_write_spefscr,
+ &spr_read_spefscr, &spr_write_spefscr,
+ 0x00000000);
+ /* Memory management */
+ gen_spr_BookE_FSL(env, 0x0000005D);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_ALTCTXCR, "ALTCTXCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_BUCSR, "BUCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_CTXCR, "CTXCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_DBCNT, "DBCNT",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_DBCR3, "DBCR3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_L1CFG0, "L1CFG0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_L1CSR0, "L1CSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_L1FINV0, "L1FINV0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_TLB0CFG, "TLB0CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_TLB1CFG, "TLB1CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_DSRR0, "DSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_DSRR1, "DSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+#endif
+ init_excp_e200(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+}
+
+/* e300 core */
+#define POWERPC_INSNS_e300 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | \
+ PPC_SEGMENT | PPC_EXTERN)
+#define POWERPC_MSRM_e300 (0x000000000007FFF3ULL)
+#define POWERPC_MMU_e300 (POWERPC_MMU_SOFT_6xx)
+#define POWERPC_EXCP_e300 (POWERPC_EXCP_603)
+#define POWERPC_INPUT_e300 (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_e300 (bfd_mach_ppc_603)
+#define POWERPC_FLAG_e300 (POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | \
+ POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK)
+#define check_pow_e300 check_pow_hid0
+
+__attribute__ (( unused ))
+static void init_proc_e300 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_603(env);
+ /* Time base */
+ gen_tbl(env);
+ /* hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_high_BATs(env);
+ gen_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_603(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* e500v1 core */
+#define POWERPC_INSNS_e500v1 (PPC_INSNS_BASE | PPC_ISEL | \
+ PPC_SPE | PPC_SPE_SINGLE | \
+ PPC_WRTEE | PPC_RFDI | \
+ PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI | \
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA | \
+ PPC_MEM_TLBSYNC | PPC_TLBIVAX | \
+ PPC_BOOKE)
+#define POWERPC_MSRM_e500v1 (0x000000000606FF30ULL)
+#define POWERPC_MMU_e500v1 (POWERPC_MMU_BOOKE_FSL)
+#define POWERPC_EXCP_e500v1 (POWERPC_EXCP_BOOKE)
+#define POWERPC_INPUT_e500v1 (PPC_FLAGS_INPUT_BookE)
+#define POWERPC_BFDM_e500v1 (bfd_mach_ppc_860)
+#define POWERPC_FLAG_e500v1 (POWERPC_FLAG_SPE | POWERPC_FLAG_CE | \
+ POWERPC_FLAG_UBLE | POWERPC_FLAG_DE | \
+ POWERPC_FLAG_BUS_CLK)
+#define check_pow_e500v1 check_pow_hid0
+#define init_proc_e500v1 init_proc_e500
+
+/* e500v2 core */
+#define POWERPC_INSNS_e500v2 (PPC_INSNS_BASE | PPC_ISEL | \
+ PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE | \
+ PPC_WRTEE | PPC_RFDI | \
+ PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI | \
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA | \
+ PPC_MEM_TLBSYNC | PPC_TLBIVAX | \
+ PPC_BOOKE)
+#define POWERPC_MSRM_e500v2 (0x000000000606FF30ULL)
+#define POWERPC_MMU_e500v2 (POWERPC_MMU_BOOKE_FSL)
+#define POWERPC_EXCP_e500v2 (POWERPC_EXCP_BOOKE)
+#define POWERPC_INPUT_e500v2 (PPC_FLAGS_INPUT_BookE)
+#define POWERPC_BFDM_e500v2 (bfd_mach_ppc_860)
+#define POWERPC_FLAG_e500v2 (POWERPC_FLAG_SPE | POWERPC_FLAG_CE | \
+ POWERPC_FLAG_UBLE | POWERPC_FLAG_DE | \
+ POWERPC_FLAG_BUS_CLK)
+#define check_pow_e500v2 check_pow_hid0
+#define init_proc_e500v2 init_proc_e500
+
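+/* Shared init routine: init_proc_e500v1 and init_proc_e500v2 above both
+ * resolve to this function. */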
+static void init_proc_e500 (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_BookE(env, 0x0000000F0000FD7FULL);
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_SPEFSCR, "SPEFSCR",
+ &spr_read_spefscr, &spr_write_spefscr,
+ &spr_read_spefscr, &spr_write_spefscr,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_pids = 3;
+#endif
+ gen_spr_BookE_FSL(env, 0x0000005F);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_BBEAR, "BBEAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_BBTAR, "BBTAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_MCAR, "MCAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_MCSR, "MCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_NPIDR, "NPIDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_BUCSR, "BUCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_L1CFG0, "L1CFG0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_L1CSR0, "L1CSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_L1CSR1, "L1CSR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_TLB0CFG, "TLB0CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_TLB1CFG, "TLB1CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+#endif
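+    /* The e500 cores reuse the e200 exception vector setup */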
+ init_excp_e200(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppce500_irq_init(env);
+}
+
+/* Non-embedded PowerPC */
+
+/* POWER: same as the 601, but without mfmsr and mfsr */
+#if defined(TODO)
+#define POWERPC_INSNS_POWER (XXX_TODO)
+/* POWER RSC (from RAD6000) */
+#define POWERPC_MSRM_POWER   (0x000000000000FEF0ULL)
+#endif /* TODO */
+
+/* PowerPC 601 */
+#define POWERPC_INSNS_601 (PPC_INSNS_BASE | PPC_STRING | PPC_POWER_BR | \
+ PPC_FLOAT | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | \
+ PPC_SEGMENT | PPC_EXTERN)
+#define POWERPC_MSRM_601 (0x000000000000FD70ULL)
+#define POWERPC_MSRR_601 (0x0000000000001040ULL)
+//#define POWERPC_MMU_601 (POWERPC_MMU_601)
+//#define POWERPC_EXCP_601 (POWERPC_EXCP_601)
+#define POWERPC_INPUT_601 (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_601 (bfd_mach_ppc_601)
+#define POWERPC_FLAG_601 (POWERPC_FLAG_SE | POWERPC_FLAG_RTC_CLK)
+#define check_pow_601 check_pow_none
+
+static void init_proc_601 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_601(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_hid0_601,
+ 0x80010080);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_601_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_601_HID5, "HID5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ init_excp_601(env);
+    /* XXX: beware that the dcache line size is 64 bytes
+     * but dcbz uses 32-byte "sectors"
+     * XXX: this breaks the clcs instruction!
+     */
+ env->dcache_line_size = 32;
+ env->icache_line_size = 64;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 601v */
+#define POWERPC_INSNS_601v (PPC_INSNS_BASE | PPC_STRING | PPC_POWER_BR | \
+ PPC_FLOAT | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | \
+ PPC_SEGMENT | PPC_EXTERN)
+#define POWERPC_MSRM_601v (0x000000000000FD70ULL)
+#define POWERPC_MSRR_601v (0x0000000000001040ULL)
+#define POWERPC_MMU_601v (POWERPC_MMU_601)
+#define POWERPC_EXCP_601v (POWERPC_EXCP_601)
+#define POWERPC_INPUT_601v (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_601v (bfd_mach_ppc_601)
+#define POWERPC_FLAG_601v (POWERPC_FLAG_SE | POWERPC_FLAG_RTC_CLK)
+#define check_pow_601v check_pow_none
+
+static void init_proc_601v (CPUPPCState *env)
+{
+ init_proc_601(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_601_HID15, "HID15",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* PowerPC 602 */
+#define POWERPC_INSNS_602 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_6xx_TLB | PPC_MEM_TLBSYNC | \
+ PPC_SEGMENT | PPC_602_SPEC)
+#define POWERPC_MSRM_602 (0x0000000000C7FF73ULL)
+/* XXX: the 602 MMU is quite specific; it should get its own special case */
+#define POWERPC_MMU_602 (POWERPC_MMU_SOFT_6xx)
+//#define POWERPC_EXCP_602 (POWERPC_EXCP_602)
+#define POWERPC_INPUT_602 (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_602 (bfd_mach_ppc_602)
+#define POWERPC_FLAG_602 (POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | \
+ POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK)
+#define check_pow_602 check_pow_hid0
+
+static void init_proc_602 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_602(env);
+ /* Time base */
+ gen_tbl(env);
+ /* hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_602(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 603 */
+#define POWERPC_INSNS_603 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | \
+ PPC_SEGMENT | PPC_EXTERN)
+#define POWERPC_MSRM_603 (0x000000000007FF73ULL)
+#define POWERPC_MMU_603 (POWERPC_MMU_SOFT_6xx)
+//#define POWERPC_EXCP_603 (POWERPC_EXCP_603)
+#define POWERPC_INPUT_603 (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_603 (bfd_mach_ppc_603)
+#define POWERPC_FLAG_603 (POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | \
+ POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK)
+#define check_pow_603 check_pow_hid0
+
+static void init_proc_603 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_603(env);
+ /* Time base */
+ gen_tbl(env);
+ /* hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_603(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 603e */
+#define POWERPC_INSNS_603E (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | \
+ PPC_SEGMENT | PPC_EXTERN)
+#define POWERPC_MSRM_603E (0x000000000007FF73ULL)
+#define POWERPC_MMU_603E (POWERPC_MMU_SOFT_6xx)
+//#define POWERPC_EXCP_603E (POWERPC_EXCP_603E)
+#define POWERPC_INPUT_603E (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_603E (bfd_mach_ppc_ec603e)
+#define POWERPC_FLAG_603E (POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | \
+ POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK)
+#define check_pow_603E check_pow_hid0
+
+static void init_proc_603E (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_603(env);
+ /* Time base */
+ gen_tbl(env);
+ /* hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR, "IABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_603(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 604 */
+#define POWERPC_INSNS_604 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_SEGMENT | PPC_EXTERN)
+#define POWERPC_MSRM_604 (0x000000000005FF77ULL)
+#define POWERPC_MMU_604 (POWERPC_MMU_32B)
+//#define POWERPC_EXCP_604 (POWERPC_EXCP_604)
+#define POWERPC_INPUT_604 (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_604 (bfd_mach_ppc_604)
+#define POWERPC_FLAG_604 (POWERPC_FLAG_SE | POWERPC_FLAG_BE | \
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK)
+#define check_pow_604 check_pow_nocheck
+
+static void init_proc_604 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_604(env);
+ /* Time base */
+ gen_tbl(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ init_excp_604(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 604E */
+#define POWERPC_INSNS_604E (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_SEGMENT | PPC_EXTERN)
+#define POWERPC_MSRM_604E (0x000000000005FF77ULL)
+#define POWERPC_MMU_604E (POWERPC_MMU_32B)
+#define POWERPC_EXCP_604E (POWERPC_EXCP_604)
+#define POWERPC_INPUT_604E (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_604E (bfd_mach_ppc_604)
+#define POWERPC_FLAG_604E (POWERPC_FLAG_SE | POWERPC_FLAG_BE | \
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK)
+#define check_pow_604E check_pow_nocheck
+
+static void init_proc_604E (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_604(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MMCR1, "MMCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_PMC3, "PMC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_PMC4, "PMC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Time base */
+ gen_tbl(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ init_excp_604(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 740 */
+#define POWERPC_INSNS_740 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_SEGMENT | PPC_EXTERN)
+#define POWERPC_MSRM_740 (0x000000000005FF77ULL)
+#define POWERPC_MMU_740 (POWERPC_MMU_32B)
+#define POWERPC_EXCP_740 (POWERPC_EXCP_7x0)
+#define POWERPC_INPUT_740 (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_740 (bfd_mach_ppc_750)
+#define POWERPC_FLAG_740 (POWERPC_FLAG_SE | POWERPC_FLAG_BE | \
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK)
+#define check_pow_740 check_pow_hid0
+
+static void init_proc_740 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* Thermal management */
+ gen_spr_thrm(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ init_excp_7x0(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 750 */
+#define POWERPC_INSNS_750 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_SEGMENT | PPC_EXTERN)
+#define POWERPC_MSRM_750 (0x000000000005FF77ULL)
+#define POWERPC_MMU_750 (POWERPC_MMU_32B)
+#define POWERPC_EXCP_750 (POWERPC_EXCP_7x0)
+#define POWERPC_INPUT_750 (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_750 (bfd_mach_ppc_750)
+#define POWERPC_FLAG_750 (POWERPC_FLAG_SE | POWERPC_FLAG_BE | \
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK)
+#define check_pow_750 check_pow_hid0
+
+static void init_proc_750 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Time base */
+ gen_tbl(env);
+ /* Thermal management */
+ gen_spr_thrm(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+    /* XXX: high BATs are also present but are known to be buggy on
+     *      die revision 1.x
+     */
+ init_excp_7x0(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 750 CL */
+/* XXX: not implemented:
+ * cache lock instructions:
+ * dcbz_l
+ * floating point paired instructions
+ * psq_lux
+ * psq_lx
+ * psq_stux
+ * psq_stx
+ * ps_abs
+ * ps_add
+ * ps_cmpo0
+ * ps_cmpo1
+ * ps_cmpu0
+ * ps_cmpu1
+ * ps_div
+ * ps_madd
+ * ps_madds0
+ * ps_madds1
+ * ps_merge00
+ * ps_merge01
+ * ps_merge10
+ * ps_merge11
+ * ps_mr
+ * ps_msub
+ * ps_mul
+ * ps_muls0
+ * ps_muls1
+ * ps_nabs
+ * ps_neg
+ * ps_nmadd
+ * ps_nmsub
+ * ps_res
+ * ps_rsqrte
+ * ps_sel
+ * ps_sub
+ * ps_sum0
+ * ps_sum1
+ */
+#define POWERPC_INSNS_750cl (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_SEGMENT | PPC_EXTERN)
+#define POWERPC_MSRM_750cl (0x000000000005FF77ULL)
+#define POWERPC_MMU_750cl (POWERPC_MMU_32B)
+#define POWERPC_EXCP_750cl (POWERPC_EXCP_7x0)
+#define POWERPC_INPUT_750cl (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_750cl (bfd_mach_ppc_750)
+#define POWERPC_FLAG_750cl (POWERPC_FLAG_SE | POWERPC_FLAG_BE | \
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK)
+#define check_pow_750cl check_pow_hid0
+
+static void init_proc_750cl (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Time base */
+ gen_tbl(env);
+ /* Thermal management */
+    /* These registers are fake on the 750CL */
+ spr_register(env, SPR_THRM1, "THRM1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_THRM2, "THRM2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_THRM3, "THRM3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX: not implemented */
+ spr_register(env, SPR_750_TDCL, "TDCL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_750_TDCH, "TDCH",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* DMA */
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_WPAR, "WPAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_750_DMAL, "DMAL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_750_DMAU, "DMAU",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750CL_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750CL_HID4, "HID4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Quantization registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR0, "GQR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR1, "GQR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR2, "GQR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR3, "GQR3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR4, "GQR4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR5, "GQR5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR6, "GQR6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR7, "GQR7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ /* PowerPC 750cl has 8 DBATs and 8 IBATs */
+ gen_high_BATs(env);
+ init_excp_750cl(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 750CX */
+#define POWERPC_INSNS_750cx (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_SEGMENT | PPC_EXTERN)
+#define POWERPC_MSRM_750cx (0x000000000005FF77ULL)
+#define POWERPC_MMU_750cx (POWERPC_MMU_32B)
+#define POWERPC_EXCP_750cx (POWERPC_EXCP_7x0)
+#define POWERPC_INPUT_750cx (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_750cx (bfd_mach_ppc_750)
+#define POWERPC_FLAG_750cx (POWERPC_FLAG_SE | POWERPC_FLAG_BE | \
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK)
+#define check_pow_750cx check_pow_hid0
+
+static void init_proc_750cx (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Time base */
+ gen_tbl(env);
+ /* Thermal management */
+ gen_spr_thrm(env);
+ /* This register is not implemented but is present for compatibility */
+ spr_register(env, SPR_SDA, "SDA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ /* PowerPC 750cx has 8 DBATs and 8 IBATs */
+ gen_high_BATs(env);
+ init_excp_750cx(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 750FX */
+#define POWERPC_INSNS_750fx (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_SEGMENT | PPC_EXTERN)
+#define POWERPC_MSRM_750fx (0x000000000005FF77ULL)
+#define POWERPC_MMU_750fx (POWERPC_MMU_32B)
+#define POWERPC_EXCP_750fx (POWERPC_EXCP_7x0)
+#define POWERPC_INPUT_750fx (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_750fx (bfd_mach_ppc_750)
+#define POWERPC_FLAG_750fx (POWERPC_FLAG_SE | POWERPC_FLAG_BE | \
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK)
+#define check_pow_750fx check_pow_hid0
+
+static void init_proc_750fx (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Time base */
+ gen_tbl(env);
+ /* Thermal management */
+ gen_spr_thrm(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_THRM4, "THRM4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750FX_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+    /* PowerPC 750fx & 750gx have 8 DBATs and 8 IBATs */
+ gen_high_BATs(env);
+ init_excp_7x0(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 750GX */
+#define POWERPC_INSNS_750gx (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_SEGMENT | PPC_EXTERN)
+#define POWERPC_MSRM_750gx (0x000000000005FF77ULL)
+#define POWERPC_MMU_750gx (POWERPC_MMU_32B)
+#define POWERPC_EXCP_750gx (POWERPC_EXCP_7x0)
+#define POWERPC_INPUT_750gx (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_750gx (bfd_mach_ppc_750)
+#define POWERPC_FLAG_750gx (POWERPC_FLAG_SE | POWERPC_FLAG_BE | \
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK)
+#define check_pow_750gx check_pow_hid0
+
+static void init_proc_750gx (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* XXX : not implemented (XXX: different from 750fx) */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Time base */
+ gen_tbl(env);
+ /* Thermal management */
+ gen_spr_thrm(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_THRM4, "THRM4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation registers */
+ /* XXX : not implemented (XXX: different from 750fx) */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented (XXX: different from 750fx) */
+ spr_register(env, SPR_750FX_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+    /* PowerPC 750fx & 750gx have 8 DBATs and 8 IBATs */
+ gen_high_BATs(env);
+ init_excp_7x0(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 745 */
+#define POWERPC_INSNS_745 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | \
+ PPC_SEGMENT | PPC_EXTERN)
+#define POWERPC_MSRM_745 (0x000000000005FF77ULL)
+#define POWERPC_MMU_745 (POWERPC_MMU_SOFT_6xx)
+#define POWERPC_EXCP_745 (POWERPC_EXCP_7x5)
+#define POWERPC_INPUT_745 (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_745 (bfd_mach_ppc_750)
+#define POWERPC_FLAG_745 (POWERPC_FLAG_SE | POWERPC_FLAG_BE | \
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK)
+#define check_pow_745 check_pow_hid0
+
+static void init_proc_745 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ gen_spr_G2_755(env);
+ /* Time base */
+ gen_tbl(env);
+ /* Thermal management */
+ gen_spr_thrm(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_high_BATs(env);
+ gen_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_7x5(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 755 */
+#define POWERPC_INSNS_755 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | \
+ PPC_SEGMENT | PPC_EXTERN)
+#define POWERPC_MSRM_755 (0x000000000005FF77ULL)
+#define POWERPC_MMU_755 (POWERPC_MMU_SOFT_6xx)
+#define POWERPC_EXCP_755 (POWERPC_EXCP_7x5)
+#define POWERPC_INPUT_755 (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_755 (bfd_mach_ppc_750)
+#define POWERPC_FLAG_755 (POWERPC_FLAG_SE | POWERPC_FLAG_BE | \
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK)
+#define check_pow_755 check_pow_hid0
+
+static void init_proc_755 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ gen_spr_G2_755(env);
+ /* Time base */
+ gen_tbl(env);
+ /* L2 cache control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2PMCR, "L2PMCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Thermal management */
+ gen_spr_thrm(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_high_BATs(env);
+ gen_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_7x5(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 7400 (aka G4) */
+#define POWERPC_INSNS_7400 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | \
+ PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | \
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_MEM_TLBIA | \
+ PPC_SEGMENT | PPC_EXTERN | \
+ PPC_ALTIVEC)
+#define POWERPC_MSRM_7400 (0x000000000205FF77ULL)
+#define POWERPC_MMU_7400 (POWERPC_MMU_32B)
+#define POWERPC_EXCP_7400 (POWERPC_EXCP_74xx)
+#define POWERPC_INPUT_7400 (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_7400 (bfd_mach_ppc_7400)
+#define POWERPC_FLAG_7400 (POWERPC_FLAG_VRE | POWERPC_FLAG_SE | \
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM | \
+ POWERPC_FLAG_BUS_CLK)
+#define check_pow_7400 check_pow_hid0
+
+static void init_proc_7400 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* 74xx specific SPR */
+ gen_spr_74xx(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UBAMR, "UBAMR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+    /* XXX: this does not seem to be implemented on all revisions. */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSCR1, "MSSCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Thermal management */
+ gen_spr_thrm(env);
+ /* Memory management */
+ gen_low_BATs(env);
+ init_excp_7400(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 7410 (aka G4) */
+#define POWERPC_INSNS_7410 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | \
+ PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | \
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_MEM_TLBIA | \
+ PPC_SEGMENT | PPC_EXTERN | \
+ PPC_ALTIVEC)
+#define POWERPC_MSRM_7410 (0x000000000205FF77ULL)
+#define POWERPC_MMU_7410 (POWERPC_MMU_32B)
+#define POWERPC_EXCP_7410 (POWERPC_EXCP_74xx)
+#define POWERPC_INPUT_7410 (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_7410 (bfd_mach_ppc_7400)
+#define POWERPC_FLAG_7410 (POWERPC_FLAG_VRE | POWERPC_FLAG_SE | \
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM | \
+ POWERPC_FLAG_BUS_CLK)
+#define check_pow_7410 check_pow_hid0
+
+static void init_proc_7410 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* 74xx specific SPR */
+ gen_spr_74xx(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UBAMR, "UBAMR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Thermal management */
+ gen_spr_thrm(env);
+ /* L2PMCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2PMCR, "L2PMCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* LDSTDB */
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTDB, "LDSTDB",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ init_excp_7400(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 7440 (aka G4) */
+#define POWERPC_INSNS_7440 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | \
+ PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | \
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_MEM_TLBIA | PPC_74xx_TLB | \
+ PPC_SEGMENT | PPC_EXTERN | \
+ PPC_ALTIVEC)
+#define POWERPC_MSRM_7440 (0x000000000205FF77ULL)
+#define POWERPC_MMU_7440 (POWERPC_MMU_SOFT_74xx)
+#define POWERPC_EXCP_7440 (POWERPC_EXCP_74xx)
+#define POWERPC_INPUT_7440 (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_7440 (bfd_mach_ppc_7400)
+#define POWERPC_FLAG_7440 (POWERPC_FLAG_VRE | POWERPC_FLAG_SE | \
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM | \
+ POWERPC_FLAG_BUS_CLK)
+#define check_pow_7440 check_pow_hid0_74xx
+
+__attribute__ (( unused ))
+static void init_proc_7440 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* 74xx specific SPR */
+ gen_spr_74xx(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UBAMR, "UBAMR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* LDSTCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTCR, "LDSTCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* ICTRL */
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTRL, "ICTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* MSSSR0 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSSR0, "MSSSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* PMC */
+ /* XXX : not implemented */
+ spr_register(env, SPR_PMC5, "PMC5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UPMC5, "UPMC5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_PMC6, "PMC6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UPMC6, "UPMC6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_74xx_soft_tlb(env, 128, 2);
+ init_excp_7450(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 7450 (aka G4) */
+#define POWERPC_INSNS_7450 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | \
+ PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | \
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_MEM_TLBIA | PPC_74xx_TLB | \
+ PPC_SEGMENT | PPC_EXTERN | \
+ PPC_ALTIVEC)
+#define POWERPC_MSRM_7450 (0x000000000205FF77ULL)
+#define POWERPC_MMU_7450 (POWERPC_MMU_SOFT_74xx)
+#define POWERPC_EXCP_7450 (POWERPC_EXCP_74xx)
+#define POWERPC_INPUT_7450 (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_7450 (bfd_mach_ppc_7400)
+#define POWERPC_FLAG_7450 (POWERPC_FLAG_VRE | POWERPC_FLAG_SE | \
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM | \
+ POWERPC_FLAG_BUS_CLK)
+#define check_pow_7450 check_pow_hid0_74xx
+
+__attribute__ (( unused ))
+static void init_proc_7450 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* 74xx specific SPR */
+ gen_spr_74xx(env);
+ /* Level 3 cache control */
+ gen_l3_ctrl(env);
+ /* L3ITCR1 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR1, "L3ITCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3ITCR2 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR2, "L3ITCR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3ITCR3 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR3, "L3ITCR3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3OHCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3OHCR, "L3OHCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UBAMR, "UBAMR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* LDSTCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTCR, "LDSTCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* ICTRL */
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTRL, "ICTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* MSSSR0 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSSR0, "MSSSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* PMC */
+ /* XXX : not implemented */
+ spr_register(env, SPR_PMC5, "PMC5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UPMC5, "UPMC5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_PMC6, "PMC6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UPMC6, "UPMC6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_74xx_soft_tlb(env, 128, 2);
+ init_excp_7450(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 7445 (aka G4) */
+#define POWERPC_INSNS_7445 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | \
+ PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | \
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_MEM_TLBIA | PPC_74xx_TLB | \
+ PPC_SEGMENT | PPC_EXTERN | \
+ PPC_ALTIVEC)
+#define POWERPC_MSRM_7445 (0x000000000205FF77ULL)
+#define POWERPC_MMU_7445 (POWERPC_MMU_SOFT_74xx)
+#define POWERPC_EXCP_7445 (POWERPC_EXCP_74xx)
+#define POWERPC_INPUT_7445 (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_7445 (bfd_mach_ppc_7400)
+#define POWERPC_FLAG_7445 (POWERPC_FLAG_VRE | POWERPC_FLAG_SE | \
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM | \
+ POWERPC_FLAG_BUS_CLK)
+#define check_pow_7445 check_pow_hid0_74xx
+
+__attribute__ (( unused ))
+static void init_proc_7445 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* 74xx specific SPR */
+ gen_spr_74xx(env);
+ /* LDSTCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTCR, "LDSTCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* ICTRL */
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTRL, "ICTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* MSSSR0 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSSR0, "MSSSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* PMC */
+ /* XXX : not implemented */
+ spr_register(env, SPR_PMC5, "PMC5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UPMC5, "UPMC5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_PMC6, "PMC6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UPMC6, "UPMC6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* SPRGs */
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG4, "USPRG4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG5, "SPRG5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG5, "USPRG5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG6, "SPRG6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG6, "USPRG6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG7, "SPRG7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG7, "USPRG7",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_high_BATs(env);
+ gen_74xx_soft_tlb(env, 128, 2);
+ init_excp_7450(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 7455 (aka G4) */
+#define POWERPC_INSNS_7455 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | \
+ PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | \
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_MEM_TLBIA | PPC_74xx_TLB | \
+ PPC_SEGMENT | PPC_EXTERN | \
+ PPC_ALTIVEC)
+#define POWERPC_MSRM_7455 (0x000000000205FF77ULL)
+#define POWERPC_MMU_7455 (POWERPC_MMU_SOFT_74xx)
+#define POWERPC_EXCP_7455 (POWERPC_EXCP_74xx)
+#define POWERPC_INPUT_7455 (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_7455 (bfd_mach_ppc_7400)
+#define POWERPC_FLAG_7455 (POWERPC_FLAG_VRE | POWERPC_FLAG_SE | \
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM | \
+ POWERPC_FLAG_BUS_CLK)
+#define check_pow_7455 check_pow_hid0_74xx
+
+__attribute__ (( unused ))
+static void init_proc_7455 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* 74xx specific SPR */
+ gen_spr_74xx(env);
+ /* Level 3 cache control */
+ gen_l3_ctrl(env);
+ /* LDSTCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTCR, "LDSTCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* ICTRL */
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTRL, "ICTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* MSSSR0 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSSR0, "MSSSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* PMC */
+ /* XXX : not implemented */
+ spr_register(env, SPR_PMC5, "PMC5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UPMC5, "UPMC5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_PMC6, "PMC6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UPMC6, "UPMC6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* SPRGs */
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG4, "USPRG4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG5, "SPRG5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG5, "USPRG5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG6, "SPRG6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG6, "USPRG6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG7, "SPRG7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG7, "USPRG7",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_high_BATs(env);
+ gen_74xx_soft_tlb(env, 128, 2);
+ init_excp_7450(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+/* PowerPC 7457 (aka G4) */
+#define POWERPC_INSNS_7457 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | \
+ PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | \
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_MEM_TLBIA | PPC_74xx_TLB | \
+ PPC_SEGMENT | PPC_EXTERN | \
+ PPC_ALTIVEC)
+#define POWERPC_MSRM_7457 (0x000000000205FF77ULL)
+#define POWERPC_MMU_7457 (POWERPC_MMU_SOFT_74xx)
+#define POWERPC_EXCP_7457 (POWERPC_EXCP_74xx)
+#define POWERPC_INPUT_7457 (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_7457 (bfd_mach_ppc_7400)
+#define POWERPC_FLAG_7457 (POWERPC_FLAG_VRE | POWERPC_FLAG_SE | \
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM | \
+ POWERPC_FLAG_BUS_CLK)
+#define check_pow_7457 check_pow_hid0_74xx
+
+__attribute__ (( unused ))
+static void init_proc_7457 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* 74xx specific SPR */
+ gen_spr_74xx(env);
+ /* Level 3 cache control */
+ gen_l3_ctrl(env);
+ /* L3ITCR1 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR1, "L3ITCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3ITCR2 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR2, "L3ITCR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3ITCR3 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR3, "L3ITCR3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3OHCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3OHCR, "L3OHCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* LDSTCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTCR, "LDSTCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* ICTRL */
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTRL, "ICTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* MSSSR0 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSSR0, "MSSSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* PMC */
+ /* XXX : not implemented */
+ spr_register(env, SPR_PMC5, "PMC5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UPMC5, "UPMC5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_PMC6, "PMC6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UPMC6, "UPMC6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* SPRGs */
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG4, "USPRG4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG5, "SPRG5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG5, "USPRG5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG6, "SPRG6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG6, "USPRG6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG7, "SPRG7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG7, "USPRG7",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_high_BATs(env);
+ gen_74xx_soft_tlb(env, 128, 2);
+ init_excp_7450(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+
+#if defined (TARGET_PPC64)
+/* PowerPC 970 */
+#define POWERPC_INSNS_970 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | \
+ PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZT | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_64B | PPC_ALTIVEC | \
+ PPC_SEGMENT_64B | PPC_SLBI)
+#define POWERPC_MSRM_970 (0x900000000204FF36ULL)
+#define POWERPC_MMU_970 (POWERPC_MMU_64B)
+//#define POWERPC_EXCP_970 (POWERPC_EXCP_970)
+#define POWERPC_INPUT_970 (PPC_FLAGS_INPUT_970)
+#define POWERPC_BFDM_970 (bfd_mach_ppc64)
+#define POWERPC_FLAG_970 (POWERPC_FLAG_VRE | POWERPC_FLAG_SE | \
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM | \
+ POWERPC_FLAG_BUS_CLK)
+
+#if defined(CONFIG_USER_ONLY)
+#define POWERPC970_HID5_INIT 0x00000080
+#else
+#define POWERPC970_HID5_INIT 0x00000000
+#endif
+
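+/* Note (editorial assumption, not from the original source): the check_pow_*
+ * hooks gate entry into power-saving mode, so the mask tested below presumably
+ * covers the 970's HID0 power-management enable bits; the exact bit names are
+ * not confirmed here. */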
+static int check_pow_970 (CPUPPCState *env)
+{
+ if (env->spr[SPR_HID0] & 0x00600000)
+ return 1;
+
+ return 0;
+}
+
+static void init_proc_970 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_clear,
+ 0x60000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750FX_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_970_HID5, "HID5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ POWERPC970_HID5_INIT);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ /* XXX: not correct */
+ gen_low_BATs(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MMUCFG, "MMUCFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000); /* TOFIX */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MMUCSR0, "MMUCSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000); /* TOFIX */
+ spr_register(env, SPR_HIOR, "SPR_HIOR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_hior, &spr_write_hior,
+ 0x00000000);
+#if !defined(CONFIG_USER_ONLY)
+ env->slb_nr = 32;
+#endif
+ init_excp_970(env);
+ env->dcache_line_size = 128;
+ env->icache_line_size = 128;
+ /* Allocate hardware IRQ controller */
+ ppc970_irq_init(env);
+ /* Can't find information on what this should be on reset. This
+ * value is the one used by 74xx processors. */
+ vscr_init(env, 0x00010000);
+}
+
+/* PowerPC 970FX (aka G5) */
+#define POWERPC_INSNS_970FX (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | \
+ PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZT | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_64B | PPC_ALTIVEC | \
+ PPC_SEGMENT_64B | PPC_SLBI)
+#define POWERPC_MSRM_970FX (0x800000000204FF36ULL)
+#define POWERPC_MMU_970FX (POWERPC_MMU_64B)
+#define POWERPC_EXCP_970FX (POWERPC_EXCP_970)
+#define POWERPC_INPUT_970FX (PPC_FLAGS_INPUT_970)
+#define POWERPC_BFDM_970FX (bfd_mach_ppc64)
+#define POWERPC_FLAG_970FX (POWERPC_FLAG_VRE | POWERPC_FLAG_SE | \
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM | \
+ POWERPC_FLAG_BUS_CLK)
+
+static int check_pow_970FX (CPUPPCState *env)
+{
+ if (env->spr[SPR_HID0] & 0x00600000)
+ return 1;
+
+ return 0;
+}
+
+static void init_proc_970FX (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_clear,
+ 0x60000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750FX_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_970_HID5, "HID5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ POWERPC970_HID5_INIT);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ /* XXX: not correct */
+ gen_low_BATs(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MMUCFG, "MMUCFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000); /* TOFIX */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MMUCSR0, "MMUCSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000); /* TOFIX */
+ spr_register(env, SPR_HIOR, "SPR_HIOR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_hior, &spr_write_hior,
+ 0x00000000);
+ spr_register(env, SPR_CTRL, "SPR_CTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_UCTRL, "SPR_UCTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_VRSAVE, "SPR_VRSAVE",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+#if !defined(CONFIG_USER_ONLY)
+ env->slb_nr = 64;
+#endif
+ init_excp_970(env);
+ env->dcache_line_size = 128;
+ env->icache_line_size = 128;
+ /* Allocate hardware IRQ controller */
+ ppc970_irq_init(env);
+ /* Can't find information on what this should be on reset. This
+ * value is the one used by 74xx processors. */
+ vscr_init(env, 0x00010000);
+}
+
+/* PowerPC 970GX */
+#define POWERPC_INSNS_970GX (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | \
+ PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZT | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_64B | PPC_ALTIVEC | \
+ PPC_SEGMENT_64B | PPC_SLBI)
+#define POWERPC_MSRM_970GX (0x800000000204FF36ULL)
+#define POWERPC_MMU_970GX (POWERPC_MMU_64B)
+#define POWERPC_EXCP_970GX (POWERPC_EXCP_970)
+#define POWERPC_INPUT_970GX (PPC_FLAGS_INPUT_970)
+#define POWERPC_BFDM_970GX (bfd_mach_ppc64)
+#define POWERPC_FLAG_970GX (POWERPC_FLAG_VRE | POWERPC_FLAG_SE | \
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM | \
+ POWERPC_FLAG_BUS_CLK)
+
+static int check_pow_970GX (CPUPPCState *env)
+{
+ if (env->spr[SPR_HID0] & 0x00600000)
+ return 1;
+
+ return 0;
+}
+
+static void init_proc_970GX (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_clear,
+ 0x60000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750FX_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_970_HID5, "HID5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ POWERPC970_HID5_INIT);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ /* XXX: not correct */
+ gen_low_BATs(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MMUCFG, "MMUCFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000); /* TOFIX */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MMUCSR0, "MMUCSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000); /* TOFIX */
+ spr_register(env, SPR_HIOR, "SPR_HIOR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_hior, &spr_write_hior,
+ 0x00000000);
+#if !defined(CONFIG_USER_ONLY)
+ env->slb_nr = 32;
+#endif
+ init_excp_970(env);
+ env->dcache_line_size = 128;
+ env->icache_line_size = 128;
+ /* Allocate hardware IRQ controller */
+ ppc970_irq_init(env);
+ /* Can't find information on what this should be on reset. This
+ * value is the one used by 74xx processors. */
+ vscr_init(env, 0x00010000);
+}
+
+/* PowerPC 970MP */
+#define POWERPC_INSNS_970MP (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | \
+ PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZT | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_64B | PPC_ALTIVEC | \
+ PPC_SEGMENT_64B | PPC_SLBI)
+#define POWERPC_MSRM_970MP (0x900000000204FF36ULL)
+#define POWERPC_MMU_970MP (POWERPC_MMU_64B)
+#define POWERPC_EXCP_970MP (POWERPC_EXCP_970)
+#define POWERPC_INPUT_970MP (PPC_FLAGS_INPUT_970)
+#define POWERPC_BFDM_970MP (bfd_mach_ppc64)
+#define POWERPC_FLAG_970MP (POWERPC_FLAG_VRE | POWERPC_FLAG_SE | \
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM | \
+ POWERPC_FLAG_BUS_CLK)
+
+static int check_pow_970MP (CPUPPCState *env)
+{
+ if (env->spr[SPR_HID0] & 0x01C00000)
+ return 1;
+
+ return 0;
+}
+
+static void init_proc_970MP (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_clear,
+ 0x60000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750FX_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_970_HID5, "HID5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ POWERPC970_HID5_INIT);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ /* XXX: not correct */
+ gen_low_BATs(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MMUCFG, "MMUCFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000); /* TOFIX */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MMUCSR0, "MMUCSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000); /* TOFIX */
+ spr_register(env, SPR_HIOR, "SPR_HIOR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_hior, &spr_write_hior,
+ 0x00000000);
+#if !defined(CONFIG_USER_ONLY)
+ env->slb_nr = 32;
+#endif
+ init_excp_970(env);
+ env->dcache_line_size = 128;
+ env->icache_line_size = 128;
+ /* Allocate hardware IRQ controller */
+ ppc970_irq_init(env);
+ /* Can't find information on what this should be on reset. This
+ * value is the one used by 74xx processors. */
+ vscr_init(env, 0x00010000);
+}
+
+/* PowerPC 620 */
+#define POWERPC_INSNS_620 (PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | \
+ PPC_FLOAT_STFIWX | \
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | \
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \
+ PPC_SEGMENT | PPC_EXTERN | \
+ PPC_64B | PPC_SLBI)
+#define POWERPC_MSRM_620 (0x800000000005FF77ULL)
+//#define POWERPC_MMU_620 (POWERPC_MMU_620)
+#define POWERPC_EXCP_620 (POWERPC_EXCP_970)
+#define POWERPC_INPUT_620 (PPC_FLAGS_INPUT_6xx)
+#define POWERPC_BFDM_620 (bfd_mach_ppc64)
+#define POWERPC_FLAG_620 (POWERPC_FLAG_SE | POWERPC_FLAG_BE | \
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK)
+#define check_pow_620 check_pow_nocheck /* Check this */
+
+__attribute__ (( unused ))
+static void init_proc_620 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_620(env);
+ /* Time base */
+ gen_tbl(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ init_excp_620(env);
+ env->dcache_line_size = 64;
+ env->icache_line_size = 64;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env);
+}
+#endif /* defined (TARGET_PPC64) */
+
+/* Default 32-bit PowerPC target will be the 604 */
+#define CPU_POWERPC_PPC32 CPU_POWERPC_604
+#define POWERPC_INSNS_PPC32 POWERPC_INSNS_604
+#define POWERPC_MSRM_PPC32 POWERPC_MSRM_604
+#define POWERPC_MMU_PPC32 POWERPC_MMU_604
+#define POWERPC_EXCP_PPC32 POWERPC_EXCP_604
+#define POWERPC_INPUT_PPC32 POWERPC_INPUT_604
+#define POWERPC_BFDM_PPC32 POWERPC_BFDM_604
+#define POWERPC_FLAG_PPC32 POWERPC_FLAG_604
+#define check_pow_PPC32 check_pow_604
+#define init_proc_PPC32 init_proc_604
+
+/* Default 64-bit PowerPC target will be the 970FX */
+#define CPU_POWERPC_PPC64 CPU_POWERPC_970FX
+#define POWERPC_INSNS_PPC64 POWERPC_INSNS_970FX
+#define POWERPC_MSRM_PPC64 POWERPC_MSRM_970FX
+#define POWERPC_MMU_PPC64 POWERPC_MMU_970FX
+#define POWERPC_EXCP_PPC64 POWERPC_EXCP_970FX
+#define POWERPC_INPUT_PPC64 POWERPC_INPUT_970FX
+#define POWERPC_BFDM_PPC64 POWERPC_BFDM_970FX
+#define POWERPC_FLAG_PPC64 POWERPC_FLAG_970FX
+#define check_pow_PPC64 check_pow_970FX
+#define init_proc_PPC64 init_proc_970FX
+
+/* Default PowerPC target will be the 32-bit PowerPC (PPC32) */
+#if defined (TARGET_PPC64) && 0 // XXX: TODO
+#define CPU_POWERPC_DEFAULT CPU_POWERPC_PPC64
+#define POWERPC_INSNS_DEFAULT POWERPC_INSNS_PPC64
+#define POWERPC_MSRM_DEFAULT POWERPC_MSRM_PPC64
+#define POWERPC_MMU_DEFAULT POWERPC_MMU_PPC64
+#define POWERPC_EXCP_DEFAULT POWERPC_EXCP_PPC64
+#define POWERPC_INPUT_DEFAULT POWERPC_INPUT_PPC64
+#define POWERPC_BFDM_DEFAULT POWERPC_BFDM_PPC64
+#define POWERPC_FLAG_DEFAULT POWERPC_FLAG_PPC64
+#define check_pow_DEFAULT check_pow_PPC64
+#define init_proc_DEFAULT init_proc_PPC64
+#else
+#define CPU_POWERPC_DEFAULT CPU_POWERPC_PPC32
+#define POWERPC_INSNS_DEFAULT POWERPC_INSNS_PPC32
+#define POWERPC_MSRM_DEFAULT POWERPC_MSRM_PPC32
+#define POWERPC_MMU_DEFAULT POWERPC_MMU_PPC32
+#define POWERPC_EXCP_DEFAULT POWERPC_EXCP_PPC32
+#define POWERPC_INPUT_DEFAULT POWERPC_INPUT_PPC32
+#define POWERPC_BFDM_DEFAULT POWERPC_BFDM_PPC32
+#define POWERPC_FLAG_DEFAULT POWERPC_FLAG_PPC32
+#define check_pow_DEFAULT check_pow_PPC32
+#define init_proc_DEFAULT init_proc_PPC32
+#endif
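+/* Note: with the "&& 0" guard above, the 32-bit definitions are always
+ * selected as the default target, even for TARGET_PPC64 builds, until the
+ * 64-bit default is enabled (see the XXX: TODO). */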
+
+/*****************************************************************************/
+/* PVR definitions for most known PowerPC implementations */
+enum {
+ /* PowerPC 401 family */
+ /* Generic PowerPC 401 */
+#define CPU_POWERPC_401 CPU_POWERPC_401G2
+ /* PowerPC 401 cores */
+ CPU_POWERPC_401A1 = 0x00210000,
+ CPU_POWERPC_401B2 = 0x00220000,
+#if 0
+ CPU_POWERPC_401B3 = xxx,
+#endif
+ CPU_POWERPC_401C2 = 0x00230000,
+ CPU_POWERPC_401D2 = 0x00240000,
+ CPU_POWERPC_401E2 = 0x00250000,
+ CPU_POWERPC_401F2 = 0x00260000,
+ CPU_POWERPC_401G2 = 0x00270000,
+ /* PowerPC 401 microcontrollers */
+#if 0
+ CPU_POWERPC_401GF = xxx,
+#endif
+#define CPU_POWERPC_IOP480 CPU_POWERPC_401B2
+ /* IBM Processor for Network Resources */
+ CPU_POWERPC_COBRA = 0x10100000, /* XXX: 405 ? */
+#if 0
+ CPU_POWERPC_XIPCHIP = xxx,
+#endif
+ /* PowerPC 403 family */
+ /* Generic PowerPC 403 */
+#define CPU_POWERPC_403 CPU_POWERPC_403GC
+ /* PowerPC 403 microcontrollers */
+ CPU_POWERPC_403GA = 0x00200011,
+ CPU_POWERPC_403GB = 0x00200100,
+ CPU_POWERPC_403GC = 0x00200200,
+ CPU_POWERPC_403GCX = 0x00201400,
+#if 0
+ CPU_POWERPC_403GP = xxx,
+#endif
+ /* PowerPC 405 family */
+ /* Generic PowerPC 405 */
+#define CPU_POWERPC_405 CPU_POWERPC_405D4
+ /* PowerPC 405 cores */
+#if 0
+ CPU_POWERPC_405A3 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405A4 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405B3 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405B4 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405C3 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405C4 = xxx,
+#endif
+ CPU_POWERPC_405D2 = 0x20010000,
+#if 0
+ CPU_POWERPC_405D3 = xxx,
+#endif
+ CPU_POWERPC_405D4 = 0x41810000,
+#if 0
+ CPU_POWERPC_405D5 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405E4 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405F4 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405F5 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405F6 = xxx,
+#endif
+ /* PowerPC 405 microcontrollers */
+ /* XXX: missing 0x200108a0 */
+#define CPU_POWERPC_405CR CPU_POWERPC_405CRc
+ CPU_POWERPC_405CRa = 0x40110041,
+ CPU_POWERPC_405CRb = 0x401100C5,
+ CPU_POWERPC_405CRc = 0x40110145,
+ CPU_POWERPC_405EP = 0x51210950,
+#if 0
+ CPU_POWERPC_405EXr = xxx,
+#endif
+ CPU_POWERPC_405EZ = 0x41511460, /* 0x51210950 ? */
+#if 0
+ CPU_POWERPC_405FX = xxx,
+#endif
+#define CPU_POWERPC_405GP CPU_POWERPC_405GPd
+ CPU_POWERPC_405GPa = 0x40110000,
+ CPU_POWERPC_405GPb = 0x40110040,
+ CPU_POWERPC_405GPc = 0x40110082,
+ CPU_POWERPC_405GPd = 0x401100C4,
+#define CPU_POWERPC_405GPe CPU_POWERPC_405CRc
+ CPU_POWERPC_405GPR = 0x50910951,
+#if 0
+ CPU_POWERPC_405H = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405L = xxx,
+#endif
+ CPU_POWERPC_405LP = 0x41F10000,
+#if 0
+ CPU_POWERPC_405PM = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405PS = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405S = xxx,
+#endif
+ /* IBM network processors */
+ CPU_POWERPC_NPE405H = 0x414100C0,
+ CPU_POWERPC_NPE405H2 = 0x41410140,
+ CPU_POWERPC_NPE405L = 0x416100C0,
+ CPU_POWERPC_NPE4GS3 = 0x40B10000,
+#if 0
+ CPU_POWERPC_NPCxx1 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_NPR161 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_LC77700 = xxx,
+#endif
+ /* IBM STBxxx (PowerPC 401/403/405 core-based microcontrollers) */
+#if 0
+ CPU_POWERPC_STB01000 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_STB01010 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_STB0210 = xxx, /* 401B3 */
+#endif
+ CPU_POWERPC_STB03 = 0x40310000, /* 0x40130000 ? */
+#if 0
+ CPU_POWERPC_STB043 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_STB045 = xxx,
+#endif
+ CPU_POWERPC_STB04 = 0x41810000,
+ CPU_POWERPC_STB25 = 0x51510950,
+#if 0
+ CPU_POWERPC_STB130 = xxx,
+#endif
+ /* Xilinx cores */
+ CPU_POWERPC_X2VP4 = 0x20010820,
+#define CPU_POWERPC_X2VP7 CPU_POWERPC_X2VP4
+ CPU_POWERPC_X2VP20 = 0x20010860,
+#define CPU_POWERPC_X2VP50 CPU_POWERPC_X2VP20
+#if 0
+ CPU_POWERPC_ZL10310 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_ZL10311 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_ZL10320 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_ZL10321 = xxx,
+#endif
+ /* PowerPC 440 family */
+ /* Generic PowerPC 440 */
+#define CPU_POWERPC_440 CPU_POWERPC_440GXf
+ /* PowerPC 440 cores */
+#if 0
+ CPU_POWERPC_440A4 = xxx,
+#endif
+ CPU_POWERPC_440_XILINX = 0x7ff21910,
+#if 0
+ CPU_POWERPC_440A5 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_440B4 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_440F5 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_440G5 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_440H4 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_440H6 = xxx,
+#endif
+ /* PowerPC 440 microcontrollers */
+#define CPU_POWERPC_440EP CPU_POWERPC_440EPb
+ CPU_POWERPC_440EPa = 0x42221850,
+ CPU_POWERPC_440EPb = 0x422218D3,
+#define CPU_POWERPC_440GP CPU_POWERPC_440GPc
+ CPU_POWERPC_440GPb = 0x40120440,
+ CPU_POWERPC_440GPc = 0x40120481,
+#define CPU_POWERPC_440GR CPU_POWERPC_440GRa
+#define CPU_POWERPC_440GRa CPU_POWERPC_440EPb
+ CPU_POWERPC_440GRX = 0x200008D0,
+#define CPU_POWERPC_440EPX CPU_POWERPC_440GRX
+#define CPU_POWERPC_440GX CPU_POWERPC_440GXf
+ CPU_POWERPC_440GXa = 0x51B21850,
+ CPU_POWERPC_440GXb = 0x51B21851,
+ CPU_POWERPC_440GXc = 0x51B21892,
+ CPU_POWERPC_440GXf = 0x51B21894,
+#if 0
+ CPU_POWERPC_440S = xxx,
+#endif
+ CPU_POWERPC_440SP = 0x53221850,
+ CPU_POWERPC_440SP2 = 0x53221891,
+ CPU_POWERPC_440SPE = 0x53421890,
+ /* PowerPC 460 family */
+#if 0
+ /* Generic PowerPC 464 */
+#define CPU_POWERPC_464 CPU_POWERPC_464H90
+#endif
+ /* PowerPC 464 microcontrollers */
+#if 0
+ CPU_POWERPC_464H90 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_464H90FP = xxx,
+#endif
+ /* Freescale embedded PowerPC cores */
+ /* PowerPC MPC 5xx cores (aka RCPU) */
+ CPU_POWERPC_MPC5xx = 0x00020020,
+#define CPU_POWERPC_MGT560 CPU_POWERPC_MPC5xx
+#define CPU_POWERPC_MPC509 CPU_POWERPC_MPC5xx
+#define CPU_POWERPC_MPC533 CPU_POWERPC_MPC5xx
+#define CPU_POWERPC_MPC534 CPU_POWERPC_MPC5xx
+#define CPU_POWERPC_MPC555 CPU_POWERPC_MPC5xx
+#define CPU_POWERPC_MPC556 CPU_POWERPC_MPC5xx
+#define CPU_POWERPC_MPC560 CPU_POWERPC_MPC5xx
+#define CPU_POWERPC_MPC561 CPU_POWERPC_MPC5xx
+#define CPU_POWERPC_MPC562 CPU_POWERPC_MPC5xx
+#define CPU_POWERPC_MPC563 CPU_POWERPC_MPC5xx
+#define CPU_POWERPC_MPC564 CPU_POWERPC_MPC5xx
+#define CPU_POWERPC_MPC565 CPU_POWERPC_MPC5xx
+#define CPU_POWERPC_MPC566 CPU_POWERPC_MPC5xx
+ /* PowerPC MPC 8xx cores (aka PowerQUICC) */
+ CPU_POWERPC_MPC8xx = 0x00500000,
+#define CPU_POWERPC_MGT823 CPU_POWERPC_MPC8xx
+#define CPU_POWERPC_MPC821 CPU_POWERPC_MPC8xx
+#define CPU_POWERPC_MPC823 CPU_POWERPC_MPC8xx
+#define CPU_POWERPC_MPC850 CPU_POWERPC_MPC8xx
+#define CPU_POWERPC_MPC852T CPU_POWERPC_MPC8xx
+#define CPU_POWERPC_MPC855T CPU_POWERPC_MPC8xx
+#define CPU_POWERPC_MPC857 CPU_POWERPC_MPC8xx
+#define CPU_POWERPC_MPC859 CPU_POWERPC_MPC8xx
+#define CPU_POWERPC_MPC860 CPU_POWERPC_MPC8xx
+#define CPU_POWERPC_MPC862 CPU_POWERPC_MPC8xx
+#define CPU_POWERPC_MPC866 CPU_POWERPC_MPC8xx
+#define CPU_POWERPC_MPC870 CPU_POWERPC_MPC8xx
+#define CPU_POWERPC_MPC875 CPU_POWERPC_MPC8xx
+#define CPU_POWERPC_MPC880 CPU_POWERPC_MPC8xx
+#define CPU_POWERPC_MPC885 CPU_POWERPC_MPC8xx
+ /* G2 cores (aka PowerQUICC-II) */
+ CPU_POWERPC_G2 = 0x00810011,
+ CPU_POWERPC_G2H4 = 0x80811010,
+ CPU_POWERPC_G2gp = 0x80821010,
+ CPU_POWERPC_G2ls = 0x90810010,
+ CPU_POWERPC_MPC603 = 0x00810100,
+ CPU_POWERPC_G2_HIP3 = 0x00810101,
+ CPU_POWERPC_G2_HIP4 = 0x80811014,
+ /* G2_LE core (aka PowerQUICC-II) */
+ CPU_POWERPC_G2LE = 0x80820010,
+ CPU_POWERPC_G2LEgp = 0x80822010,
+ CPU_POWERPC_G2LEls = 0xA0822010,
+ CPU_POWERPC_G2LEgp1 = 0x80822011,
+ CPU_POWERPC_G2LEgp3 = 0x80822013,
+ /* MPC52xx microcontrollers */
+ /* XXX: MPC 5121 ? */
+#define CPU_POWERPC_MPC52xx CPU_POWERPC_MPC5200
+#define CPU_POWERPC_MPC5200 CPU_POWERPC_MPC5200_v12
+#define CPU_POWERPC_MPC5200_v10 CPU_POWERPC_G2LEgp1
+#define CPU_POWERPC_MPC5200_v11 CPU_POWERPC_G2LEgp1
+#define CPU_POWERPC_MPC5200_v12 CPU_POWERPC_G2LEgp1
+#define CPU_POWERPC_MPC5200B CPU_POWERPC_MPC5200B_v21
+#define CPU_POWERPC_MPC5200B_v20 CPU_POWERPC_G2LEgp1
+#define CPU_POWERPC_MPC5200B_v21 CPU_POWERPC_G2LEgp1
+ /* MPC82xx microcontrollers */
+#define CPU_POWERPC_MPC82xx CPU_POWERPC_MPC8280
+#define CPU_POWERPC_MPC8240 CPU_POWERPC_MPC603
+#define CPU_POWERPC_MPC8241 CPU_POWERPC_G2_HIP4
+#define CPU_POWERPC_MPC8245 CPU_POWERPC_G2_HIP4
+#define CPU_POWERPC_MPC8247 CPU_POWERPC_G2LEgp3
+#define CPU_POWERPC_MPC8248 CPU_POWERPC_G2LEgp3
+#define CPU_POWERPC_MPC8250 CPU_POWERPC_MPC8250_HiP4
+#define CPU_POWERPC_MPC8250_HiP3 CPU_POWERPC_G2_HIP3
+#define CPU_POWERPC_MPC8250_HiP4 CPU_POWERPC_G2_HIP4
+#define CPU_POWERPC_MPC8255 CPU_POWERPC_MPC8255_HiP4
+#define CPU_POWERPC_MPC8255_HiP3 CPU_POWERPC_G2_HIP3
+#define CPU_POWERPC_MPC8255_HiP4 CPU_POWERPC_G2_HIP4
+#define CPU_POWERPC_MPC8260 CPU_POWERPC_MPC8260_HiP4
+#define CPU_POWERPC_MPC8260_HiP3 CPU_POWERPC_G2_HIP3
+#define CPU_POWERPC_MPC8260_HiP4 CPU_POWERPC_G2_HIP4
+#define CPU_POWERPC_MPC8264 CPU_POWERPC_MPC8264_HiP4
+#define CPU_POWERPC_MPC8264_HiP3 CPU_POWERPC_G2_HIP3
+#define CPU_POWERPC_MPC8264_HiP4 CPU_POWERPC_G2_HIP4
+#define CPU_POWERPC_MPC8265 CPU_POWERPC_MPC8265_HiP4
+#define CPU_POWERPC_MPC8265_HiP3 CPU_POWERPC_G2_HIP3
+#define CPU_POWERPC_MPC8265_HiP4 CPU_POWERPC_G2_HIP4
+#define CPU_POWERPC_MPC8266 CPU_POWERPC_MPC8266_HiP4
+#define CPU_POWERPC_MPC8266_HiP3 CPU_POWERPC_G2_HIP3
+#define CPU_POWERPC_MPC8266_HiP4 CPU_POWERPC_G2_HIP4
+#define CPU_POWERPC_MPC8270 CPU_POWERPC_G2LEgp3
+#define CPU_POWERPC_MPC8271 CPU_POWERPC_G2LEgp3
+#define CPU_POWERPC_MPC8272 CPU_POWERPC_G2LEgp3
+#define CPU_POWERPC_MPC8275 CPU_POWERPC_G2LEgp3
+#define CPU_POWERPC_MPC8280 CPU_POWERPC_G2LEgp3
+ /* e200 family */
+ /* e200 cores */
+#define CPU_POWERPC_e200 CPU_POWERPC_e200z6
+#if 0
+ CPU_POWERPC_e200z0 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_e200z1 = xxx,
+#endif
+#if 0 /* ? */
+ CPU_POWERPC_e200z3 = 0x81120000,
+#endif
+ CPU_POWERPC_e200z5 = 0x81000000,
+ CPU_POWERPC_e200z6 = 0x81120000,
+ /* MPC55xx microcontrollers */
+#define CPU_POWERPC_MPC55xx CPU_POWERPC_MPC5567
+#if 0
+#define CPU_POWERPC_MPC5514E CPU_POWERPC_MPC5514E_v1
+#define CPU_POWERPC_MPC5514E_v0 CPU_POWERPC_e200z0
+#define CPU_POWERPC_MPC5514E_v1 CPU_POWERPC_e200z1
+#define CPU_POWERPC_MPC5514G CPU_POWERPC_MPC5514G_v1
+#define CPU_POWERPC_MPC5514G_v0 CPU_POWERPC_e200z0
+#define CPU_POWERPC_MPC5514G_v1 CPU_POWERPC_e200z1
+#define CPU_POWERPC_MPC5515S CPU_POWERPC_e200z1
+#define CPU_POWERPC_MPC5516E CPU_POWERPC_MPC5516E_v1
+#define CPU_POWERPC_MPC5516E_v0 CPU_POWERPC_e200z0
+#define CPU_POWERPC_MPC5516E_v1 CPU_POWERPC_e200z1
+#define CPU_POWERPC_MPC5516G CPU_POWERPC_MPC5516G_v1
+#define CPU_POWERPC_MPC5516G_v0 CPU_POWERPC_e200z0
+#define CPU_POWERPC_MPC5516G_v1 CPU_POWERPC_e200z1
+#define CPU_POWERPC_MPC5516S CPU_POWERPC_e200z1
+#endif
+#if 0
+#define CPU_POWERPC_MPC5533 CPU_POWERPC_e200z3
+#define CPU_POWERPC_MPC5534 CPU_POWERPC_e200z3
+#endif
+#define CPU_POWERPC_MPC5553 CPU_POWERPC_e200z6
+#define CPU_POWERPC_MPC5554 CPU_POWERPC_e200z6
+#define CPU_POWERPC_MPC5561 CPU_POWERPC_e200z6
+#define CPU_POWERPC_MPC5565 CPU_POWERPC_e200z6
+#define CPU_POWERPC_MPC5566 CPU_POWERPC_e200z6
+#define CPU_POWERPC_MPC5567 CPU_POWERPC_e200z6
+ /* e300 family */
+ /* e300 cores */
+#define CPU_POWERPC_e300 CPU_POWERPC_e300c3
+ CPU_POWERPC_e300c1 = 0x00830010,
+ CPU_POWERPC_e300c2 = 0x00840010,
+ CPU_POWERPC_e300c3 = 0x00850010,
+ CPU_POWERPC_e300c4 = 0x00860010,
+ /* MPC83xx microcontrollers */
+#define CPU_POWERPC_MPC831x CPU_POWERPC_e300c3
+#define CPU_POWERPC_MPC832x CPU_POWERPC_e300c2
+#define CPU_POWERPC_MPC834x CPU_POWERPC_e300c1
+#define CPU_POWERPC_MPC835x CPU_POWERPC_e300c1
+#define CPU_POWERPC_MPC836x CPU_POWERPC_e300c1
+#define CPU_POWERPC_MPC837x CPU_POWERPC_e300c4
+ /* e500 family */
+ /* e500 cores */
+#define CPU_POWERPC_e500 CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_e500v1 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_e500v2 CPU_POWERPC_e500v2_v22
+ CPU_POWERPC_e500v1_v10 = 0x80200010,
+ CPU_POWERPC_e500v1_v20 = 0x80200020,
+ CPU_POWERPC_e500v2_v10 = 0x80210010,
+ CPU_POWERPC_e500v2_v11 = 0x80210011,
+ CPU_POWERPC_e500v2_v20 = 0x80210020,
+ CPU_POWERPC_e500v2_v21 = 0x80210021,
+ CPU_POWERPC_e500v2_v22 = 0x80210022,
+ CPU_POWERPC_e500v2_v30 = 0x80210030,
+ /* MPC85xx microcontrollers */
+#define CPU_POWERPC_MPC8533 CPU_POWERPC_MPC8533_v11
+#define CPU_POWERPC_MPC8533_v10 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8533_v11 CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8533E CPU_POWERPC_MPC8533E_v11
+#define CPU_POWERPC_MPC8533E_v10 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8533E_v11 CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8540 CPU_POWERPC_MPC8540_v21
+#define CPU_POWERPC_MPC8540_v10 CPU_POWERPC_e500v1_v10
+#define CPU_POWERPC_MPC8540_v20 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8540_v21 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8541 CPU_POWERPC_MPC8541_v11
+#define CPU_POWERPC_MPC8541_v10 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8541_v11 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8541E CPU_POWERPC_MPC8541E_v11
+#define CPU_POWERPC_MPC8541E_v10 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8541E_v11 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8543 CPU_POWERPC_MPC8543_v21
+#define CPU_POWERPC_MPC8543_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8543_v11 CPU_POWERPC_e500v2_v11
+#define CPU_POWERPC_MPC8543_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8543_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8543E CPU_POWERPC_MPC8543E_v21
+#define CPU_POWERPC_MPC8543E_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8543E_v11 CPU_POWERPC_e500v2_v11
+#define CPU_POWERPC_MPC8543E_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8543E_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8544 CPU_POWERPC_MPC8544_v11
+#define CPU_POWERPC_MPC8544_v10 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8544_v11 CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8544E_v11 CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8544E CPU_POWERPC_MPC8544E_v11
+#define CPU_POWERPC_MPC8544E_v10 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8545 CPU_POWERPC_MPC8545_v21
+#define CPU_POWERPC_MPC8545_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8545_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8545_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8545E CPU_POWERPC_MPC8545E_v21
+#define CPU_POWERPC_MPC8545E_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8545E_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8545E_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8547E CPU_POWERPC_MPC8545E_v21
+#define CPU_POWERPC_MPC8547E_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8547E_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8547E_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8548 CPU_POWERPC_MPC8548_v21
+#define CPU_POWERPC_MPC8548_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8548_v11 CPU_POWERPC_e500v2_v11
+#define CPU_POWERPC_MPC8548_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8548_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8548E CPU_POWERPC_MPC8548E_v21
+#define CPU_POWERPC_MPC8548E_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8548E_v11 CPU_POWERPC_e500v2_v11
+#define CPU_POWERPC_MPC8548E_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8548E_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8555 CPU_POWERPC_MPC8555_v11
+#define CPU_POWERPC_MPC8555_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8555_v11 CPU_POWERPC_e500v2_v11
+#define CPU_POWERPC_MPC8555E CPU_POWERPC_MPC8555E_v11
+#define CPU_POWERPC_MPC8555E_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8555E_v11 CPU_POWERPC_e500v2_v11
+#define CPU_POWERPC_MPC8560 CPU_POWERPC_MPC8560_v21
+#define CPU_POWERPC_MPC8560_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8560_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8560_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8567 CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8567E CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8568 CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8568E CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8572 CPU_POWERPC_e500v2_v30
+#define CPU_POWERPC_MPC8572E CPU_POWERPC_e500v2_v30
+ /* e600 family */
+ /* e600 cores */
+ CPU_POWERPC_e600 = 0x80040010,
+ /* MPC86xx microcontrollers */
+#define CPU_POWERPC_MPC8610 CPU_POWERPC_e600
+#define CPU_POWERPC_MPC8641 CPU_POWERPC_e600
+#define CPU_POWERPC_MPC8641D CPU_POWERPC_e600
+ /* PowerPC 6xx cores */
+#define CPU_POWERPC_601 CPU_POWERPC_601_v2
+ CPU_POWERPC_601_v0 = 0x00010001,
+ CPU_POWERPC_601_v1 = 0x00010001,
+#define CPU_POWERPC_601v CPU_POWERPC_601_v2
+ CPU_POWERPC_601_v2 = 0x00010002,
+ CPU_POWERPC_602 = 0x00050100,
+ CPU_POWERPC_603 = 0x00030100,
+#define CPU_POWERPC_603E CPU_POWERPC_603E_v41
+ CPU_POWERPC_603E_v11 = 0x00060101,
+ CPU_POWERPC_603E_v12 = 0x00060102,
+ CPU_POWERPC_603E_v13 = 0x00060103,
+ CPU_POWERPC_603E_v14 = 0x00060104,
+ CPU_POWERPC_603E_v22 = 0x00060202,
+ CPU_POWERPC_603E_v3 = 0x00060300,
+ CPU_POWERPC_603E_v4 = 0x00060400,
+ CPU_POWERPC_603E_v41 = 0x00060401,
+ CPU_POWERPC_603E7t = 0x00071201,
+ CPU_POWERPC_603E7v = 0x00070100,
+ CPU_POWERPC_603E7v1 = 0x00070101,
+ CPU_POWERPC_603E7v2 = 0x00070201,
+ CPU_POWERPC_603E7 = 0x00070200,
+ CPU_POWERPC_603P = 0x00070000,
+#define CPU_POWERPC_603R CPU_POWERPC_603E7t
+ /* XXX: missing 0x00040303 (604) */
+ CPU_POWERPC_604 = 0x00040103,
+#define CPU_POWERPC_604E CPU_POWERPC_604E_v24
+ /* XXX: missing 0x00091203 */
+ /* XXX: missing 0x00092110 */
+ /* XXX: missing 0x00092120 */
+ CPU_POWERPC_604E_v10 = 0x00090100,
+ CPU_POWERPC_604E_v22 = 0x00090202,
+ CPU_POWERPC_604E_v24 = 0x00090204,
+ /* XXX: missing 0x000a0100 */
+ /* XXX: missing 0x00093102 */
+ CPU_POWERPC_604R = 0x000a0101,
+#if 0
+ CPU_POWERPC_604EV = xxx, /* XXX: same as 604R ? */
+#endif
+ /* PowerPC 740/750 cores (aka G3) */
+ /* XXX: missing 0x00084202 */
+#define CPU_POWERPC_7x0 CPU_POWERPC_7x0_v31
+ CPU_POWERPC_7x0_v10 = 0x00080100,
+ CPU_POWERPC_7x0_v20 = 0x00080200,
+ CPU_POWERPC_7x0_v21 = 0x00080201,
+ CPU_POWERPC_7x0_v22 = 0x00080202,
+ CPU_POWERPC_7x0_v30 = 0x00080300,
+ CPU_POWERPC_7x0_v31 = 0x00080301,
+ CPU_POWERPC_740E = 0x00080100,
+ CPU_POWERPC_750E = 0x00080200,
+ CPU_POWERPC_7x0P = 0x10080000,
+ /* XXX: missing 0x00087010 (CL ?) */
+#define CPU_POWERPC_750CL CPU_POWERPC_750CL_v20
+ CPU_POWERPC_750CL_v10 = 0x00087200,
+ CPU_POWERPC_750CL_v20 = 0x00087210, /* aka rev E */
+#define CPU_POWERPC_750CX CPU_POWERPC_750CX_v22
+ CPU_POWERPC_750CX_v10 = 0x00082100,
+ CPU_POWERPC_750CX_v20 = 0x00082200,
+ CPU_POWERPC_750CX_v21 = 0x00082201,
+ CPU_POWERPC_750CX_v22 = 0x00082202,
+#define CPU_POWERPC_750CXE CPU_POWERPC_750CXE_v31b
+ CPU_POWERPC_750CXE_v21 = 0x00082211,
+ CPU_POWERPC_750CXE_v22 = 0x00082212,
+ CPU_POWERPC_750CXE_v23 = 0x00082213,
+ CPU_POWERPC_750CXE_v24 = 0x00082214,
+ CPU_POWERPC_750CXE_v24b = 0x00083214,
+ CPU_POWERPC_750CXE_v30 = 0x00082310,
+ CPU_POWERPC_750CXE_v31 = 0x00082311,
+ CPU_POWERPC_750CXE_v31b = 0x00083311,
+ CPU_POWERPC_750CXR = 0x00083410,
+ CPU_POWERPC_750FL = 0x70000203,
+#define CPU_POWERPC_750FX CPU_POWERPC_750FX_v23
+ CPU_POWERPC_750FX_v10 = 0x70000100,
+ CPU_POWERPC_750FX_v20 = 0x70000200,
+ CPU_POWERPC_750FX_v21 = 0x70000201,
+ CPU_POWERPC_750FX_v22 = 0x70000202,
+ CPU_POWERPC_750FX_v23 = 0x70000203,
+ CPU_POWERPC_750GL = 0x70020102,
+#define CPU_POWERPC_750GX CPU_POWERPC_750GX_v12
+ CPU_POWERPC_750GX_v10 = 0x70020100,
+ CPU_POWERPC_750GX_v11 = 0x70020101,
+ CPU_POWERPC_750GX_v12 = 0x70020102,
+#define CPU_POWERPC_750L CPU_POWERPC_750L_v32 /* aka LoneStar */
+ CPU_POWERPC_750L_v20 = 0x00088200,
+ CPU_POWERPC_750L_v21 = 0x00088201,
+ CPU_POWERPC_750L_v22 = 0x00088202,
+ CPU_POWERPC_750L_v30 = 0x00088300,
+ CPU_POWERPC_750L_v32 = 0x00088302,
+ /* PowerPC 745/755 cores */
+#define CPU_POWERPC_7x5 CPU_POWERPC_7x5_v28
+ CPU_POWERPC_7x5_v10 = 0x00083100,
+ CPU_POWERPC_7x5_v11 = 0x00083101,
+ CPU_POWERPC_7x5_v20 = 0x00083200,
+ CPU_POWERPC_7x5_v21 = 0x00083201,
+ CPU_POWERPC_7x5_v22 = 0x00083202, /* aka D */
+ CPU_POWERPC_7x5_v23 = 0x00083203, /* aka E */
+ CPU_POWERPC_7x5_v24 = 0x00083204,
+ CPU_POWERPC_7x5_v25 = 0x00083205,
+ CPU_POWERPC_7x5_v26 = 0x00083206,
+ CPU_POWERPC_7x5_v27 = 0x00083207,
+ CPU_POWERPC_7x5_v28 = 0x00083208,
+#if 0
+ CPU_POWERPC_7x5P = xxx,
+#endif
+ /* PowerPC 74xx cores (aka G4) */
+ /* XXX: missing 0x000C1101 */
+#define CPU_POWERPC_7400 CPU_POWERPC_7400_v29
+ CPU_POWERPC_7400_v10 = 0x000C0100,
+ CPU_POWERPC_7400_v11 = 0x000C0101,
+ CPU_POWERPC_7400_v20 = 0x000C0200,
+ CPU_POWERPC_7400_v21 = 0x000C0201,
+ CPU_POWERPC_7400_v22 = 0x000C0202,
+ CPU_POWERPC_7400_v26 = 0x000C0206,
+ CPU_POWERPC_7400_v27 = 0x000C0207,
+ CPU_POWERPC_7400_v28 = 0x000C0208,
+ CPU_POWERPC_7400_v29 = 0x000C0209,
+#define CPU_POWERPC_7410 CPU_POWERPC_7410_v14
+ CPU_POWERPC_7410_v10 = 0x800C1100,
+ CPU_POWERPC_7410_v11 = 0x800C1101,
+ CPU_POWERPC_7410_v12 = 0x800C1102, /* aka C */
+ CPU_POWERPC_7410_v13 = 0x800C1103, /* aka D */
+ CPU_POWERPC_7410_v14 = 0x800C1104, /* aka E */
+#define CPU_POWERPC_7448 CPU_POWERPC_7448_v21
+ CPU_POWERPC_7448_v10 = 0x80040100,
+ CPU_POWERPC_7448_v11 = 0x80040101,
+ CPU_POWERPC_7448_v20 = 0x80040200,
+ CPU_POWERPC_7448_v21 = 0x80040201,
+#define CPU_POWERPC_7450 CPU_POWERPC_7450_v21
+ CPU_POWERPC_7450_v10 = 0x80000100,
+ CPU_POWERPC_7450_v11 = 0x80000101,
+ CPU_POWERPC_7450_v12 = 0x80000102,
+ CPU_POWERPC_7450_v20 = 0x80000200, /* aka A, B, C, D: 2.04 */
+ CPU_POWERPC_7450_v21 = 0x80000201, /* aka E */
+#define CPU_POWERPC_74x1 CPU_POWERPC_74x1_v23
+ CPU_POWERPC_74x1_v23 = 0x80000203, /* aka G: 2.3 */
+ /* XXX: this entry might be a bug in some documentation */
+ CPU_POWERPC_74x1_v210 = 0x80000210, /* aka G: 2.3 ? */
+#define CPU_POWERPC_74x5 CPU_POWERPC_74x5_v32
+ CPU_POWERPC_74x5_v10 = 0x80010100,
+ /* XXX: missing 0x80010200 */
+ CPU_POWERPC_74x5_v21 = 0x80010201, /* aka C: 2.1 */
+ CPU_POWERPC_74x5_v32 = 0x80010302,
+ CPU_POWERPC_74x5_v33 = 0x80010303, /* aka F: 3.3 */
+ CPU_POWERPC_74x5_v34 = 0x80010304, /* aka G: 3.4 */
+#define CPU_POWERPC_74x7 CPU_POWERPC_74x7_v12
+ CPU_POWERPC_74x7_v10 = 0x80020100, /* aka A: 1.0 */
+ CPU_POWERPC_74x7_v11 = 0x80020101, /* aka B: 1.1 */
+ CPU_POWERPC_74x7_v12 = 0x80020102, /* aka C: 1.2 */
+#define CPU_POWERPC_74x7A CPU_POWERPC_74x7A_v12
+ CPU_POWERPC_74x7A_v10 = 0x80030100, /* aka A: 1.0 */
+ CPU_POWERPC_74x7A_v11 = 0x80030101, /* aka B: 1.1 */
+ CPU_POWERPC_74x7A_v12 = 0x80030102, /* aka C: 1.2 */
+ /* 64-bit PowerPC */
+#if defined(TARGET_PPC64)
+ CPU_POWERPC_620 = 0x00140000,
+ CPU_POWERPC_630 = 0x00400000,
+ CPU_POWERPC_631 = 0x00410104,
+ CPU_POWERPC_POWER4 = 0x00350000,
+ CPU_POWERPC_POWER4P = 0x00380000,
+ /* XXX: missing 0x003A0201 */
+ CPU_POWERPC_POWER5 = 0x003A0203,
+#define CPU_POWERPC_POWER5GR CPU_POWERPC_POWER5
+ CPU_POWERPC_POWER5P = 0x003B0000,
+#define CPU_POWERPC_POWER5GS CPU_POWERPC_POWER5P
+ CPU_POWERPC_POWER6 = 0x003E0000,
+ CPU_POWERPC_POWER6_5 = 0x0F000001, /* POWER6 in POWER5 mode */
+ CPU_POWERPC_POWER6A = 0x0F000002,
+ CPU_POWERPC_970 = 0x00390202,
+#define CPU_POWERPC_970FX CPU_POWERPC_970FX_v31
+ CPU_POWERPC_970FX_v10 = 0x00391100,
+ CPU_POWERPC_970FX_v20 = 0x003C0200,
+ CPU_POWERPC_970FX_v21 = 0x003C0201,
+ CPU_POWERPC_970FX_v30 = 0x003C0300,
+ CPU_POWERPC_970FX_v31 = 0x003C0301,
+ CPU_POWERPC_970GX = 0x00450000,
+#define CPU_POWERPC_970MP CPU_POWERPC_970MP_v11
+ CPU_POWERPC_970MP_v10 = 0x00440100,
+ CPU_POWERPC_970MP_v11 = 0x00440101,
+#define CPU_POWERPC_CELL CPU_POWERPC_CELL_v32
+ CPU_POWERPC_CELL_v10 = 0x00700100,
+ CPU_POWERPC_CELL_v20 = 0x00700400,
+ CPU_POWERPC_CELL_v30 = 0x00700500,
+ CPU_POWERPC_CELL_v31 = 0x00700501,
+#define CPU_POWERPC_CELL_v32 CPU_POWERPC_CELL_v31
+ CPU_POWERPC_RS64 = 0x00330000,
+ CPU_POWERPC_RS64II = 0x00340000,
+ CPU_POWERPC_RS64III = 0x00360000,
+ CPU_POWERPC_RS64IV = 0x00370000,
+#endif /* defined(TARGET_PPC64) */
+ /* Original POWER */
+ /* XXX: should be POWER (RIOS), RSC3308, RSC4608,
+ * POWER2 (RIOS2) & RSC2 (P2SC) here
+ */
+#if 0
+ CPU_POWER = xxx, /* 0x20000 ? 0x30000 for RSC ? */
+#endif
+#if 0
+ CPU_POWER2 = xxx, /* 0x40000 ? */
+#endif
+ /* PA Semi core */
+ CPU_POWERPC_PA6T = 0x00900000,
+};
+
+/* System version register (used on MPC 8xxx) */
+enum {
+ POWERPC_SVR_NONE = 0x00000000,
+#define POWERPC_SVR_52xx POWERPC_SVR_5200
+#define POWERPC_SVR_5200 POWERPC_SVR_5200_v12
+ POWERPC_SVR_5200_v10 = 0x80110010,
+ POWERPC_SVR_5200_v11 = 0x80110011,
+ POWERPC_SVR_5200_v12 = 0x80110012,
+#define POWERPC_SVR_5200B POWERPC_SVR_5200B_v21
+ POWERPC_SVR_5200B_v20 = 0x80110020,
+ POWERPC_SVR_5200B_v21 = 0x80110021,
+#define POWERPC_SVR_55xx POWERPC_SVR_5567
+#if 0
+ POWERPC_SVR_5533 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_5534 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_5553 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_5554 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_5561 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_5565 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_5566 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_5567 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8313 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8313E = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8314 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8314E = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8315 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8315E = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8321 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8321E = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8323 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8323E = xxx,
+#endif
+ POWERPC_SVR_8343 = 0x80570010,
+ POWERPC_SVR_8343A = 0x80570030,
+ POWERPC_SVR_8343E = 0x80560010,
+ POWERPC_SVR_8343EA = 0x80560030,
+#define POWERPC_SVR_8347 POWERPC_SVR_8347T
+ POWERPC_SVR_8347P = 0x80550010, /* PBGA package */
+ POWERPC_SVR_8347T = 0x80530010, /* TBGA package */
+#define POWERPC_SVR_8347A POWERPC_SVR_8347AT
+ POWERPC_SVR_8347AP = 0x80550030, /* PBGA package */
+ POWERPC_SVR_8347AT = 0x80530030, /* TBGA package */
+#define POWERPC_SVR_8347E POWERPC_SVR_8347ET
+ POWERPC_SVR_8347EP = 0x80540010, /* PBGA package */
+ POWERPC_SVR_8347ET = 0x80520010, /* TBGA package */
+#define POWERPC_SVR_8347EA POWERPC_SVR_8347EAT
+ POWERPC_SVR_8347EAP = 0x80540030, /* PBGA package */
+ POWERPC_SVR_8347EAT = 0x80520030, /* TBGA package */
+ POWERPC_SVR_8349 = 0x80510010,
+ POWERPC_SVR_8349A = 0x80510030,
+ POWERPC_SVR_8349E = 0x80500010,
+ POWERPC_SVR_8349EA = 0x80500030,
+#if 0
+ POWERPC_SVR_8358E = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8360E = xxx,
+#endif
+#define POWERPC_SVR_E500 0x40000000
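+ /* Note: the SVR values below (8377 through 8572E) all have this bit
+ * OR'ed in; it appears to act as an e500-family marker (assumption,
+ * not confirmed by documentation). */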
+ POWERPC_SVR_8377 = 0x80C70010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8377E = 0x80C60010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8378 = 0x80C50010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8378E = 0x80C40010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8379 = 0x80C30010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8379E = 0x80C00010 | POWERPC_SVR_E500,
+#define POWERPC_SVR_8533 POWERPC_SVR_8533_v11
+ POWERPC_SVR_8533_v10 = 0x80340010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8533_v11 = 0x80340011 | POWERPC_SVR_E500,
+#define POWERPC_SVR_8533E POWERPC_SVR_8533E_v11
+ POWERPC_SVR_8533E_v10 = 0x803C0010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8533E_v11 = 0x803C0011 | POWERPC_SVR_E500,
+#define POWERPC_SVR_8540 POWERPC_SVR_8540_v21
+ POWERPC_SVR_8540_v10 = 0x80300010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8540_v20 = 0x80300020 | POWERPC_SVR_E500,
+ POWERPC_SVR_8540_v21 = 0x80300021 | POWERPC_SVR_E500,
+#define POWERPC_SVR_8541 POWERPC_SVR_8541_v11
+ POWERPC_SVR_8541_v10 = 0x80720010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8541_v11 = 0x80720011 | POWERPC_SVR_E500,
+#define POWERPC_SVR_8541E POWERPC_SVR_8541E_v11
+ POWERPC_SVR_8541E_v10 = 0x807A0010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8541E_v11 = 0x807A0011 | POWERPC_SVR_E500,
+#define POWERPC_SVR_8543 POWERPC_SVR_8543_v21
+ POWERPC_SVR_8543_v10 = 0x80320010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543_v11 = 0x80320011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543_v20 = 0x80320020 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543_v21 = 0x80320021 | POWERPC_SVR_E500,
+#define POWERPC_SVR_8543E POWERPC_SVR_8543E_v21
+ POWERPC_SVR_8543E_v10 = 0x803A0010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543E_v11 = 0x803A0011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543E_v20 = 0x803A0020 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543E_v21 = 0x803A0021 | POWERPC_SVR_E500,
+#define POWERPC_SVR_8544 POWERPC_SVR_8544_v11
+ POWERPC_SVR_8544_v10 = 0x80340110 | POWERPC_SVR_E500,
+ POWERPC_SVR_8544_v11 = 0x80340111 | POWERPC_SVR_E500,
+#define POWERPC_SVR_8544E POWERPC_SVR_8544E_v11
+ POWERPC_SVR_8544E_v10 = 0x803C0110 | POWERPC_SVR_E500,
+ POWERPC_SVR_8544E_v11 = 0x803C0111 | POWERPC_SVR_E500,
+#define POWERPC_SVR_8545 POWERPC_SVR_8545_v21
+ POWERPC_SVR_8545_v20 = 0x80310220 | POWERPC_SVR_E500,
+ POWERPC_SVR_8545_v21 = 0x80310221 | POWERPC_SVR_E500,
+#define POWERPC_SVR_8545E POWERPC_SVR_8545E_v21
+ POWERPC_SVR_8545E_v20 = 0x80390220 | POWERPC_SVR_E500,
+ POWERPC_SVR_8545E_v21 = 0x80390221 | POWERPC_SVR_E500,
+#define POWERPC_SVR_8547E POWERPC_SVR_8547E_v21
+ POWERPC_SVR_8547E_v20 = 0x80390120 | POWERPC_SVR_E500,
+ POWERPC_SVR_8547E_v21 = 0x80390121 | POWERPC_SVR_E500,
+#define POWERPC_SVR_8548 POWERPC_SVR_8548_v21
+ POWERPC_SVR_8548_v10 = 0x80310010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548_v11 = 0x80310011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548_v20 = 0x80310020 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548_v21 = 0x80310021 | POWERPC_SVR_E500,
+#define POWERPC_SVR_8548E POWERPC_SVR_8548E_v21
+ POWERPC_SVR_8548E_v10 = 0x80390010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548E_v11 = 0x80390011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548E_v20 = 0x80390020 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548E_v21 = 0x80390021 | POWERPC_SVR_E500,
+#define POWERPC_SVR_8555 POWERPC_SVR_8555_v11
+ POWERPC_SVR_8555_v10 = 0x80710010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8555_v11 = 0x80710011 | POWERPC_SVR_E500,
+#define POWERPC_SVR_8555E POWERPC_SVR_8555_v11
+ POWERPC_SVR_8555E_v10 = 0x80790010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8555E_v11 = 0x80790011 | POWERPC_SVR_E500,
+#define POWERPC_SVR_8560 POWERPC_SVR_8560_v21
+ POWERPC_SVR_8560_v10 = 0x80700010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8560_v20 = 0x80700020 | POWERPC_SVR_E500,
+ POWERPC_SVR_8560_v21 = 0x80700021 | POWERPC_SVR_E500,
+ POWERPC_SVR_8567 = 0x80750111 | POWERPC_SVR_E500,
+ POWERPC_SVR_8567E = 0x807D0111 | POWERPC_SVR_E500,
+ POWERPC_SVR_8568 = 0x80750011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8568E = 0x807D0011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8572 = 0x80E00010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8572E = 0x80E80010 | POWERPC_SVR_E500,
+#if 0
+ POWERPC_SVR_8610 = xxx,
+#endif
+ POWERPC_SVR_8641 = 0x80900021,
+ POWERPC_SVR_8641D = 0x80900121,
+};
+
+/*****************************************************************************/
+/* PowerPC CPU definitions */
+#define POWERPC_DEF_SVR(_name, _pvr, _svr, _type) \
+ { \
+ .name = _name, \
+ .pvr = _pvr, \
+ .svr = _svr, \
+ .insns_flags = glue(POWERPC_INSNS_,_type), \
+ .msr_mask = glue(POWERPC_MSRM_,_type), \
+ .mmu_model = glue(POWERPC_MMU_,_type), \
+ .excp_model = glue(POWERPC_EXCP_,_type), \
+ .bus_model = glue(POWERPC_INPUT_,_type), \
+ .bfd_mach = glue(POWERPC_BFDM_,_type), \
+ .flags = glue(POWERPC_FLAG_,_type), \
+ .init_proc = &glue(init_proc_,_type), \
+ .check_pow = &glue(check_pow_,_type), \
+ }
+#define POWERPC_DEF(_name, _pvr, _type) \
+POWERPC_DEF_SVR(_name, _pvr, POWERPC_SVR_NONE, _type)
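+/*
+ * As an illustration, an entry such as
+ * POWERPC_DEF("740", CPU_POWERPC_7x0, 740)
+ * expands through the glue() macros to roughly
+ * { .name = "740", .pvr = CPU_POWERPC_7x0, .svr = POWERPC_SVR_NONE,
+ * .insns_flags = POWERPC_INSNS_740, .msr_mask = POWERPC_MSRM_740,
+ * .mmu_model = POWERPC_MMU_740, .excp_model = POWERPC_EXCP_740,
+ * .bus_model = POWERPC_INPUT_740, .bfd_mach = POWERPC_BFDM_740,
+ * .flags = POWERPC_FLAG_740, .init_proc = &init_proc_740,
+ * .check_pow = &check_pow_740 }
+ * so each _type used in the table below relies on the matching
+ * POWERPC_*_<type> constants and the init_proc_/check_pow_ helpers
+ * defined earlier in this file.
+ */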
+
+static const ppc_def_t ppc_defs[] = {
+ /* Embedded PowerPC */
+ /* PowerPC 401 family */
+ /* Generic PowerPC 401 */
+ POWERPC_DEF("401", CPU_POWERPC_401, 401),
+ /* PowerPC 401 cores */
+ /* PowerPC 401A1 */
+ POWERPC_DEF("401A1", CPU_POWERPC_401A1, 401),
+ /* PowerPC 401B2 */
+ POWERPC_DEF("401B2", CPU_POWERPC_401B2, 401x2),
+#if defined (TODO)
+ /* PowerPC 401B3 */
+ POWERPC_DEF("401B3", CPU_POWERPC_401B3, 401x3),
+#endif
+ /* PowerPC 401C2 */
+ POWERPC_DEF("401C2", CPU_POWERPC_401C2, 401x2),
+ /* PowerPC 401D2 */
+ POWERPC_DEF("401D2", CPU_POWERPC_401D2, 401x2),
+ /* PowerPC 401E2 */
+ POWERPC_DEF("401E2", CPU_POWERPC_401E2, 401x2),
+ /* PowerPC 401F2 */
+ POWERPC_DEF("401F2", CPU_POWERPC_401F2, 401x2),
+ /* PowerPC 401G2 */
+ /* XXX: to be checked */
+ POWERPC_DEF("401G2", CPU_POWERPC_401G2, 401x2),
+ /* PowerPC 401 microcontrollers */
+#if defined (TODO)
+ /* PowerPC 401GF */
+ POWERPC_DEF("401GF", CPU_POWERPC_401GF, 401),
+#endif
+ /* IOP480 (401 microcontroller) */
+ POWERPC_DEF("IOP480", CPU_POWERPC_IOP480, IOP480),
+ /* IBM Processor for Network Resources */
+ POWERPC_DEF("Cobra", CPU_POWERPC_COBRA, 401),
+#if defined (TODO)
+ POWERPC_DEF("Xipchip", CPU_POWERPC_XIPCHIP, 401),
+#endif
+ /* PowerPC 403 family */
+ /* Generic PowerPC 403 */
+ POWERPC_DEF("403", CPU_POWERPC_403, 403),
+ /* PowerPC 403 microcontrollers */
+ /* PowerPC 403 GA */
+ POWERPC_DEF("403GA", CPU_POWERPC_403GA, 403),
+ /* PowerPC 403 GB */
+ POWERPC_DEF("403GB", CPU_POWERPC_403GB, 403),
+ /* PowerPC 403 GC */
+ POWERPC_DEF("403GC", CPU_POWERPC_403GC, 403),
+ /* PowerPC 403 GCX */
+ POWERPC_DEF("403GCX", CPU_POWERPC_403GCX, 403GCX),
+#if defined (TODO)
+ /* PowerPC 403 GP */
+ POWERPC_DEF("403GP", CPU_POWERPC_403GP, 403),
+#endif
+ /* PowerPC 405 family */
+ /* Generic PowerPC 405 */
+ POWERPC_DEF("405", CPU_POWERPC_405, 405),
+ /* PowerPC 405 cores */
+#if defined (TODO)
+ /* PowerPC 405 A3 */
+ POWERPC_DEF("405A3", CPU_POWERPC_405A3, 405),
+#endif
+#if defined (TODO)
+ /* PowerPC 405 A4 */
+ POWERPC_DEF("405A4", CPU_POWERPC_405A4, 405),
+#endif
+#if defined (TODO)
+ /* PowerPC 405 B3 */
+ POWERPC_DEF("405B3", CPU_POWERPC_405B3, 405),
+#endif
+#if defined (TODO)
+ /* PowerPC 405 B4 */
+ POWERPC_DEF("405B4", CPU_POWERPC_405B4, 405),
+#endif
+#if defined (TODO)
+ /* PowerPC 405 C3 */
+ POWERPC_DEF("405C3", CPU_POWERPC_405C3, 405),
+#endif
+#if defined (TODO)
+ /* PowerPC 405 C4 */
+ POWERPC_DEF("405C4", CPU_POWERPC_405C4, 405),
+#endif
+ /* PowerPC 405 D2 */
+ POWERPC_DEF("405D2", CPU_POWERPC_405D2, 405),
+#if defined (TODO)
+ /* PowerPC 405 D3 */
+ POWERPC_DEF("405D3", CPU_POWERPC_405D3, 405),
+#endif
+ /* PowerPC 405 D4 */
+ POWERPC_DEF("405D4", CPU_POWERPC_405D4, 405),
+#if defined (TODO)
+ /* PowerPC 405 D5 */
+ POWERPC_DEF("405D5", CPU_POWERPC_405D5, 405),
+#endif
+#if defined (TODO)
+ /* PowerPC 405 E4 */
+ POWERPC_DEF("405E4", CPU_POWERPC_405E4, 405),
+#endif
+#if defined (TODO)
+ /* PowerPC 405 F4 */
+ POWERPC_DEF("405F4", CPU_POWERPC_405F4, 405),
+#endif
+#if defined (TODO)
+ /* PowerPC 405 F5 */
+ POWERPC_DEF("405F5", CPU_POWERPC_405F5, 405),
+#endif
+#if defined (TODO)
+ /* PowerPC 405 F6 */
+ POWERPC_DEF("405F6", CPU_POWERPC_405F6, 405),
+#endif
+ /* PowerPC 405 microcontrollers */
+ /* PowerPC 405 CR */
+ POWERPC_DEF("405CR", CPU_POWERPC_405CR, 405),
+ /* PowerPC 405 CRa */
+ POWERPC_DEF("405CRa", CPU_POWERPC_405CRa, 405),
+ /* PowerPC 405 CRb */
+ POWERPC_DEF("405CRb", CPU_POWERPC_405CRb, 405),
+ /* PowerPC 405 CRc */
+ POWERPC_DEF("405CRc", CPU_POWERPC_405CRc, 405),
+ /* PowerPC 405 EP */
+ POWERPC_DEF("405EP", CPU_POWERPC_405EP, 405),
+#if defined(TODO)
+ /* PowerPC 405 EXr */
+ POWERPC_DEF("405EXr", CPU_POWERPC_405EXr, 405),
+#endif
+ /* PowerPC 405 EZ */
+ POWERPC_DEF("405EZ", CPU_POWERPC_405EZ, 405),
+#if defined(TODO)
+ /* PowerPC 405 FX */
+ POWERPC_DEF("405FX", CPU_POWERPC_405FX, 405),
+#endif
+ /* PowerPC 405 GP */
+ POWERPC_DEF("405GP", CPU_POWERPC_405GP, 405),
+ /* PowerPC 405 GPa */
+ POWERPC_DEF("405GPa", CPU_POWERPC_405GPa, 405),
+ /* PowerPC 405 GPb */
+ POWERPC_DEF("405GPb", CPU_POWERPC_405GPb, 405),
+ /* PowerPC 405 GPc */
+ POWERPC_DEF("405GPc", CPU_POWERPC_405GPc, 405),
+ /* PowerPC 405 GPd */
+ POWERPC_DEF("405GPd", CPU_POWERPC_405GPd, 405),
+ /* PowerPC 405 GPe */
+ POWERPC_DEF("405GPe", CPU_POWERPC_405GPe, 405),
+ /* PowerPC 405 GPR */
+ POWERPC_DEF("405GPR", CPU_POWERPC_405GPR, 405),
+#if defined(TODO)
+ /* PowerPC 405 H */
+ POWERPC_DEF("405H", CPU_POWERPC_405H, 405),
+#endif
+#if defined(TODO)
+ /* PowerPC 405 L */
+ POWERPC_DEF("405L", CPU_POWERPC_405L, 405),
+#endif
+ /* PowerPC 405 LP */
+ POWERPC_DEF("405LP", CPU_POWERPC_405LP, 405),
+#if defined(TODO)
+ /* PowerPC 405 PM */
+ POWERPC_DEF("405PM", CPU_POWERPC_405PM, 405),
+#endif
+#if defined(TODO)
+ /* PowerPC 405 PS */
+ POWERPC_DEF("405PS", CPU_POWERPC_405PS, 405),
+#endif
+#if defined(TODO)
+ /* PowerPC 405 S */
+ POWERPC_DEF("405S", CPU_POWERPC_405S, 405),
+#endif
+ /* Npe405 H */
+ POWERPC_DEF("Npe405H", CPU_POWERPC_NPE405H, 405),
+ /* Npe405 H2 */
+ POWERPC_DEF("Npe405H2", CPU_POWERPC_NPE405H2, 405),
+ /* Npe405 L */
+ POWERPC_DEF("Npe405L", CPU_POWERPC_NPE405L, 405),
+ /* Npe4GS3 */
+ POWERPC_DEF("Npe4GS3", CPU_POWERPC_NPE4GS3, 405),
+#if defined (TODO)
+ POWERPC_DEF("Npcxx1", CPU_POWERPC_NPCxx1, 405),
+#endif
+#if defined (TODO)
+ POWERPC_DEF("Npr161", CPU_POWERPC_NPR161, 405),
+#endif
+#if defined (TODO)
+ /* PowerPC LC77700 (Sanyo) */
+ POWERPC_DEF("LC77700", CPU_POWERPC_LC77700, 405),
+#endif
+ /* PowerPC 401/403/405 based set-top-box microcontrollers */
+#if defined (TODO)
+ /* STB010000 */
+ POWERPC_DEF("STB01000", CPU_POWERPC_STB01000, 401x2),
+#endif
+#if defined (TODO)
+ /* STB01010 */
+ POWERPC_DEF("STB01010", CPU_POWERPC_STB01010, 401x2),
+#endif
+#if defined (TODO)
+ /* STB0210 */
+ POWERPC_DEF("STB0210", CPU_POWERPC_STB0210, 401x3),
+#endif
+ /* STB03xx */
+ POWERPC_DEF("STB03", CPU_POWERPC_STB03, 405),
+#if defined (TODO)
+ /* STB043x */
+ POWERPC_DEF("STB043", CPU_POWERPC_STB043, 405),
+#endif
+#if defined (TODO)
+ /* STB045x */
+ POWERPC_DEF("STB045", CPU_POWERPC_STB045, 405),
+#endif
+ /* STB04xx */
+ POWERPC_DEF("STB04", CPU_POWERPC_STB04, 405),
+ /* STB25xx */
+ POWERPC_DEF("STB25", CPU_POWERPC_STB25, 405),
+#if defined (TODO)
+ /* STB130 */
+ POWERPC_DEF("STB130", CPU_POWERPC_STB130, 405),
+#endif
+ /* Xilinx PowerPC 405 cores */
+ POWERPC_DEF("x2vp4", CPU_POWERPC_X2VP4, 405),
+ POWERPC_DEF("x2vp7", CPU_POWERPC_X2VP7, 405),
+ POWERPC_DEF("x2vp20", CPU_POWERPC_X2VP20, 405),
+ POWERPC_DEF("x2vp50", CPU_POWERPC_X2VP50, 405),
+#if defined (TODO)
+ /* Zarlink ZL10310 */
+ POWERPC_DEF("zl10310", CPU_POWERPC_ZL10310, 405),
+#endif
+#if defined (TODO)
+ /* Zarlink ZL10311 */
+ POWERPC_DEF("zl10311", CPU_POWERPC_ZL10311, 405),
+#endif
+#if defined (TODO)
+ /* Zarlink ZL10320 */
+ POWERPC_DEF("zl10320", CPU_POWERPC_ZL10320, 405),
+#endif
+#if defined (TODO)
+ /* Zarlink ZL10321 */
+ POWERPC_DEF("zl10321", CPU_POWERPC_ZL10321, 405),
+#endif
+ /* PowerPC 440 family */
+#if defined(TODO_USER_ONLY)
+ /* Generic PowerPC 440 */
+ POWERPC_DEF("440", CPU_POWERPC_440, 440GP),
+#endif
+ /* PowerPC 440 cores */
+#if defined (TODO)
+ /* PowerPC 440 A4 */
+ POWERPC_DEF("440A4", CPU_POWERPC_440A4, 440x4),
+#endif
+ /* PowerPC 440 Xilinx 5 */
+ POWERPC_DEF("440-Xilinx", CPU_POWERPC_440_XILINX, 440x5),
+#if defined (TODO)
+ /* PowerPC 440 A5 */
+ POWERPC_DEF("440A5", CPU_POWERPC_440A5, 440x5),
+#endif
+#if defined (TODO)
+ /* PowerPC 440 B4 */
+ POWERPC_DEF("440B4", CPU_POWERPC_440B4, 440x4),
+#endif
+#if defined (TODO)
+ /* PowerPC 440 G4 */
+ POWERPC_DEF("440G4", CPU_POWERPC_440G4, 440x4),
+#endif
+#if defined (TODO)
+ /* PowerPC 440 F5 */
+ POWERPC_DEF("440F5", CPU_POWERPC_440F5, 440x5),
+#endif
+#if defined (TODO)
+ /* PowerPC 440 G5 */
+ POWERPC_DEF("440G5", CPU_POWERPC_440G5, 440x5),
+#endif
+#if defined (TODO)
+ /* PowerPC 440H4 */
+ POWERPC_DEF("440H4", CPU_POWERPC_440H4, 440x4),
+#endif
+#if defined (TODO)
+ /* PowerPC 440H6 */
+ POWERPC_DEF("440H6", CPU_POWERPC_440H6, 440Gx5),
+#endif
+ /* PowerPC 440 microcontrollers */
+#if defined(TODO_USER_ONLY)
+ /* PowerPC 440 EP */
+ POWERPC_DEF("440EP", CPU_POWERPC_440EP, 440EP),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* PowerPC 440 EPa */
+ POWERPC_DEF("440EPa", CPU_POWERPC_440EPa, 440EP),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* PowerPC 440 EPb */
+ POWERPC_DEF("440EPb", CPU_POWERPC_440EPb, 440EP),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* PowerPC 440 EPX */
+ POWERPC_DEF("440EPX", CPU_POWERPC_440EPX, 440EP),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* PowerPC 440 GP */
+ POWERPC_DEF("440GP", CPU_POWERPC_440GP, 440GP),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* PowerPC 440 GPb */
+ POWERPC_DEF("440GPb", CPU_POWERPC_440GPb, 440GP),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* PowerPC 440 GPc */
+ POWERPC_DEF("440GPc", CPU_POWERPC_440GPc, 440GP),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* PowerPC 440 GR */
+ POWERPC_DEF("440GR", CPU_POWERPC_440GR, 440x5),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* PowerPC 440 GRa */
+ POWERPC_DEF("440GRa", CPU_POWERPC_440GRa, 440x5),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* PowerPC 440 GRX */
+ POWERPC_DEF("440GRX", CPU_POWERPC_440GRX, 440x5),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* PowerPC 440 GX */
+ POWERPC_DEF("440GX", CPU_POWERPC_440GX, 440EP),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* PowerPC 440 GXa */
+ POWERPC_DEF("440GXa", CPU_POWERPC_440GXa, 440EP),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* PowerPC 440 GXb */
+ POWERPC_DEF("440GXb", CPU_POWERPC_440GXb, 440EP),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* PowerPC 440 GXc */
+ POWERPC_DEF("440GXc", CPU_POWERPC_440GXc, 440EP),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* PowerPC 440 GXf */
+ POWERPC_DEF("440GXf", CPU_POWERPC_440GXf, 440EP),
+#endif
+#if defined(TODO)
+ /* PowerPC 440 S */
+ POWERPC_DEF("440S", CPU_POWERPC_440S, 440),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* PowerPC 440 SP */
+ POWERPC_DEF("440SP", CPU_POWERPC_440SP, 440EP),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* PowerPC 440 SP2 */
+ POWERPC_DEF("440SP2", CPU_POWERPC_440SP2, 440EP),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* PowerPC 440 SPE */
+ POWERPC_DEF("440SPE", CPU_POWERPC_440SPE, 440EP),
+#endif
+ /* PowerPC 460 family */
+#if defined (TODO)
+ /* Generic PowerPC 464 */
+ POWERPC_DEF("464", CPU_POWERPC_464, 460),
+#endif
+ /* PowerPC 464 microcontrollers */
+#if defined (TODO)
+ /* PowerPC 464H90 */
+ POWERPC_DEF("464H90", CPU_POWERPC_464H90, 460),
+#endif
+#if defined (TODO)
+ /* PowerPC 464H90F */
+ POWERPC_DEF("464H90F", CPU_POWERPC_464H90F, 460F),
+#endif
+ /* Freescale embedded PowerPC cores */
+ /* MPC5xx family (aka RCPU) */
+#if defined(TODO_USER_ONLY)
+ /* Generic MPC5xx core */
+ POWERPC_DEF("MPC5xx", CPU_POWERPC_MPC5xx, MPC5xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* Codename for MPC5xx core */
+ POWERPC_DEF("RCPU", CPU_POWERPC_MPC5xx, MPC5xx),
+#endif
+ /* MPC5xx microcontrollers */
+#if defined(TODO_USER_ONLY)
+ /* MGT560 */
+ POWERPC_DEF("MGT560", CPU_POWERPC_MGT560, MPC5xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC509 */
+ POWERPC_DEF("MPC509", CPU_POWERPC_MPC509, MPC5xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC533 */
+ POWERPC_DEF("MPC533", CPU_POWERPC_MPC533, MPC5xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC534 */
+ POWERPC_DEF("MPC534", CPU_POWERPC_MPC534, MPC5xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC555 */
+ POWERPC_DEF("MPC555", CPU_POWERPC_MPC555, MPC5xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC556 */
+ POWERPC_DEF("MPC556", CPU_POWERPC_MPC556, MPC5xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC560 */
+ POWERPC_DEF("MPC560", CPU_POWERPC_MPC560, MPC5xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC561 */
+ POWERPC_DEF("MPC561", CPU_POWERPC_MPC561, MPC5xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC562 */
+ POWERPC_DEF("MPC562", CPU_POWERPC_MPC562, MPC5xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC563 */
+ POWERPC_DEF("MPC563", CPU_POWERPC_MPC563, MPC5xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC564 */
+ POWERPC_DEF("MPC564", CPU_POWERPC_MPC564, MPC5xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC565 */
+ POWERPC_DEF("MPC565", CPU_POWERPC_MPC565, MPC5xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC566 */
+ POWERPC_DEF("MPC566", CPU_POWERPC_MPC566, MPC5xx),
+#endif
+ /* MPC8xx family (aka PowerQUICC) */
+#if defined(TODO_USER_ONLY)
+ /* Generic MPC8xx core */
+ POWERPC_DEF("MPC8xx", CPU_POWERPC_MPC8xx, MPC8xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* Codename for MPC8xx core */
+ POWERPC_DEF("PowerQUICC", CPU_POWERPC_MPC8xx, MPC8xx),
+#endif
+ /* MPC8xx microcontrollers */
+#if defined(TODO_USER_ONLY)
+ /* MGT823 */
+ POWERPC_DEF("MGT823", CPU_POWERPC_MGT823, MPC8xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC821 */
+ POWERPC_DEF("MPC821", CPU_POWERPC_MPC821, MPC8xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC823 */
+ POWERPC_DEF("MPC823", CPU_POWERPC_MPC823, MPC8xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC850 */
+ POWERPC_DEF("MPC850", CPU_POWERPC_MPC850, MPC8xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC852T */
+ POWERPC_DEF("MPC852T", CPU_POWERPC_MPC852T, MPC8xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC855T */
+ POWERPC_DEF("MPC855T", CPU_POWERPC_MPC855T, MPC8xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC857 */
+ POWERPC_DEF("MPC857", CPU_POWERPC_MPC857, MPC8xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC859 */
+ POWERPC_DEF("MPC859", CPU_POWERPC_MPC859, MPC8xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC860 */
+ POWERPC_DEF("MPC860", CPU_POWERPC_MPC860, MPC8xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC862 */
+ POWERPC_DEF("MPC862", CPU_POWERPC_MPC862, MPC8xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC866 */
+ POWERPC_DEF("MPC866", CPU_POWERPC_MPC866, MPC8xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC870 */
+ POWERPC_DEF("MPC870", CPU_POWERPC_MPC870, MPC8xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC875 */
+ POWERPC_DEF("MPC875", CPU_POWERPC_MPC875, MPC8xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC880 */
+ POWERPC_DEF("MPC880", CPU_POWERPC_MPC880, MPC8xx),
+#endif
+#if defined(TODO_USER_ONLY)
+ /* MPC885 */
+ POWERPC_DEF("MPC885", CPU_POWERPC_MPC885, MPC8xx),
+#endif
+ /* MPC82xx family (aka PowerQUICC-II) */
+ /* Generic MPC52xx core */
+ POWERPC_DEF_SVR("MPC52xx",
+ CPU_POWERPC_MPC52xx, POWERPC_SVR_52xx, G2LE),
+ /* Generic MPC82xx core */
+ POWERPC_DEF("MPC82xx", CPU_POWERPC_MPC82xx, G2),
+ /* Codename for MPC82xx */
+ POWERPC_DEF("PowerQUICC-II", CPU_POWERPC_MPC82xx, G2),
+ /* PowerPC G2 core */
+ POWERPC_DEF("G2", CPU_POWERPC_G2, G2),
+ /* PowerPC G2 H4 core */
+ POWERPC_DEF("G2H4", CPU_POWERPC_G2H4, G2),
+ /* PowerPC G2 GP core */
+ POWERPC_DEF("G2GP", CPU_POWERPC_G2gp, G2),
+ /* PowerPC G2 LS core */
+ POWERPC_DEF("G2LS", CPU_POWERPC_G2ls, G2),
+ /* PowerPC G2 HiP3 core */
+ POWERPC_DEF("G2HiP3", CPU_POWERPC_G2_HIP3, G2),
+ /* PowerPC G2 HiP4 core */
+ POWERPC_DEF("G2HiP4", CPU_POWERPC_G2_HIP4, G2),
+ /* PowerPC MPC603 core */
+ POWERPC_DEF("MPC603", CPU_POWERPC_MPC603, 603E),
+ /* PowerPC G2le core (same as G2 plus little-endian mode support) */
+ POWERPC_DEF("G2le", CPU_POWERPC_G2LE, G2LE),
+ /* PowerPC G2LE GP core */
+ POWERPC_DEF("G2leGP", CPU_POWERPC_G2LEgp, G2LE),
+ /* PowerPC G2LE LS core */
+ POWERPC_DEF("G2leLS", CPU_POWERPC_G2LEls, G2LE),
+ /* PowerPC G2LE GP1 core */
+ POWERPC_DEF("G2leGP1", CPU_POWERPC_G2LEgp1, G2LE),
+ /* PowerPC G2LE GP3 core */
+ POWERPC_DEF("G2leGP3", CPU_POWERPC_G2LEgp3, G2LE),
+ /* PowerPC MPC603 microcontrollers */
+ /* MPC8240 */
+ POWERPC_DEF("MPC8240", CPU_POWERPC_MPC8240, 603E),
+ /* PowerPC G2 microcontrollers */
+#if defined(TODO)
+ /* MPC5121 */
+ POWERPC_DEF_SVR("MPC5121",
+ CPU_POWERPC_MPC5121, POWERPC_SVR_5121, G2LE),
+#endif
+ /* MPC5200 */
+ POWERPC_DEF_SVR("MPC5200",
+ CPU_POWERPC_MPC5200, POWERPC_SVR_5200, G2LE),
+ /* MPC5200 v1.0 */
+ POWERPC_DEF_SVR("MPC5200_v10",
+ CPU_POWERPC_MPC5200_v10, POWERPC_SVR_5200_v10, G2LE),
+ /* MPC5200 v1.1 */
+ POWERPC_DEF_SVR("MPC5200_v11",
+ CPU_POWERPC_MPC5200_v11, POWERPC_SVR_5200_v11, G2LE),
+ /* MPC5200 v1.2 */
+ POWERPC_DEF_SVR("MPC5200_v12",
+ CPU_POWERPC_MPC5200_v12, POWERPC_SVR_5200_v12, G2LE),
+ /* MPC5200B */
+ POWERPC_DEF_SVR("MPC5200B",
+ CPU_POWERPC_MPC5200B, POWERPC_SVR_5200B, G2LE),
+ /* MPC5200B v2.0 */
+ POWERPC_DEF_SVR("MPC5200B_v20",
+ CPU_POWERPC_MPC5200B_v20, POWERPC_SVR_5200B_v20, G2LE),
+ /* MPC5200B v2.1 */
+ POWERPC_DEF_SVR("MPC5200B_v21",
+ CPU_POWERPC_MPC5200B_v21, POWERPC_SVR_5200B_v21, G2LE),
+ /* MPC8241 */
+ POWERPC_DEF("MPC8241", CPU_POWERPC_MPC8241, G2),
+ /* MPC8245 */
+ POWERPC_DEF("MPC8245", CPU_POWERPC_MPC8245, G2),
+ /* MPC8247 */
+ POWERPC_DEF("MPC8247", CPU_POWERPC_MPC8247, G2LE),
+ /* MPC8248 */
+ POWERPC_DEF("MPC8248", CPU_POWERPC_MPC8248, G2LE),
+ /* MPC8250 */
+ POWERPC_DEF("MPC8250", CPU_POWERPC_MPC8250, G2),
+ /* MPC8250 HiP3 */
+ POWERPC_DEF("MPC8250_HiP3", CPU_POWERPC_MPC8250_HiP3, G2),
+ /* MPC8250 HiP4 */
+ POWERPC_DEF("MPC8250_HiP4", CPU_POWERPC_MPC8250_HiP4, G2),
+ /* MPC8255 */
+ POWERPC_DEF("MPC8255", CPU_POWERPC_MPC8255, G2),
+ /* MPC8255 HiP3 */
+ POWERPC_DEF("MPC8255_HiP3", CPU_POWERPC_MPC8255_HiP3, G2),
+ /* MPC8255 HiP4 */
+ POWERPC_DEF("MPC8255_HiP4", CPU_POWERPC_MPC8255_HiP4, G2),
+ /* MPC8260 */
+ POWERPC_DEF("MPC8260", CPU_POWERPC_MPC8260, G2),
+ /* MPC8260 HiP3 */
+ POWERPC_DEF("MPC8260_HiP3", CPU_POWERPC_MPC8260_HiP3, G2),
+ /* MPC8260 HiP4 */
+ POWERPC_DEF("MPC8260_HiP4", CPU_POWERPC_MPC8260_HiP4, G2),
+ /* MPC8264 */
+ POWERPC_DEF("MPC8264", CPU_POWERPC_MPC8264, G2),
+ /* MPC8264 HiP3 */
+ POWERPC_DEF("MPC8264_HiP3", CPU_POWERPC_MPC8264_HiP3, G2),
+ /* MPC8264 HiP4 */
+ POWERPC_DEF("MPC8264_HiP4", CPU_POWERPC_MPC8264_HiP4, G2),
+ /* MPC8265 */
+ POWERPC_DEF("MPC8265", CPU_POWERPC_MPC8265, G2),
+ /* MPC8265 HiP3 */
+ POWERPC_DEF("MPC8265_HiP3", CPU_POWERPC_MPC8265_HiP3, G2),
+ /* MPC8265 HiP4 */
+ POWERPC_DEF("MPC8265_HiP4", CPU_POWERPC_MPC8265_HiP4, G2),
+ /* MPC8266 */
+ POWERPC_DEF("MPC8266", CPU_POWERPC_MPC8266, G2),
+ /* MPC8266 HiP3 */
+ POWERPC_DEF("MPC8266_HiP3", CPU_POWERPC_MPC8266_HiP3, G2),
+ /* MPC8266 HiP4 */
+ POWERPC_DEF("MPC8266_HiP4", CPU_POWERPC_MPC8266_HiP4, G2),
+ /* MPC8270 */
+ POWERPC_DEF("MPC8270", CPU_POWERPC_MPC8270, G2LE),
+ /* MPC8271 */
+ POWERPC_DEF("MPC8271", CPU_POWERPC_MPC8271, G2LE),
+ /* MPC8272 */
+ POWERPC_DEF("MPC8272", CPU_POWERPC_MPC8272, G2LE),
+ /* MPC8275 */
+ POWERPC_DEF("MPC8275", CPU_POWERPC_MPC8275, G2LE),
+ /* MPC8280 */
+ POWERPC_DEF("MPC8280", CPU_POWERPC_MPC8280, G2LE),
+ /* e200 family */
+ /* Generic PowerPC e200 core */
+ POWERPC_DEF("e200", CPU_POWERPC_e200, e200),
+ /* Generic MPC55xx core */
+#if defined (TODO)
+ POWERPC_DEF_SVR("MPC55xx",
+ CPU_POWERPC_MPC55xx, POWERPC_SVR_55xx, e200),
+#endif
+#if defined (TODO)
+ /* PowerPC e200z0 core */
+ POWERPC_DEF("e200z0", CPU_POWERPC_e200z0, e200),
+#endif
+#if defined (TODO)
+ /* PowerPC e200z1 core */
+ POWERPC_DEF("e200z1", CPU_POWERPC_e200z1, e200),
+#endif
+#if defined (TODO)
+ /* PowerPC e200z3 core */
+ POWERPC_DEF("e200z3", CPU_POWERPC_e200z3, e200),
+#endif
+ /* PowerPC e200z5 core */
+ POWERPC_DEF("e200z5", CPU_POWERPC_e200z5, e200),
+ /* PowerPC e200z6 core */
+ POWERPC_DEF("e200z6", CPU_POWERPC_e200z6, e200),
+ /* PowerPC e200 microcontrollers */
+#if defined (TODO)
+ /* MPC5514E */
+ POWERPC_DEF_SVR("MPC5514E",
+ CPU_POWERPC_MPC5514E, POWERPC_SVR_5514E, e200),
+#endif
+#if defined (TODO)
+ /* MPC5514E v0 */
+ POWERPC_DEF_SVR("MPC5514E_v0",
+ CPU_POWERPC_MPC5514E_v0, POWERPC_SVR_5514E_v0, e200),
+#endif
+#if defined (TODO)
+ /* MPC5514E v1 */
+ POWERPC_DEF_SVR("MPC5514E_v1",
+ CPU_POWERPC_MPC5514E_v1, POWERPC_SVR_5514E_v1, e200),
+#endif
+#if defined (TODO)
+ /* MPC5514G */
+ POWERPC_DEF_SVR("MPC5514G",
+ CPU_POWERPC_MPC5514G, POWERPC_SVR_5514G, e200),
+#endif
+#if defined (TODO)
+ /* MPC5514G v0 */
+ POWERPC_DEF_SVR("MPC5514G_v0",
+ CPU_POWERPC_MPC5514G_v0, POWERPC_SVR_5514G_v0, e200),
+#endif
+#if defined (TODO)
+ /* MPC5514G v1 */
+ POWERPC_DEF_SVR("MPC5514G_v1",
+ CPU_POWERPC_MPC5514G_v1, POWERPC_SVR_5514G_v1, e200),
+#endif
+#if defined (TODO)
+ /* MPC5515S */
+ POWERPC_DEF_SVR("MPC5515S",
+ CPU_POWERPC_MPC5515S, POWERPC_SVR_5515S, e200),
+#endif
+#if defined (TODO)
+ /* MPC5516E */
+ POWERPC_DEF_SVR("MPC5516E",
+ CPU_POWERPC_MPC5516E, POWERPC_SVR_5516E, e200),
+#endif
+#if defined (TODO)
+ /* MPC5516E v0 */
+ POWERPC_DEF_SVR("MPC5516E_v0",
+ CPU_POWERPC_MPC5516E_v0, POWERPC_SVR_5516E_v0, e200),
+#endif
+#if defined (TODO)
+ /* MPC5516E v1 */
+ POWERPC_DEF_SVR("MPC5516E_v1",
+ CPU_POWERPC_MPC5516E_v1, POWERPC_SVR_5516E_v1, e200),
+#endif
+#if defined (TODO)
+ /* MPC5516G */
+ POWERPC_DEF_SVR("MPC5516G",
+ CPU_POWERPC_MPC5516G, POWERPC_SVR_5516G, e200),
+#endif
+#if defined (TODO)
+ /* MPC5516G v0 */
+ POWERPC_DEF_SVR("MPC5516G_v0",
+ CPU_POWERPC_MPC5516G_v0, POWERPC_SVR_5516G_v0, e200),
+#endif
+#if defined (TODO)
+ /* MPC5516G v1 */
+ POWERPC_DEF_SVR("MPC5516G_v1",
+ CPU_POWERPC_MPC5516G_v1, POWERPC_SVR_5516G_v1, e200),
+#endif
+#if defined (TODO)
+ /* MPC5516S */
+ POWERPC_DEF_SVR("MPC5516S",
+ CPU_POWERPC_MPC5516S, POWERPC_SVR_5516S, e200),
+#endif
+#if defined (TODO)
+ /* MPC5533 */
+ POWERPC_DEF_SVR("MPC5533",
+ CPU_POWERPC_MPC5533, POWERPC_SVR_5533, e200),
+#endif
+#if defined (TODO)
+ /* MPC5534 */
+ POWERPC_DEF_SVR("MPC5534",
+ CPU_POWERPC_MPC5534, POWERPC_SVR_5534, e200),
+#endif
+#if defined (TODO)
+ /* MPC5553 */
+ POWERPC_DEF_SVR("MPC5553",
+ CPU_POWERPC_MPC5553, POWERPC_SVR_5553, e200),
+#endif
+#if defined (TODO)
+ /* MPC5554 */
+ POWERPC_DEF_SVR("MPC5554",
+ CPU_POWERPC_MPC5554, POWERPC_SVR_5554, e200),
+#endif
+#if defined (TODO)
+ /* MPC5561 */
+ POWERPC_DEF_SVR("MPC5561",
+ CPU_POWERPC_MPC5561, POWERPC_SVR_5561, e200),
+#endif
+#if defined (TODO)
+ /* MPC5565 */
+ POWERPC_DEF_SVR("MPC5565",
+ CPU_POWERPC_MPC5565, POWERPC_SVR_5565, e200),
+#endif
+#if defined (TODO)
+ /* MPC5566 */
+ POWERPC_DEF_SVR("MPC5566",
+ CPU_POWERPC_MPC5566, POWERPC_SVR_5566, e200),
+#endif
+#if defined (TODO)
+ /* MPC5567 */
+ POWERPC_DEF_SVR("MPC5567",
+ CPU_POWERPC_MPC5567, POWERPC_SVR_5567, e200),
+#endif
+ /* e300 family */
+ /* Generic PowerPC e300 core */
+ POWERPC_DEF("e300", CPU_POWERPC_e300, e300),
+ /* PowerPC e300c1 core */
+ POWERPC_DEF("e300c1", CPU_POWERPC_e300c1, e300),
+ /* PowerPC e300c2 core */
+ POWERPC_DEF("e300c2", CPU_POWERPC_e300c2, e300),
+ /* PowerPC e300c3 core */
+ POWERPC_DEF("e300c3", CPU_POWERPC_e300c3, e300),
+ /* PowerPC e300c4 core */
+ POWERPC_DEF("e300c4", CPU_POWERPC_e300c4, e300),
+ /* PowerPC e300 microcontrollers */
+#if defined (TODO)
+ /* MPC8313 */
+ POWERPC_DEF_SVR("MPC8313",
+ CPU_POWERPC_MPC831x, POWERPC_SVR_8313, e300),
+#endif
+#if defined (TODO)
+ /* MPC8313E */
+ POWERPC_DEF_SVR("MPC8313E",
+ CPU_POWERPC_MPC831x, POWERPC_SVR_8313E, e300),
+#endif
+#if defined (TODO)
+ /* MPC8314 */
+ POWERPC_DEF_SVR("MPC8314",
+ CPU_POWERPC_MPC831x, POWERPC_SVR_8314, e300),
+#endif
+#if defined (TODO)
+ /* MPC8314E */
+ POWERPC_DEF_SVR("MPC8314E",
+ CPU_POWERPC_MPC831x, POWERPC_SVR_8314E, e300),
+#endif
+#if defined (TODO)
+ /* MPC8315 */
+ POWERPC_DEF_SVR("MPC8315",
+ CPU_POWERPC_MPC831x, POWERPC_SVR_8315, e300),
+#endif
+#if defined (TODO)
+ /* MPC8315E */
+ POWERPC_DEF_SVR("MPC8315E",
+ CPU_POWERPC_MPC831x, POWERPC_SVR_8315E, e300),
+#endif
+#if defined (TODO)
+ /* MPC8321 */
+ POWERPC_DEF_SVR("MPC8321",
+ CPU_POWERPC_MPC832x, POWERPC_SVR_8321, e300),
+#endif
+#if defined (TODO)
+ /* MPC8321E */
+ POWERPC_DEF_SVR("MPC8321E",
+ CPU_POWERPC_MPC832x, POWERPC_SVR_8321E, e300),
+#endif
+#if defined (TODO)
+ /* MPC8323 */
+ POWERPC_DEF_SVR("MPC8323",
+ CPU_POWERPC_MPC832x, POWERPC_SVR_8323, e300),
+#endif
+#if defined (TODO)
+ /* MPC8323E */
+ POWERPC_DEF_SVR("MPC8323E",
+ CPU_POWERPC_MPC832x, POWERPC_SVR_8323E, e300),
+#endif
+ /* MPC8343 */
+ POWERPC_DEF_SVR("MPC8343",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8343, e300),
+ /* MPC8343A */
+ POWERPC_DEF_SVR("MPC8343A",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8343A, e300),
+ /* MPC8343E */
+ POWERPC_DEF_SVR("MPC8343E",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8343E, e300),
+ /* MPC8343EA */
+ POWERPC_DEF_SVR("MPC8343EA",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8343EA, e300),
+ /* MPC8347 */
+ POWERPC_DEF_SVR("MPC8347",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347, e300),
+ /* MPC8347T */
+ POWERPC_DEF_SVR("MPC8347T",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347T, e300),
+ /* MPC8347P */
+ POWERPC_DEF_SVR("MPC8347P",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347P, e300),
+ /* MPC8347A */
+ POWERPC_DEF_SVR("MPC8347A",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347A, e300),
+ /* MPC8347AT */
+ POWERPC_DEF_SVR("MPC8347AT",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347AT, e300),
+ /* MPC8347AP */
+ POWERPC_DEF_SVR("MPC8347AP",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347AP, e300),
+ /* MPC8347E */
+ POWERPC_DEF_SVR("MPC8347E",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347E, e300),
+ /* MPC8347ET */
+ POWERPC_DEF_SVR("MPC8347ET",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347ET, e300),
+ /* MPC8347EP */
+ POWERPC_DEF_SVR("MPC8347EP",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347EP, e300),
+ /* MPC8347EA */
+ POWERPC_DEF_SVR("MPC8347EA",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347EA, e300),
+ /* MPC8347EAT */
+ POWERPC_DEF_SVR("MPC8347EAT",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347EAT, e300),
+ /* MPC8347EAP */
+ POWERPC_DEF_SVR("MPC8347EAP",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347EAP, e300),
+ /* MPC8349 */
+ POWERPC_DEF_SVR("MPC8349",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8349, e300),
+ /* MPC8349A */
+ POWERPC_DEF_SVR("MPC8349A",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8349A, e300),
+ /* MPC8349E */
+ POWERPC_DEF_SVR("MPC8349E",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8349E, e300),
+ /* MPC8349EA */
+ POWERPC_DEF_SVR("MPC8349EA",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8349EA, e300),
+#if defined (TODO)
+ /* MPC8358E */
+ POWERPC_DEF_SVR("MPC8358E",
+ CPU_POWERPC_MPC835x, POWERPC_SVR_8358E, e300),
+#endif
+#if defined (TODO)
+ /* MPC8360E */
+ POWERPC_DEF_SVR("MPC8360E",
+ CPU_POWERPC_MPC836x, POWERPC_SVR_8360E, e300),
+#endif
+ /* MPC8377 */
+ POWERPC_DEF_SVR("MPC8377",
+ CPU_POWERPC_MPC837x, POWERPC_SVR_8377, e300),
+ /* MPC8377E */
+ POWERPC_DEF_SVR("MPC8377E",
+ CPU_POWERPC_MPC837x, POWERPC_SVR_8377E, e300),
+ /* MPC8378 */
+ POWERPC_DEF_SVR("MPC8378",
+ CPU_POWERPC_MPC837x, POWERPC_SVR_8378, e300),
+ /* MPC8378E */
+ POWERPC_DEF_SVR("MPC8378E",
+ CPU_POWERPC_MPC837x, POWERPC_SVR_8378E, e300),
+ /* MPC8379 */
+ POWERPC_DEF_SVR("MPC8379",
+ CPU_POWERPC_MPC837x, POWERPC_SVR_8379, e300),
+ /* MPC8379E */
+ POWERPC_DEF_SVR("MPC8379E",
+ CPU_POWERPC_MPC837x, POWERPC_SVR_8379E, e300),
+ /* e500 family */
+ /* PowerPC e500 core */
+ POWERPC_DEF("e500", CPU_POWERPC_e500v2_v22, e500v2),
+ /* PowerPC e500v1 core */
+ POWERPC_DEF("e500v1", CPU_POWERPC_e500v1, e500v1),
+ /* PowerPC e500 v1.0 core */
+ POWERPC_DEF("e500_v10", CPU_POWERPC_e500v1_v10, e500v1),
+ /* PowerPC e500 v2.0 core */
+ POWERPC_DEF("e500_v20", CPU_POWERPC_e500v1_v20, e500v1),
+ /* PowerPC e500v2 core */
+ POWERPC_DEF("e500v2", CPU_POWERPC_e500v2, e500v2),
+ /* PowerPC e500v2 v1.0 core */
+ POWERPC_DEF("e500v2_v10", CPU_POWERPC_e500v2_v10, e500v2),
+ /* PowerPC e500v2 v2.0 core */
+ POWERPC_DEF("e500v2_v20", CPU_POWERPC_e500v2_v20, e500v2),
+ /* PowerPC e500v2 v2.1 core */
+ POWERPC_DEF("e500v2_v21", CPU_POWERPC_e500v2_v21, e500v2),
+ /* PowerPC e500v2 v2.2 core */
+ POWERPC_DEF("e500v2_v22", CPU_POWERPC_e500v2_v22, e500v2),
+ /* PowerPC e500v2 v3.0 core */
+ POWERPC_DEF("e500v2_v30", CPU_POWERPC_e500v2_v30, e500v2),
+ /* PowerPC e500 microcontrollers */
+ /* MPC8533 */
+ POWERPC_DEF_SVR("MPC8533",
+ CPU_POWERPC_MPC8533, POWERPC_SVR_8533, e500v2),
+ /* MPC8533 v1.0 */
+ POWERPC_DEF_SVR("MPC8533_v10",
+ CPU_POWERPC_MPC8533_v10, POWERPC_SVR_8533_v10, e500v2),
+ /* MPC8533 v1.1 */
+ POWERPC_DEF_SVR("MPC8533_v11",
+ CPU_POWERPC_MPC8533_v11, POWERPC_SVR_8533_v11, e500v2),
+ /* MPC8533E */
+ POWERPC_DEF_SVR("MPC8533E",
+ CPU_POWERPC_MPC8533E, POWERPC_SVR_8533E, e500v2),
+ /* MPC8533E v1.0 */
+ POWERPC_DEF_SVR("MPC8533E_v10",
+ CPU_POWERPC_MPC8533E_v10, POWERPC_SVR_8533E_v10, e500v2),
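+ /* MPC8533E v1.1 */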
+ POWERPC_DEF_SVR("MPC8533E_v11",
+ CPU_POWERPC_MPC8533E_v11, POWERPC_SVR_8533E_v11, e500v2),
+ /* MPC8540 */
+ POWERPC_DEF_SVR("MPC8540",
+ CPU_POWERPC_MPC8540, POWERPC_SVR_8540, e500v1),
+ /* MPC8540 v1.0 */
+ POWERPC_DEF_SVR("MPC8540_v10",
+ CPU_POWERPC_MPC8540_v10, POWERPC_SVR_8540_v10, e500v1),
+ /* MPC8540 v2.0 */
+ POWERPC_DEF_SVR("MPC8540_v20",
+ CPU_POWERPC_MPC8540_v20, POWERPC_SVR_8540_v20, e500v1),
+ /* MPC8540 v2.1 */
+ POWERPC_DEF_SVR("MPC8540_v21",
+ CPU_POWERPC_MPC8540_v21, POWERPC_SVR_8540_v21, e500v1),
+ /* MPC8541 */
+ POWERPC_DEF_SVR("MPC8541",
+ CPU_POWERPC_MPC8541, POWERPC_SVR_8541, e500v1),
+ /* MPC8541 v1.0 */
+ POWERPC_DEF_SVR("MPC8541_v10",
+ CPU_POWERPC_MPC8541_v10, POWERPC_SVR_8541_v10, e500v1),
+ /* MPC8541 v1.1 */
+ POWERPC_DEF_SVR("MPC8541_v11",
+ CPU_POWERPC_MPC8541_v11, POWERPC_SVR_8541_v11, e500v1),
+ /* MPC8541E */
+ POWERPC_DEF_SVR("MPC8541E",
+ CPU_POWERPC_MPC8541E, POWERPC_SVR_8541E, e500v1),
+ /* MPC8541E v1.0 */
+ POWERPC_DEF_SVR("MPC8541E_v10",
+ CPU_POWERPC_MPC8541E_v10, POWERPC_SVR_8541E_v10, e500v1),
+ /* MPC8541E v1.1 */
+ POWERPC_DEF_SVR("MPC8541E_v11",
+ CPU_POWERPC_MPC8541E_v11, POWERPC_SVR_8541E_v11, e500v1),
+ /* MPC8543 */
+ POWERPC_DEF_SVR("MPC8543",
+ CPU_POWERPC_MPC8543, POWERPC_SVR_8543, e500v2),
+ /* MPC8543 v1.0 */
+ POWERPC_DEF_SVR("MPC8543_v10",
+ CPU_POWERPC_MPC8543_v10, POWERPC_SVR_8543_v10, e500v2),
+ /* MPC8543 v1.1 */
+ POWERPC_DEF_SVR("MPC8543_v11",
+ CPU_POWERPC_MPC8543_v11, POWERPC_SVR_8543_v11, e500v2),
+ /* MPC8543 v2.0 */
+ POWERPC_DEF_SVR("MPC8543_v20",
+ CPU_POWERPC_MPC8543_v20, POWERPC_SVR_8543_v20, e500v2),
+ /* MPC8543 v2.1 */
+ POWERPC_DEF_SVR("MPC8543_v21",
+ CPU_POWERPC_MPC8543_v21, POWERPC_SVR_8543_v21, e500v2),
+ /* MPC8543E */
+ POWERPC_DEF_SVR("MPC8543E",
+ CPU_POWERPC_MPC8543E, POWERPC_SVR_8543E, e500v2),
+ /* MPC8543E v1.0 */
+ POWERPC_DEF_SVR("MPC8543E_v10",
+ CPU_POWERPC_MPC8543E_v10, POWERPC_SVR_8543E_v10, e500v2),
+ /* MPC8543E v1.1 */
+ POWERPC_DEF_SVR("MPC8543E_v11",
+ CPU_POWERPC_MPC8543E_v11, POWERPC_SVR_8543E_v11, e500v2),
+ /* MPC8543E v2.0 */
+ POWERPC_DEF_SVR("MPC8543E_v20",
+ CPU_POWERPC_MPC8543E_v20, POWERPC_SVR_8543E_v20, e500v2),
+ /* MPC8543E v2.1 */
+ POWERPC_DEF_SVR("MPC8543E_v21",
+ CPU_POWERPC_MPC8543E_v21, POWERPC_SVR_8543E_v21, e500v2),
+ /* MPC8544 */
+ POWERPC_DEF_SVR("MPC8544",
+ CPU_POWERPC_MPC8544, POWERPC_SVR_8544, e500v2),
+ /* MPC8544 v1.0 */
+ POWERPC_DEF_SVR("MPC8544_v10",
+ CPU_POWERPC_MPC8544_v10, POWERPC_SVR_8544_v10, e500v2),
+ /* MPC8544 v1.1 */
+ POWERPC_DEF_SVR("MPC8544_v11",
+ CPU_POWERPC_MPC8544_v11, POWERPC_SVR_8544_v11, e500v2),
+ /* MPC8544E */
+ POWERPC_DEF_SVR("MPC8544E",
+ CPU_POWERPC_MPC8544E, POWERPC_SVR_8544E, e500v2),
+ /* MPC8544E v1.0 */
+ POWERPC_DEF_SVR("MPC8544E_v10",
+ CPU_POWERPC_MPC8544E_v10, POWERPC_SVR_8544E_v10, e500v2),
+ /* MPC8544E v1.1 */
+ POWERPC_DEF_SVR("MPC8544E_v11",
+ CPU_POWERPC_MPC8544E_v11, POWERPC_SVR_8544E_v11, e500v2),
+ /* MPC8545 */
+ POWERPC_DEF_SVR("MPC8545",
+ CPU_POWERPC_MPC8545, POWERPC_SVR_8545, e500v2),
+ /* MPC8545 v2.0 */
+ POWERPC_DEF_SVR("MPC8545_v20",
+ CPU_POWERPC_MPC8545_v20, POWERPC_SVR_8545_v20, e500v2),
+ /* MPC8545 v2.1 */
+ POWERPC_DEF_SVR("MPC8545_v21",
+ CPU_POWERPC_MPC8545_v21, POWERPC_SVR_8545_v21, e500v2),
+ /* MPC8545E */
+ POWERPC_DEF_SVR("MPC8545E",
+ CPU_POWERPC_MPC8545E, POWERPC_SVR_8545E, e500v2),
+ /* MPC8545E v2.0 */
+ POWERPC_DEF_SVR("MPC8545E_v20",
+ CPU_POWERPC_MPC8545E_v20, POWERPC_SVR_8545E_v20, e500v2),
+ /* MPC8545E v2.1 */
+ POWERPC_DEF_SVR("MPC8545E_v21",
+ CPU_POWERPC_MPC8545E_v21, POWERPC_SVR_8545E_v21, e500v2),
+ /* MPC8547E */
+ POWERPC_DEF_SVR("MPC8547E",
+ CPU_POWERPC_MPC8547E, POWERPC_SVR_8547E, e500v2),
+ /* MPC8547E v2.0 */
+ POWERPC_DEF_SVR("MPC8547E_v20",
+ CPU_POWERPC_MPC8547E_v20, POWERPC_SVR_8547E_v20, e500v2),
+ /* MPC8547E v2.1 */
+ POWERPC_DEF_SVR("MPC8547E_v21",
+ CPU_POWERPC_MPC8547E_v21, POWERPC_SVR_8547E_v21, e500v2),
+ /* MPC8548 */
+ POWERPC_DEF_SVR("MPC8548",
+ CPU_POWERPC_MPC8548, POWERPC_SVR_8548, e500v2),
+ /* MPC8548 v1.0 */
+ POWERPC_DEF_SVR("MPC8548_v10",
+ CPU_POWERPC_MPC8548_v10, POWERPC_SVR_8548_v10, e500v2),
+ /* MPC8548 v1.1 */
+ POWERPC_DEF_SVR("MPC8548_v11",
+ CPU_POWERPC_MPC8548_v11, POWERPC_SVR_8548_v11, e500v2),
+ /* MPC8548 v2.0 */
+ POWERPC_DEF_SVR("MPC8548_v20",
+ CPU_POWERPC_MPC8548_v20, POWERPC_SVR_8548_v20, e500v2),
+ /* MPC8548 v2.1 */
+ POWERPC_DEF_SVR("MPC8548_v21",
+ CPU_POWERPC_MPC8548_v21, POWERPC_SVR_8548_v21, e500v2),
+ /* MPC8548E */
+ POWERPC_DEF_SVR("MPC8548E",
+ CPU_POWERPC_MPC8548E, POWERPC_SVR_8548E, e500v2),
+ /* MPC8548E v1.0 */
+ POWERPC_DEF_SVR("MPC8548E_v10",
+ CPU_POWERPC_MPC8548E_v10, POWERPC_SVR_8548E_v10, e500v2),
+ /* MPC8548E v1.1 */
+ POWERPC_DEF_SVR("MPC8548E_v11",
+ CPU_POWERPC_MPC8548E_v11, POWERPC_SVR_8548E_v11, e500v2),
+ /* MPC8548E v2.0 */
+ POWERPC_DEF_SVR("MPC8548E_v20",
+ CPU_POWERPC_MPC8548E_v20, POWERPC_SVR_8548E_v20, e500v2),
+ /* MPC8548E v2.1 */
+ POWERPC_DEF_SVR("MPC8548E_v21",
+ CPU_POWERPC_MPC8548E_v21, POWERPC_SVR_8548E_v21, e500v2),
+ /* MPC8555 */
+ POWERPC_DEF_SVR("MPC8555",
+ CPU_POWERPC_MPC8555, POWERPC_SVR_8555, e500v2),
+ /* MPC8555 v1.0 */
+ POWERPC_DEF_SVR("MPC8555_v10",
+ CPU_POWERPC_MPC8555_v10, POWERPC_SVR_8555_v10, e500v2),
+ /* MPC8555 v1.1 */
+ POWERPC_DEF_SVR("MPC8555_v11",
+ CPU_POWERPC_MPC8555_v11, POWERPC_SVR_8555_v11, e500v2),
+ /* MPC8555E */
+ POWERPC_DEF_SVR("MPC8555E",
+ CPU_POWERPC_MPC8555E, POWERPC_SVR_8555E, e500v2),
+ /* MPC8555E v1.0 */
+ POWERPC_DEF_SVR("MPC8555E_v10",
+ CPU_POWERPC_MPC8555E_v10, POWERPC_SVR_8555E_v10, e500v2),
+ /* MPC8555E v1.1 */
+ POWERPC_DEF_SVR("MPC8555E_v11",
+ CPU_POWERPC_MPC8555E_v11, POWERPC_SVR_8555E_v11, e500v2),
+ /* MPC8560 */
+ POWERPC_DEF_SVR("MPC8560",
+ CPU_POWERPC_MPC8560, POWERPC_SVR_8560, e500v2),
+ /* MPC8560 v1.0 */
+ POWERPC_DEF_SVR("MPC8560_v10",
+ CPU_POWERPC_MPC8560_v10, POWERPC_SVR_8560_v10, e500v2),
+ /* MPC8560 v2.0 */
+ POWERPC_DEF_SVR("MPC8560_v20",
+ CPU_POWERPC_MPC8560_v20, POWERPC_SVR_8560_v20, e500v2),
+ /* MPC8560 v2.1 */
+ POWERPC_DEF_SVR("MPC8560_v21",
+ CPU_POWERPC_MPC8560_v21, POWERPC_SVR_8560_v21, e500v2),
+ /* MPC8567 */
+ POWERPC_DEF_SVR("MPC8567",
+ CPU_POWERPC_MPC8567, POWERPC_SVR_8567, e500v2),
+ /* MPC8567E */
+ POWERPC_DEF_SVR("MPC8567E",
+ CPU_POWERPC_MPC8567E, POWERPC_SVR_8567E, e500v2),
+ /* MPC8568 */
+ POWERPC_DEF_SVR("MPC8568",
+ CPU_POWERPC_MPC8568, POWERPC_SVR_8568, e500v2),
+ /* MPC8568E */
+ POWERPC_DEF_SVR("MPC8568E",
+ CPU_POWERPC_MPC8568E, POWERPC_SVR_8568E, e500v2),
+ /* MPC8572 */
+ POWERPC_DEF_SVR("MPC8572",
+ CPU_POWERPC_MPC8572, POWERPC_SVR_8572, e500v2),
+ /* MPC8572E */
+ POWERPC_DEF_SVR("MPC8572E",
+ CPU_POWERPC_MPC8572E, POWERPC_SVR_8572E, e500v2),
+ /* e600 family */
+ /* PowerPC e600 core */
+ POWERPC_DEF("e600", CPU_POWERPC_e600, 7400),
+ /* PowerPC e600 microcontrollers */
+#if defined (TODO)
+ /* MPC8610 */
+ POWERPC_DEF_SVR("MPC8610",
+ CPU_POWERPC_MPC8610, POWERPC_SVR_8610, 7400),
+#endif
+ /* MPC8641 */
+ POWERPC_DEF_SVR("MPC8641",
+ CPU_POWERPC_MPC8641, POWERPC_SVR_8641, 7400),
+ /* MPC8641D */
+ POWERPC_DEF_SVR("MPC8641D",
+ CPU_POWERPC_MPC8641D, POWERPC_SVR_8641D, 7400),
+ /* 32-bit "classic" PowerPC */
+ /* PowerPC 6xx family */
+ /* PowerPC 601 */
+ POWERPC_DEF("601", CPU_POWERPC_601, 601v),
+ /* PowerPC 601v0 */
+ POWERPC_DEF("601_v0", CPU_POWERPC_601_v0, 601),
+ /* PowerPC 601v1 */
+ POWERPC_DEF("601_v1", CPU_POWERPC_601_v1, 601),
+ /* PowerPC 601v */
+ POWERPC_DEF("601v", CPU_POWERPC_601v, 601v),
+ /* PowerPC 601v2 */
+ POWERPC_DEF("601_v2", CPU_POWERPC_601_v2, 601v),
+ /* PowerPC 602 */
+ POWERPC_DEF("602", CPU_POWERPC_602, 602),
+ /* PowerPC 603 */
+ POWERPC_DEF("603", CPU_POWERPC_603, 603),
+ /* Code name for PowerPC 603 */
+ POWERPC_DEF("Vanilla", CPU_POWERPC_603, 603),
+ /* PowerPC 603e (aka PID6) */
+ POWERPC_DEF("603e", CPU_POWERPC_603E, 603E),
+ /* Code name for PowerPC 603e */
+ POWERPC_DEF("Stretch", CPU_POWERPC_603E, 603E),
+ /* PowerPC 603e v1.1 */
+ POWERPC_DEF("603e_v1.1", CPU_POWERPC_603E_v11, 603E),
+ /* PowerPC 603e v1.2 */
+ POWERPC_DEF("603e_v1.2", CPU_POWERPC_603E_v12, 603E),
+ /* PowerPC 603e v1.3 */
+ POWERPC_DEF("603e_v1.3", CPU_POWERPC_603E_v13, 603E),
+ /* PowerPC 603e v1.4 */
+ POWERPC_DEF("603e_v1.4", CPU_POWERPC_603E_v14, 603E),
+ /* PowerPC 603e v2.2 */
+ POWERPC_DEF("603e_v2.2", CPU_POWERPC_603E_v22, 603E),
+ /* PowerPC 603e v3 */
+ POWERPC_DEF("603e_v3", CPU_POWERPC_603E_v3, 603E),
+ /* PowerPC 603e v4 */
+ POWERPC_DEF("603e_v4", CPU_POWERPC_603E_v4, 603E),
+ /* PowerPC 603e v4.1 */
+ POWERPC_DEF("603e_v4.1", CPU_POWERPC_603E_v41, 603E),
+ /* PowerPC 603e (aka PID7) */
+ POWERPC_DEF("603e7", CPU_POWERPC_603E7, 603E),
+ /* PowerPC 603e7t */
+ POWERPC_DEF("603e7t", CPU_POWERPC_603E7t, 603E),
+ /* PowerPC 603e7v */
+ POWERPC_DEF("603e7v", CPU_POWERPC_603E7v, 603E),
+ /* Code name for PowerPC 603ev */
+ POWERPC_DEF("Vaillant", CPU_POWERPC_603E7v, 603E),
+ /* PowerPC 603e7v1 */
+ POWERPC_DEF("603e7v1", CPU_POWERPC_603E7v1, 603E),
+ /* PowerPC 603e7v2 */
+ POWERPC_DEF("603e7v2", CPU_POWERPC_603E7v2, 603E),
+ /* PowerPC 603p (aka PID7v) */
+ POWERPC_DEF("603p", CPU_POWERPC_603P, 603E),
+ /* PowerPC 603r (aka PID7t) */
+ POWERPC_DEF("603r", CPU_POWERPC_603R, 603E),
+ /* Code name for PowerPC 603r */
+ POWERPC_DEF("Goldeneye", CPU_POWERPC_603R, 603E),
+ /* PowerPC 604 */
+ POWERPC_DEF("604", CPU_POWERPC_604, 604),
+ /* PowerPC 604e (aka PID9) */
+ POWERPC_DEF("604e", CPU_POWERPC_604E, 604E),
+ /* Code name for PowerPC 604e */
+ POWERPC_DEF("Sirocco", CPU_POWERPC_604E, 604E),
+ /* PowerPC 604e v1.0 */
+ POWERPC_DEF("604e_v1.0", CPU_POWERPC_604E_v10, 604E),
+ /* PowerPC 604e v2.2 */
+ POWERPC_DEF("604e_v2.2", CPU_POWERPC_604E_v22, 604E),
+ /* PowerPC 604e v2.4 */
+ POWERPC_DEF("604e_v2.4", CPU_POWERPC_604E_v24, 604E),
+ /* PowerPC 604r (aka PIDA) */
+ POWERPC_DEF("604r", CPU_POWERPC_604R, 604E),
+ /* Code name for PowerPC 604r */
+ POWERPC_DEF("Mach5", CPU_POWERPC_604R, 604E),
+#if defined(TODO)
+ /* PowerPC 604ev */
+ POWERPC_DEF("604ev", CPU_POWERPC_604EV, 604E),
+#endif
+ /* PowerPC 7xx family */
+ /* Generic PowerPC 740 (G3) */
+ POWERPC_DEF("740", CPU_POWERPC_7x0, 740),
+ /* Code name for PowerPC 740 */
+ POWERPC_DEF("Arthur", CPU_POWERPC_7x0, 740),
+ /* Generic PowerPC 750 (G3) */
+ POWERPC_DEF("750", CPU_POWERPC_7x0, 750),
+ /* Code name for PowerPC 750 */
+ POWERPC_DEF("Typhoon", CPU_POWERPC_7x0, 750),
+ /* PowerPC 740/750 is also known as G3 */
+ POWERPC_DEF("G3", CPU_POWERPC_7x0, 750),
+ /* PowerPC 740 v1.0 (G3) */
+ POWERPC_DEF("740_v1.0", CPU_POWERPC_7x0_v10, 740),
+ /* PowerPC 750 v1.0 (G3) */
+ POWERPC_DEF("750_v1.0", CPU_POWERPC_7x0_v10, 750),
+ /* PowerPC 740 v2.0 (G3) */
+ POWERPC_DEF("740_v2.0", CPU_POWERPC_7x0_v20, 740),
+ /* PowerPC 750 v2.0 (G3) */
+ POWERPC_DEF("750_v2.0", CPU_POWERPC_7x0_v20, 750),
+ /* PowerPC 740 v2.1 (G3) */
+ POWERPC_DEF("740_v2.1", CPU_POWERPC_7x0_v21, 740),
+ /* PowerPC 750 v2.1 (G3) */
+ POWERPC_DEF("750_v2.1", CPU_POWERPC_7x0_v21, 750),
+ /* PowerPC 740 v2.2 (G3) */
+ POWERPC_DEF("740_v2.2", CPU_POWERPC_7x0_v22, 740),
+ /* PowerPC 750 v2.2 (G3) */
+ POWERPC_DEF("750_v2.2", CPU_POWERPC_7x0_v22, 750),
+ /* PowerPC 740 v3.0 (G3) */
+ POWERPC_DEF("740_v3.0", CPU_POWERPC_7x0_v30, 740),
+ /* PowerPC 750 v3.0 (G3) */
+ POWERPC_DEF("750_v3.0", CPU_POWERPC_7x0_v30, 750),
+ /* PowerPC 740 v3.1 (G3) */
+ POWERPC_DEF("740_v3.1", CPU_POWERPC_7x0_v31, 740),
+ /* PowerPC 750 v3.1 (G3) */
+ POWERPC_DEF("750_v3.1", CPU_POWERPC_7x0_v31, 750),
+ /* PowerPC 740E (G3) */
+ POWERPC_DEF("740e", CPU_POWERPC_740E, 740),
+ /* PowerPC 750E (G3) */
+ POWERPC_DEF("750e", CPU_POWERPC_750E, 750),
+ /* PowerPC 740P (G3) */
+ POWERPC_DEF("740p", CPU_POWERPC_7x0P, 740),
+ /* PowerPC 750P (G3) */
+ POWERPC_DEF("750p", CPU_POWERPC_7x0P, 750),
+ /* Code name for PowerPC 740P/750P (G3) */
+ POWERPC_DEF("Conan/Doyle", CPU_POWERPC_7x0P, 750),
+ /* PowerPC 750CL (G3 embedded) */
+ POWERPC_DEF("750cl", CPU_POWERPC_750CL, 750cl),
+ /* PowerPC 750CL v1.0 */
+ POWERPC_DEF("750cl_v1.0", CPU_POWERPC_750CL_v10, 750cl),
+ /* PowerPC 750CL v2.0 */
+ POWERPC_DEF("750cl_v2.0", CPU_POWERPC_750CL_v20, 750cl),
+ /* PowerPC 750CX (G3 embedded) */
+ POWERPC_DEF("750cx", CPU_POWERPC_750CX, 750cx),
+ /* PowerPC 750CX v1.0 (G3 embedded) */
+ POWERPC_DEF("750cx_v1.0", CPU_POWERPC_750CX_v10, 750cx),
+ /* PowerPC 750CX v2.0 (G3 embedded) */
+ POWERPC_DEF("750cx_v2.0", CPU_POWERPC_750CX_v20, 750cx),
+ /* PowerPC 750CX v2.1 (G3 embedded) */
+ POWERPC_DEF("750cx_v2.1", CPU_POWERPC_750CX_v21, 750cx),
+ /* PowerPC 750CX v2.2 (G3 embedded) */
+ POWERPC_DEF("750cx_v2.2", CPU_POWERPC_750CX_v22, 750cx),
+ /* PowerPC 750CXe (G3 embedded) */
+ POWERPC_DEF("750cxe", CPU_POWERPC_750CXE, 750cx),
+ /* PowerPC 750CXe v2.1 (G3 embedded) */
+ POWERPC_DEF("750cxe_v2.1", CPU_POWERPC_750CXE_v21, 750cx),
+ /* PowerPC 750CXe v2.2 (G3 embedded) */
+ POWERPC_DEF("750cxe_v2.2", CPU_POWERPC_750CXE_v22, 750cx),
+ /* PowerPC 750CXe v2.3 (G3 embedded) */
+ POWERPC_DEF("750cxe_v2.3", CPU_POWERPC_750CXE_v23, 750cx),
+ /* PowerPC 750CXe v2.4 (G3 embedded) */
+ POWERPC_DEF("750cxe_v2.4", CPU_POWERPC_750CXE_v24, 750cx),
+ /* PowerPC 750CXe v2.4b (G3 embedded) */
+ POWERPC_DEF("750cxe_v2.4b", CPU_POWERPC_750CXE_v24b, 750cx),
+ /* PowerPC 750CXe v3.0 (G3 embedded) */
+ POWERPC_DEF("750cxe_v3.0", CPU_POWERPC_750CXE_v30, 750cx),
+ /* PowerPC 750CXe v3.1 (G3 embedded) */
+ POWERPC_DEF("750cxe_v3.1", CPU_POWERPC_750CXE_v31, 750cx),
+ /* PowerPC 750CXe v3.1b (G3 embedded) */
+ POWERPC_DEF("750cxe_v3.1b", CPU_POWERPC_750CXE_v31b, 750cx),
+ /* PowerPC 750CXr (G3 embedded) */
+ POWERPC_DEF("750cxr", CPU_POWERPC_750CXR, 750cx),
+ /* PowerPC 750FL (G3 embedded) */
+ POWERPC_DEF("750fl", CPU_POWERPC_750FL, 750fx),
+ /* PowerPC 750FX (G3 embedded) */
+ POWERPC_DEF("750fx", CPU_POWERPC_750FX, 750fx),
+ /* PowerPC 750FX v1.0 (G3 embedded) */
+ POWERPC_DEF("750fx_v1.0", CPU_POWERPC_750FX_v10, 750fx),
+ /* PowerPC 750FX v2.0 (G3 embedded) */
+ POWERPC_DEF("750fx_v2.0", CPU_POWERPC_750FX_v20, 750fx),
+ /* PowerPC 750FX v2.1 (G3 embedded) */
+ POWERPC_DEF("750fx_v2.1", CPU_POWERPC_750FX_v21, 750fx),
+ /* PowerPC 750FX v2.2 (G3 embedded) */
+ POWERPC_DEF("750fx_v2.2", CPU_POWERPC_750FX_v22, 750fx),
+ /* PowerPC 750FX v2.3 (G3 embedded) */
+ POWERPC_DEF("750fx_v2.3", CPU_POWERPC_750FX_v23, 750fx),
+ /* PowerPC 750GL (G3 embedded) */
+ POWERPC_DEF("750gl", CPU_POWERPC_750GL, 750gx),
+ /* PowerPC 750GX (G3 embedded) */
+ POWERPC_DEF("750gx", CPU_POWERPC_750GX, 750gx),
+ /* PowerPC 750GX v1.0 (G3 embedded) */
+ POWERPC_DEF("750gx_v1.0", CPU_POWERPC_750GX_v10, 750gx),
+ /* PowerPC 750GX v1.1 (G3 embedded) */
+ POWERPC_DEF("750gx_v1.1", CPU_POWERPC_750GX_v11, 750gx),
+ /* PowerPC 750GX v1.2 (G3 embedded) */
+ POWERPC_DEF("750gx_v1.2", CPU_POWERPC_750GX_v12, 750gx),
+ /* PowerPC 750L (G3 embedded) */
+ POWERPC_DEF("750l", CPU_POWERPC_750L, 750),
+ /* Code name for PowerPC 750L (G3 embedded) */
+ POWERPC_DEF("LoneStar", CPU_POWERPC_750L, 750),
+ /* PowerPC 750L v2.0 (G3 embedded) */
+ POWERPC_DEF("750l_v2.0", CPU_POWERPC_750L_v20, 750),
+ /* PowerPC 750L v2.1 (G3 embedded) */
+ POWERPC_DEF("750l_v2.1", CPU_POWERPC_750L_v21, 750),
+ /* PowerPC 750L v2.2 (G3 embedded) */
+ POWERPC_DEF("750l_v2.2", CPU_POWERPC_750L_v22, 750),
+ /* PowerPC 750L v3.0 (G3 embedded) */
+ POWERPC_DEF("750l_v3.0", CPU_POWERPC_750L_v30, 750),
+ /* PowerPC 750L v3.2 (G3 embedded) */
+ POWERPC_DEF("750l_v3.2", CPU_POWERPC_750L_v32, 750),
+ /* Generic PowerPC 745 */
+ POWERPC_DEF("745", CPU_POWERPC_7x5, 745),
+ /* Generic PowerPC 755 */
+ POWERPC_DEF("755", CPU_POWERPC_7x5, 755),
+ /* Code name for PowerPC 745/755 */
+ POWERPC_DEF("Goldfinger", CPU_POWERPC_7x5, 755),
+ /* PowerPC 745 v1.0 */
+ POWERPC_DEF("745_v1.0", CPU_POWERPC_7x5_v10, 745),
+ /* PowerPC 755 v1.0 */
+ POWERPC_DEF("755_v1.0", CPU_POWERPC_7x5_v10, 755),
+ /* PowerPC 745 v1.1 */
+ POWERPC_DEF("745_v1.1", CPU_POWERPC_7x5_v11, 745),
+ /* PowerPC 755 v1.1 */
+ POWERPC_DEF("755_v1.1", CPU_POWERPC_7x5_v11, 755),
+ /* PowerPC 745 v2.0 */
+ POWERPC_DEF("745_v2.0", CPU_POWERPC_7x5_v20, 745),
+ /* PowerPC 755 v2.0 */
+ POWERPC_DEF("755_v2.0", CPU_POWERPC_7x5_v20, 755),
+ /* PowerPC 745 v2.1 */
+ POWERPC_DEF("745_v2.1", CPU_POWERPC_7x5_v21, 745),
+ /* PowerPC 755 v2.1 */
+ POWERPC_DEF("755_v2.1", CPU_POWERPC_7x5_v21, 755),
+ /* PowerPC 745 v2.2 */
+ POWERPC_DEF("745_v2.2", CPU_POWERPC_7x5_v22, 745),
+ /* PowerPC 755 v2.2 */
+ POWERPC_DEF("755_v2.2", CPU_POWERPC_7x5_v22, 755),
+ /* PowerPC 745 v2.3 */
+ POWERPC_DEF("745_v2.3", CPU_POWERPC_7x5_v23, 745),
+ /* PowerPC 755 v2.3 */
+ POWERPC_DEF("755_v2.3", CPU_POWERPC_7x5_v23, 755),
+ /* PowerPC 745 v2.4 */
+ POWERPC_DEF("745_v2.4", CPU_POWERPC_7x5_v24, 745),
+ /* PowerPC 755 v2.4 */
+ POWERPC_DEF("755_v2.4", CPU_POWERPC_7x5_v24, 755),
+ /* PowerPC 745 v2.5 */
+ POWERPC_DEF("745_v2.5", CPU_POWERPC_7x5_v25, 745),
+ /* PowerPC 755 v2.5 */
+ POWERPC_DEF("755_v2.5", CPU_POWERPC_7x5_v25, 755),
+ /* PowerPC 745 v2.6 */
+ POWERPC_DEF("745_v2.6", CPU_POWERPC_7x5_v26, 745),
+ /* PowerPC 755 v2.6 */
+ POWERPC_DEF("755_v2.6", CPU_POWERPC_7x5_v26, 755),
+ /* PowerPC 745 v2.7 */
+ POWERPC_DEF("745_v2.7", CPU_POWERPC_7x5_v27, 745),
+ /* PowerPC 755 v2.7 */
+ POWERPC_DEF("755_v2.7", CPU_POWERPC_7x5_v27, 755),
+ /* PowerPC 745 v2.8 */
+ POWERPC_DEF("745_v2.8", CPU_POWERPC_7x5_v28, 745),
+ /* PowerPC 755 v2.8 */
+ POWERPC_DEF("755_v2.8", CPU_POWERPC_7x5_v28, 755),
+#if defined (TODO)
+ /* PowerPC 745P (G3) */
+ POWERPC_DEF("745p", CPU_POWERPC_7x5P, 745),
+ /* PowerPC 755P (G3) */
+ POWERPC_DEF("755p", CPU_POWERPC_7x5P, 755),
+#endif
+ /* PowerPC 74xx family */
+ /* PowerPC 7400 (G4) */
+ POWERPC_DEF("7400", CPU_POWERPC_7400, 7400),
+ /* Code name for PowerPC 7400 */
+ POWERPC_DEF("Max", CPU_POWERPC_7400, 7400),
+ /* PowerPC 74xx is also well known as G4 */
+ POWERPC_DEF("G4", CPU_POWERPC_7400, 7400),
+ /* PowerPC 7400 v1.0 (G4) */
+ POWERPC_DEF("7400_v1.0", CPU_POWERPC_7400_v10, 7400),
+ /* PowerPC 7400 v1.1 (G4) */
+ POWERPC_DEF("7400_v1.1", CPU_POWERPC_7400_v11, 7400),
+ /* PowerPC 7400 v2.0 (G4) */
+ POWERPC_DEF("7400_v2.0", CPU_POWERPC_7400_v20, 7400),
+ /* PowerPC 7400 v2.1 (G4) */
+ POWERPC_DEF("7400_v2.1", CPU_POWERPC_7400_v21, 7400),
+ /* PowerPC 7400 v2.2 (G4) */
+ POWERPC_DEF("7400_v2.2", CPU_POWERPC_7400_v22, 7400),
+ /* PowerPC 7400 v2.6 (G4) */
+ POWERPC_DEF("7400_v2.6", CPU_POWERPC_7400_v26, 7400),
+ /* PowerPC 7400 v2.7 (G4) */
+ POWERPC_DEF("7400_v2.7", CPU_POWERPC_7400_v27, 7400),
+ /* PowerPC 7400 v2.8 (G4) */
+ POWERPC_DEF("7400_v2.8", CPU_POWERPC_7400_v28, 7400),
+ /* PowerPC 7400 v2.9 (G4) */
+ POWERPC_DEF("7400_v2.9", CPU_POWERPC_7400_v29, 7400),
+ /* PowerPC 7410 (G4) */
+ POWERPC_DEF("7410", CPU_POWERPC_7410, 7410),
+ /* Code name for PowerPC 7410 */
+ POWERPC_DEF("Nitro", CPU_POWERPC_7410, 7410),
+ /* PowerPC 7410 v1.0 (G4) */
+ POWERPC_DEF("7410_v1.0", CPU_POWERPC_7410_v10, 7410),
+ /* PowerPC 7410 v1.1 (G4) */
+ POWERPC_DEF("7410_v1.1", CPU_POWERPC_7410_v11, 7410),
+ /* PowerPC 7410 v1.2 (G4) */
+ POWERPC_DEF("7410_v1.2", CPU_POWERPC_7410_v12, 7410),
+ /* PowerPC 7410 v1.3 (G4) */
+ POWERPC_DEF("7410_v1.3", CPU_POWERPC_7410_v13, 7410),
+ /* PowerPC 7410 v1.4 (G4) */
+ POWERPC_DEF("7410_v1.4", CPU_POWERPC_7410_v14, 7410),
+ /* PowerPC 7448 (G4) */
+ POWERPC_DEF("7448", CPU_POWERPC_7448, 7400),
+ /* PowerPC 7448 v1.0 (G4) */
+ POWERPC_DEF("7448_v1.0", CPU_POWERPC_7448_v10, 7400),
+ /* PowerPC 7448 v1.1 (G4) */
+ POWERPC_DEF("7448_v1.1", CPU_POWERPC_7448_v11, 7400),
+ /* PowerPC 7448 v2.0 (G4) */
+ POWERPC_DEF("7448_v2.0", CPU_POWERPC_7448_v20, 7400),
+ /* PowerPC 7448 v2.1 (G4) */
+ POWERPC_DEF("7448_v2.1", CPU_POWERPC_7448_v21, 7400),
+ /* PowerPC 7450 (G4) */
+ POWERPC_DEF("7450", CPU_POWERPC_7450, 7450),
+ /* Code name for PowerPC 7450 */
+ POWERPC_DEF("Vger", CPU_POWERPC_7450, 7450),
+ /* PowerPC 7450 v1.0 (G4) */
+ POWERPC_DEF("7450_v1.0", CPU_POWERPC_7450_v10, 7450),
+ /* PowerPC 7450 v1.1 (G4) */
+ POWERPC_DEF("7450_v1.1", CPU_POWERPC_7450_v11, 7450),
+ /* PowerPC 7450 v1.2 (G4) */
+ POWERPC_DEF("7450_v1.2", CPU_POWERPC_7450_v12, 7450),
+ /* PowerPC 7450 v2.0 (G4) */
+ POWERPC_DEF("7450_v2.0", CPU_POWERPC_7450_v20, 7450),
+ /* PowerPC 7450 v2.1 (G4) */
+ POWERPC_DEF("7450_v2.1", CPU_POWERPC_7450_v21, 7450),
+ /* PowerPC 7441 (G4) */
+ POWERPC_DEF("7441", CPU_POWERPC_74x1, 7440),
+ /* PowerPC 7451 (G4) */
+ POWERPC_DEF("7451", CPU_POWERPC_74x1, 7450),
+ /* PowerPC 7441 v2.1 (G4) */
+ POWERPC_DEF("7441_v2.1", CPU_POWERPC_7450_v21, 7440),
+ /* PowerPC 7441 v2.3 (G4) */
+ POWERPC_DEF("7441_v2.3", CPU_POWERPC_74x1_v23, 7440),
+ /* PowerPC 7451 v2.3 (G4) */
+ POWERPC_DEF("7451_v2.3", CPU_POWERPC_74x1_v23, 7450),
+ /* PowerPC 7441 v2.10 (G4) */
+ POWERPC_DEF("7441_v2.10", CPU_POWERPC_74x1_v210, 7440),
+ /* PowerPC 7451 v2.10 (G4) */
+ POWERPC_DEF("7451_v2.10", CPU_POWERPC_74x1_v210, 7450),
+ /* PowerPC 7445 (G4) */
+ POWERPC_DEF("7445", CPU_POWERPC_74x5, 7445),
+ /* PowerPC 7455 (G4) */
+ POWERPC_DEF("7455", CPU_POWERPC_74x5, 7455),
+ /* Code name for PowerPC 7445/7455 */
+ POWERPC_DEF("Apollo6", CPU_POWERPC_74x5, 7455),
+ /* PowerPC 7445 v1.0 (G4) */
+ POWERPC_DEF("7445_v1.0", CPU_POWERPC_74x5_v10, 7445),
+ /* PowerPC 7455 v1.0 (G4) */
+ POWERPC_DEF("7455_v1.0", CPU_POWERPC_74x5_v10, 7455),
+ /* PowerPC 7445 v2.1 (G4) */
+ POWERPC_DEF("7445_v2.1", CPU_POWERPC_74x5_v21, 7445),
+ /* PowerPC 7455 v2.1 (G4) */
+ POWERPC_DEF("7455_v2.1", CPU_POWERPC_74x5_v21, 7455),
+ /* PowerPC 7445 v3.2 (G4) */
+ POWERPC_DEF("7445_v3.2", CPU_POWERPC_74x5_v32, 7445),
+ /* PowerPC 7455 v3.2 (G4) */
+ POWERPC_DEF("7455_v3.2", CPU_POWERPC_74x5_v32, 7455),
+ /* PowerPC 7445 v3.3 (G4) */
+ POWERPC_DEF("7445_v3.3", CPU_POWERPC_74x5_v33, 7445),
+ /* PowerPC 7455 v3.3 (G4) */
+ POWERPC_DEF("7455_v3.3", CPU_POWERPC_74x5_v33, 7455),
+ /* PowerPC 7445 v3.4 (G4) */
+ POWERPC_DEF("7445_v3.4", CPU_POWERPC_74x5_v34, 7445),
+ /* PowerPC 7455 v3.4 (G4) */
+ POWERPC_DEF("7455_v3.4", CPU_POWERPC_74x5_v34, 7455),
+ /* PowerPC 7447 (G4) */
+ POWERPC_DEF("7447", CPU_POWERPC_74x7, 7445),
+ /* PowerPC 7457 (G4) */
+ POWERPC_DEF("7457", CPU_POWERPC_74x7, 7455),
+ /* Code name for PowerPC 7447/7457 */
+ POWERPC_DEF("Apollo7", CPU_POWERPC_74x7, 7455),
+ /* PowerPC 7447 v1.0 (G4) */
+ POWERPC_DEF("7447_v1.0", CPU_POWERPC_74x7_v10, 7445),
+ /* PowerPC 7457 v1.0 (G4) */
+ POWERPC_DEF("7457_v1.0", CPU_POWERPC_74x7_v10, 7455),
+ /* PowerPC 7447 v1.1 (G4) */
+ POWERPC_DEF("7447_v1.1", CPU_POWERPC_74x7_v11, 7445),
+ /* PowerPC 7457 v1.1 (G4) */
+ POWERPC_DEF("7457_v1.1", CPU_POWERPC_74x7_v11, 7455),
+ /* PowerPC 7457 v1.2 (G4) */
+ POWERPC_DEF("7457_v1.2", CPU_POWERPC_74x7_v12, 7455),
+ /* PowerPC 7447A (G4) */
+ POWERPC_DEF("7447A", CPU_POWERPC_74x7A, 7445),
+ /* PowerPC 7457A (G4) */
+ POWERPC_DEF("7457A", CPU_POWERPC_74x7A, 7455),
+ /* PowerPC 7447A v1.0 (G4) */
+ POWERPC_DEF("7447A_v1.0", CPU_POWERPC_74x7A_v10, 7445),
+ /* PowerPC 7457A v1.0 (G4) */
+ POWERPC_DEF("7457A_v1.0", CPU_POWERPC_74x7A_v10, 7455),
+ /* Code name for PowerPC 7447A/7457A */
+ POWERPC_DEF("Apollo7PM", CPU_POWERPC_74x7A_v10, 7455),
+ /* PowerPC 7447A v1.1 (G4) */
+ POWERPC_DEF("7447A_v1.1", CPU_POWERPC_74x7A_v11, 7445),
+ /* PowerPC 7457A v1.1 (G4) */
+ POWERPC_DEF("7457A_v1.1", CPU_POWERPC_74x7A_v11, 7455),
+ /* PowerPC 7447A v1.2 (G4) */
+ POWERPC_DEF("7447A_v1.2", CPU_POWERPC_74x7A_v12, 7445),
+ /* PowerPC 7457A v1.2 (G4) */
+ POWERPC_DEF("7457A_v1.2", CPU_POWERPC_74x7A_v12, 7455),
+ /* 64-bit PowerPC */
+#if defined (TARGET_PPC64)
+ /* PowerPC 620 */
+ POWERPC_DEF("620", CPU_POWERPC_620, 620),
+ /* Code name for PowerPC 620 */
+ POWERPC_DEF("Trident", CPU_POWERPC_620, 620),
+#if defined (TODO)
+ /* PowerPC 630 (POWER3) */
+ POWERPC_DEF("630", CPU_POWERPC_630, 630),
+ POWERPC_DEF("POWER3", CPU_POWERPC_630, 630),
+ /* Code names for POWER3 */
+ POWERPC_DEF("Boxer", CPU_POWERPC_630, 630),
+ POWERPC_DEF("Dino", CPU_POWERPC_630, 630),
+#endif
+#if defined (TODO)
+ /* PowerPC 631 (Power 3+) */
+ POWERPC_DEF("631", CPU_POWERPC_631, 631),
+ POWERPC_DEF("POWER3+", CPU_POWERPC_631, 631),
+#endif
+#if defined (TODO)
+ /* POWER4 */
+ POWERPC_DEF("POWER4", CPU_POWERPC_POWER4, POWER4),
+#endif
+#if defined (TODO)
+ /* POWER4p */
+ POWERPC_DEF("POWER4+", CPU_POWERPC_POWER4P, POWER4P),
+#endif
+#if defined (TODO)
+ /* POWER5 */
+ POWERPC_DEF("POWER5", CPU_POWERPC_POWER5, POWER5),
+ /* POWER5GR */
+ POWERPC_DEF("POWER5gr", CPU_POWERPC_POWER5GR, POWER5),
+#endif
+#if defined (TODO)
+ /* POWER5+ */
+ POWERPC_DEF("POWER5+", CPU_POWERPC_POWER5P, POWER5P),
+ /* POWER5GS */
+ POWERPC_DEF("POWER5gs", CPU_POWERPC_POWER5GS, POWER5P),
+#endif
+#if defined (TODO)
+ /* POWER6 */
+ POWERPC_DEF("POWER6", CPU_POWERPC_POWER6, POWER6),
+ /* POWER6 running in POWER5 mode */
+ POWERPC_DEF("POWER6_5", CPU_POWERPC_POWER6_5, POWER5),
+ /* POWER6A */
+ POWERPC_DEF("POWER6A", CPU_POWERPC_POWER6A, POWER6),
+#endif
+ /* PowerPC 970 */
+ POWERPC_DEF("970", CPU_POWERPC_970, 970),
+ /* PowerPC 970FX (G5) */
+ POWERPC_DEF("970fx", CPU_POWERPC_970FX, 970FX),
+ /* PowerPC 970FX v1.0 (G5) */
+ POWERPC_DEF("970fx_v1.0", CPU_POWERPC_970FX_v10, 970FX),
+ /* PowerPC 970FX v2.0 (G5) */
+ POWERPC_DEF("970fx_v2.0", CPU_POWERPC_970FX_v20, 970FX),
+ /* PowerPC 970FX v2.1 (G5) */
+ POWERPC_DEF("970fx_v2.1", CPU_POWERPC_970FX_v21, 970FX),
+ /* PowerPC 970FX v3.0 (G5) */
+ POWERPC_DEF("970fx_v3.0", CPU_POWERPC_970FX_v30, 970FX),
+ /* PowerPC 970FX v3.1 (G5) */
+ POWERPC_DEF("970fx_v3.1", CPU_POWERPC_970FX_v31, 970FX),
+ /* PowerPC 970GX (G5) */
+ POWERPC_DEF("970gx", CPU_POWERPC_970GX, 970GX),
+ /* PowerPC 970MP */
+ POWERPC_DEF("970mp", CPU_POWERPC_970MP, 970MP),
+ /* PowerPC 970MP v1.0 */
+ POWERPC_DEF("970mp_v1.0", CPU_POWERPC_970MP_v10, 970MP),
+ /* PowerPC 970MP v1.1 */
+ POWERPC_DEF("970mp_v1.1", CPU_POWERPC_970MP_v11, 970MP),
+#if defined (TODO)
+ /* PowerPC Cell */
+ POWERPC_DEF("Cell", CPU_POWERPC_CELL, 970),
+#endif
+#if defined (TODO)
+ /* PowerPC Cell v1.0 */
+ POWERPC_DEF("Cell_v1.0", CPU_POWERPC_CELL_v10, 970),
+#endif
+#if defined (TODO)
+ /* PowerPC Cell v2.0 */
+ POWERPC_DEF("Cell_v2.0", CPU_POWERPC_CELL_v20, 970),
+#endif
+#if defined (TODO)
+ /* PowerPC Cell v3.0 */
+ POWERPC_DEF("Cell_v3.0", CPU_POWERPC_CELL_v30, 970),
+#endif
+#if defined (TODO)
+ /* PowerPC Cell v3.1 */
+ POWERPC_DEF("Cell_v3.1", CPU_POWERPC_CELL_v31, 970),
+#endif
+#if defined (TODO)
+ /* PowerPC Cell v3.2 */
+ POWERPC_DEF("Cell_v3.2", CPU_POWERPC_CELL_v32, 970),
+#endif
+#if defined (TODO)
+ /* RS64 (Apache/A35) */
+ /* This one seems to support the whole POWER2 instruction set
+ * and the PowerPC 64 one.
+ */
+ /* What about A10 & A30 ? */
+ POWERPC_DEF("RS64", CPU_POWERPC_RS64, RS64),
+ POWERPC_DEF("Apache", CPU_POWERPC_RS64, RS64),
+ POWERPC_DEF("A35", CPU_POWERPC_RS64, RS64),
+#endif
+#if defined (TODO)
+ /* RS64-II (NorthStar/A50) */
+ POWERPC_DEF("RS64-II", CPU_POWERPC_RS64II, RS64),
+ POWERPC_DEF("NorthStar", CPU_POWERPC_RS64II, RS64),
+ POWERPC_DEF("A50", CPU_POWERPC_RS64II, RS64),
+#endif
+#if defined (TODO)
+ /* RS64-III (Pulsar) */
+ POWERPC_DEF("RS64-III", CPU_POWERPC_RS64III, RS64),
+ POWERPC_DEF("Pulsar", CPU_POWERPC_RS64III, RS64),
+#endif
+#if defined (TODO)
+ /* RS64-IV (IceStar/IStar/SStar) */
+ POWERPC_DEF("RS64-IV", CPU_POWERPC_RS64IV, RS64),
+ POWERPC_DEF("IceStar", CPU_POWERPC_RS64IV, RS64),
+ POWERPC_DEF("IStar", CPU_POWERPC_RS64IV, RS64),
+ POWERPC_DEF("SStar", CPU_POWERPC_RS64IV, RS64),
+#endif
+#endif /* defined (TARGET_PPC64) */
+ /* POWER */
+#if defined (TODO)
+ /* Original POWER */
+ POWERPC_DEF("POWER", CPU_POWERPC_POWER, POWER),
+ POWERPC_DEF("RIOS", CPU_POWERPC_POWER, POWER),
+ POWERPC_DEF("RSC", CPU_POWERPC_POWER, POWER),
+ POWERPC_DEF("RSC3308", CPU_POWERPC_POWER, POWER),
+ POWERPC_DEF("RSC4608", CPU_POWERPC_POWER, POWER),
+#endif
+#if defined (TODO)
+ /* POWER2 */
+ POWERPC_DEF("POWER2", CPU_POWERPC_POWER2, POWER),
+ POWERPC_DEF("RSC2", CPU_POWERPC_POWER2, POWER),
+ POWERPC_DEF("P2SC", CPU_POWERPC_POWER2, POWER),
+#endif
+    /* PA Semi cores */
+#if defined (TODO)
+ /* PA PA6T */
+ POWERPC_DEF("PA6T", CPU_POWERPC_PA6T, PA6T),
+#endif
+ /* Generic PowerPCs */
+#if defined (TARGET_PPC64)
+ POWERPC_DEF("ppc64", CPU_POWERPC_PPC64, PPC64),
+#endif
+ POWERPC_DEF("ppc32", CPU_POWERPC_PPC32, PPC32),
+ POWERPC_DEF("ppc", CPU_POWERPC_DEFAULT, DEFAULT),
+ /* Fallback */
+ POWERPC_DEF("default", CPU_POWERPC_DEFAULT, DEFAULT),
+};
+
+/*****************************************************************************/
+/* Generic CPU instantiation routine */
+static void init_ppc_proc (CPUPPCState *env, const ppc_def_t *def)
+{
+#if !defined(CONFIG_USER_ONLY)
+ int i;
+
+ env->irq_inputs = NULL;
+ /* Set all exception vectors to an invalid address */
+ for (i = 0; i < POWERPC_EXCP_NB; i++)
+ env->excp_vectors[i] = (target_ulong)(-1ULL);
+ env->hreset_excp_prefix = 0x00000000;
+ env->ivor_mask = 0x00000000;
+ env->ivpr_mask = 0x00000000;
+ /* Default MMU definitions */
+ env->nb_BATs = 0;
+ env->nb_tlb = 0;
+ env->nb_ways = 0;
+#endif
+    /* Register SPRs common to all PowerPC implementations */
+ gen_spr_generic(env);
+ spr_register(env, SPR_PVR, "PVR",
+ /* Linux permits userspace to read PVR */
+#if defined(CONFIG_LINUX_USER)
+ &spr_read_generic,
+#else
+ SPR_NOACCESS,
+#endif
+ SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ def->pvr);
+    /* Register SVR if it is defined to anything other than POWERPC_SVR_NONE */
+ if (def->svr != POWERPC_SVR_NONE) {
+ if (def->svr & POWERPC_SVR_E500) {
+ spr_register(env, SPR_E500_SVR, "SVR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ def->svr & ~POWERPC_SVR_E500);
+ } else {
+ spr_register(env, SPR_SVR, "SVR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ def->svr);
+ }
+ }
+    /* PowerPC implementation-specific initialisations (SPRs, timers, ...) */
+ (*def->init_proc)(env);
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_prefix = env->hreset_excp_prefix;
+#endif
+ /* MSR bits & flags consistency checks */
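+    /* Each hard-coded shift below is an MSR bit position that is shared by
+     * two or three mutually exclusive features (for instance, bit 25 is
+     * used for both the SPE enable and the vector enable).  When the bit is
+     * present in msr_mask, exactly one of the corresponding POWERPC_FLAG_*
+     * flags must be set; when it is absent, none of them may be set.
+     */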
+ if (env->msr_mask & (1 << 25)) {
+ switch (env->flags & (POWERPC_FLAG_SPE | POWERPC_FLAG_VRE)) {
+ case POWERPC_FLAG_SPE:
+ case POWERPC_FLAG_VRE:
+ break;
+ default:
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should define POWERPC_FLAG_SPE or POWERPC_FLAG_VRE\n");
+ exit(1);
+ }
+ } else if (env->flags & (POWERPC_FLAG_SPE | POWERPC_FLAG_VRE)) {
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should not define POWERPC_FLAG_SPE nor POWERPC_FLAG_VRE\n");
+ exit(1);
+ }
+ if (env->msr_mask & (1 << 17)) {
+ switch (env->flags & (POWERPC_FLAG_TGPR | POWERPC_FLAG_CE)) {
+ case POWERPC_FLAG_TGPR:
+ case POWERPC_FLAG_CE:
+ break;
+ default:
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should define POWERPC_FLAG_TGPR or POWERPC_FLAG_CE\n");
+ exit(1);
+ }
+ } else if (env->flags & (POWERPC_FLAG_TGPR | POWERPC_FLAG_CE)) {
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should not define POWERPC_FLAG_TGPR nor POWERPC_FLAG_CE\n");
+ exit(1);
+ }
+ if (env->msr_mask & (1 << 10)) {
+ switch (env->flags & (POWERPC_FLAG_SE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_UBLE)) {
+ case POWERPC_FLAG_SE:
+ case POWERPC_FLAG_DWE:
+ case POWERPC_FLAG_UBLE:
+ break;
+ default:
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should define POWERPC_FLAG_SE or POWERPC_FLAG_DWE or "
+ "POWERPC_FLAG_UBLE\n");
+ exit(1);
+ }
+ } else if (env->flags & (POWERPC_FLAG_SE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_UBLE)) {
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should not define POWERPC_FLAG_SE nor POWERPC_FLAG_DWE nor "
+ "POWERPC_FLAG_UBLE\n");
+ exit(1);
+ }
+ if (env->msr_mask & (1 << 9)) {
+ switch (env->flags & (POWERPC_FLAG_BE | POWERPC_FLAG_DE)) {
+ case POWERPC_FLAG_BE:
+ case POWERPC_FLAG_DE:
+ break;
+ default:
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should define POWERPC_FLAG_BE or POWERPC_FLAG_DE\n");
+ exit(1);
+ }
+ } else if (env->flags & (POWERPC_FLAG_BE | POWERPC_FLAG_DE)) {
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should not define POWERPC_FLAG_BE nor POWERPC_FLAG_DE\n");
+ exit(1);
+ }
+ if (env->msr_mask & (1 << 2)) {
+ switch (env->flags & (POWERPC_FLAG_PX | POWERPC_FLAG_PMM)) {
+ case POWERPC_FLAG_PX:
+ case POWERPC_FLAG_PMM:
+ break;
+ default:
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should define POWERPC_FLAG_PX or POWERPC_FLAG_PMM\n");
+ exit(1);
+ }
+ } else if (env->flags & (POWERPC_FLAG_PX | POWERPC_FLAG_PMM)) {
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should not define POWERPC_FLAG_PX nor POWERPC_FLAG_PMM\n");
+ exit(1);
+ }
+ if ((env->flags & (POWERPC_FLAG_RTC_CLK | POWERPC_FLAG_BUS_CLK)) == 0) {
+ fprintf(stderr, "PowerPC flags inconsistency\n"
+ "Should define the time-base and decrementer clock source\n");
+ exit(1);
+ }
+    /* Allocate TLB buffers when needed */
+#if !defined(CONFIG_USER_ONLY)
+ if (env->nb_tlb != 0) {
+ int nb_tlb = env->nb_tlb;
+ if (env->id_tlbs != 0)
+ nb_tlb *= 2;
+ env->tlb = qemu_mallocz(nb_tlb * sizeof(ppc_tlb_t));
+ /* Pre-compute some useful values */
+ env->tlb_per_way = env->nb_tlb / env->nb_ways;
+ }
+ if (env->irq_inputs == NULL) {
+ fprintf(stderr, "WARNING: no internal IRQ controller registered.\n"
+                " Expect QEMU to crash very soon!\n");
+ }
+#endif
+ if (env->check_pow == NULL) {
+ fprintf(stderr, "WARNING: no power management check handler "
+ "registered.\n"
+                " Expect QEMU to crash very soon!\n");
+ }
+}
+
+#if defined(PPC_DUMP_CPU)
+static void dump_ppc_sprs (CPUPPCState *env)
+{
+ ppc_spr_t *spr;
+#if !defined(CONFIG_USER_ONLY)
+ uint32_t sr, sw;
+#endif
+ uint32_t ur, uw;
+ int i, j, n;
+
+ printf("Special purpose registers:\n");
+ for (i = 0; i < 32; i++) {
+ for (j = 0; j < 32; j++) {
+ n = (i << 5) | j;
+ spr = &env->spr_cb[n];
+ uw = spr->uea_write != NULL && spr->uea_write != SPR_NOACCESS;
+ ur = spr->uea_read != NULL && spr->uea_read != SPR_NOACCESS;
+#if !defined(CONFIG_USER_ONLY)
+ sw = spr->oea_write != NULL && spr->oea_write != SPR_NOACCESS;
+ sr = spr->oea_read != NULL && spr->oea_read != SPR_NOACCESS;
+ if (sw || sr || uw || ur) {
+ printf("SPR: %4d (%03x) %-8s s%c%c u%c%c\n",
+ (i << 5) | j, (i << 5) | j, spr->name,
+ sw ? 'w' : '-', sr ? 'r' : '-',
+ uw ? 'w' : '-', ur ? 'r' : '-');
+ }
+#else
+ if (uw || ur) {
+ printf("SPR: %4d (%03x) %-8s u%c%c\n",
+ (i << 5) | j, (i << 5) | j, spr->name,
+ uw ? 'w' : '-', ur ? 'r' : '-');
+ }
+#endif
+ }
+ }
+ fflush(stdout);
+ fflush(stderr);
+}
+#endif
+
+/*****************************************************************************/
+#include <stdlib.h>
+#include <string.h>
+
+/* Opcode types */
+enum {
+ PPC_DIRECT = 0, /* Opcode routine */
+ PPC_INDIRECT = 1, /* Indirect opcode table */
+};
+
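+/* An entry in an opcode table is either a direct pointer to an
+ * opc_handler_t or a pointer to a sub-table with PPC_INDIRECT or'ed into
+ * its low bits.  This relies on handler structures and sub-tables being at
+ * least 4-byte aligned, so the two low bits of a genuine pointer are always
+ * zero.  A minimal sketch of how an indirect entry is built and decoded
+ * (illustration only, the real code is in create_new_table() below):
+ *
+ *   opc_handler_t **sub = malloc(0x20 * sizeof(opc_handler_t *));
+ *   table[idx] = (opc_handler_t *)((unsigned long)sub | PPC_INDIRECT);
+ *   assert(is_indirect_opcode(table[idx]));
+ *   assert(ind_table(table[idx]) == sub);
+ */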
+static inline int is_indirect_opcode (void *handler)
+{
+ return ((unsigned long)handler & 0x03) == PPC_INDIRECT;
+}
+
+static inline opc_handler_t **ind_table(void *handler)
+{
+ return (opc_handler_t **)((unsigned long)handler & ~3);
+}
+
+/* Opcode table creation */
+static void fill_new_table (opc_handler_t **table, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ table[i] = &invalid_handler;
+}
+
+static int create_new_table (opc_handler_t **table, unsigned char idx)
+{
+ opc_handler_t **tmp;
+
+    tmp = malloc(0x20 * sizeof(opc_handler_t *));
+ fill_new_table(tmp, 0x20);
+ table[idx] = (opc_handler_t *)((unsigned long)tmp | PPC_INDIRECT);
+
+ return 0;
+}
+
+static int insert_in_table (opc_handler_t **table, unsigned char idx,
+ opc_handler_t *handler)
+{
+ if (table[idx] != &invalid_handler)
+ return -1;
+ table[idx] = handler;
+
+ return 0;
+}
+
+static int register_direct_insn (opc_handler_t **ppc_opcodes,
+ unsigned char idx, opc_handler_t *handler)
+{
+ if (insert_in_table(ppc_opcodes, idx, handler) < 0) {
+ printf("*** ERROR: opcode %02x already assigned in main "
+ "opcode table\n", idx);
+#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
+ printf(" Registered handler '%s' - new handler '%s'\n",
+ ppc_opcodes[idx]->oname, handler->oname);
+#endif
+ return -1;
+ }
+
+ return 0;
+}
+
+static int register_ind_in_table (opc_handler_t **table,
+ unsigned char idx1, unsigned char idx2,
+ opc_handler_t *handler)
+{
+ if (table[idx1] == &invalid_handler) {
+ if (create_new_table(table, idx1) < 0) {
+ printf("*** ERROR: unable to create indirect table "
+ "idx=%02x\n", idx1);
+ return -1;
+ }
+ } else {
+ if (!is_indirect_opcode(table[idx1])) {
+ printf("*** ERROR: idx %02x already assigned to a direct "
+ "opcode\n", idx1);
+#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
+ printf(" Registered handler '%s' - new handler '%s'\n",
+                   table[idx1]->oname, handler->oname);
+#endif
+ return -1;
+ }
+ }
+ if (handler != NULL &&
+ insert_in_table(ind_table(table[idx1]), idx2, handler) < 0) {
+ printf("*** ERROR: opcode %02x already assigned in "
+ "opcode table %02x\n", idx2, idx1);
+#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
+ printf(" Registered handler '%s' - new handler '%s'\n",
+ ind_table(table[idx1])[idx2]->oname, handler->oname);
+#endif
+ return -1;
+ }
+
+ return 0;
+}
+
+static int register_ind_insn (opc_handler_t **ppc_opcodes,
+ unsigned char idx1, unsigned char idx2,
+ opc_handler_t *handler)
+{
+ int ret;
+
+ ret = register_ind_in_table(ppc_opcodes, idx1, idx2, handler);
+
+ return ret;
+}
+
+static int register_dblind_insn (opc_handler_t **ppc_opcodes,
+ unsigned char idx1, unsigned char idx2,
+ unsigned char idx3, opc_handler_t *handler)
+{
+ if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
+ printf("*** ERROR: unable to join indirect table idx "
+ "[%02x-%02x]\n", idx1, idx2);
+ return -1;
+ }
+ if (register_ind_in_table(ind_table(ppc_opcodes[idx1]), idx2, idx3,
+ handler) < 0) {
+ printf("*** ERROR: unable to insert opcode "
+ "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
+ return -1;
+ }
+
+ return 0;
+}
+
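+/* Opcodes are registered through up to three levels: opc1 is the 6-bit
+ * primary opcode (0x40 entries in the main table), while opc2 and opc3 are
+ * optional 5-bit extended opcode fields (0x20 entries per sub-table).
+ * An opc2 or opc3 value of 0xFF in the opcode definition means "no
+ * sub-opcode at this level", so the handler is registered directly at the
+ * previous level.
+ */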
+static int register_insn (opc_handler_t **ppc_opcodes, opcode_t *insn)
+{
+ if (insn->opc2 != 0xFF) {
+ if (insn->opc3 != 0xFF) {
+ if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2,
+ insn->opc3, &insn->handler) < 0)
+ return -1;
+ } else {
+ if (register_ind_insn(ppc_opcodes, insn->opc1,
+ insn->opc2, &insn->handler) < 0)
+ return -1;
+ }
+ } else {
+ if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
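+/* Recursively count the valid handlers reachable from a table; empty
+ * sub-tables are freed and their slot reset to the invalid handler so that
+ * lookups always hit a valid entry.
+ */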
+static int test_opcode_table (opc_handler_t **table, int len)
+{
+ int i, count, tmp;
+
+ for (i = 0, count = 0; i < len; i++) {
+ /* Consistency fixup */
+ if (table[i] == NULL)
+ table[i] = &invalid_handler;
+ if (table[i] != &invalid_handler) {
+ if (is_indirect_opcode(table[i])) {
+ tmp = test_opcode_table(ind_table(table[i]), 0x20);
+ if (tmp == 0) {
+ free(table[i]);
+ table[i] = &invalid_handler;
+ } else {
+ count++;
+ }
+ } else {
+ count++;
+ }
+ }
+ }
+
+ return count;
+}
+
+static void fix_opcode_tables (opc_handler_t **ppc_opcodes)
+{
+ if (test_opcode_table(ppc_opcodes, 0x40) == 0)
+        printf("*** WARNING: no opcode defined!\n");
+}
+
+/*****************************************************************************/
+static int create_ppc_opcodes (CPUPPCState *env, const ppc_def_t *def)
+{
+ opcode_t *opc;
+
+ fill_new_table(env->opcodes, 0x40);
+ for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) {
+ if ((opc->handler.type & def->insns_flags) != 0) {
+ if (register_insn(env->opcodes, opc) < 0) {
+ printf("*** ERROR initializing PowerPC instruction "
+ "0x%02x 0x%02x 0x%02x\n", opc->opc1, opc->opc2,
+ opc->opc3);
+ return -1;
+ }
+ }
+ }
+ fix_opcode_tables(env->opcodes);
+ fflush(stdout);
+ fflush(stderr);
+
+ return 0;
+}
+
+#if defined(PPC_DUMP_CPU)
+static void dump_ppc_insns (CPUPPCState *env)
+{
+ opc_handler_t **table, *handler;
+ const char *p, *q;
+ uint8_t opc1, opc2, opc3;
+
+    printf("Instruction set:\n");
+ /* opc1 is 6 bits long */
+ for (opc1 = 0x00; opc1 < 0x40; opc1++) {
+ table = env->opcodes;
+ handler = table[opc1];
+ if (is_indirect_opcode(handler)) {
+ /* opc2 is 5 bits long */
+ for (opc2 = 0; opc2 < 0x20; opc2++) {
+ table = env->opcodes;
+ handler = env->opcodes[opc1];
+ table = ind_table(handler);
+ handler = table[opc2];
+ if (is_indirect_opcode(handler)) {
+ table = ind_table(handler);
+ /* opc3 is 5 bits long */
+ for (opc3 = 0; opc3 < 0x20; opc3++) {
+ handler = table[opc3];
+ if (handler->handler != &gen_invalid) {
+ /* Special hack to properly dump SPE insns */
+ p = strchr(handler->oname, '_');
+ if (p == NULL) {
+ printf("INSN: %02x %02x %02x (%02d %04d) : "
+ "%s\n",
+ opc1, opc2, opc3, opc1,
+ (opc3 << 5) | opc2,
+ handler->oname);
+ } else {
+ q = "speundef";
+ if ((p - handler->oname) != strlen(q) ||
+ memcmp(handler->oname, q, strlen(q)) != 0) {
+ /* First instruction */
+ printf("INSN: %02x %02x %02x (%02d %04d) : "
+ "%.*s\n",
+ opc1, opc2 << 1, opc3, opc1,
+ (opc3 << 6) | (opc2 << 1),
+ (int)(p - handler->oname),
+ handler->oname);
+ }
+ if (strcmp(p + 1, q) != 0) {
+ /* Second instruction */
+ printf("INSN: %02x %02x %02x (%02d %04d) : "
+ "%s\n",
+ opc1, (opc2 << 1) | 1, opc3, opc1,
+ (opc3 << 6) | (opc2 << 1) | 1,
+ p + 1);
+ }
+ }
+ }
+ }
+ } else {
+ if (handler->handler != &gen_invalid) {
+ printf("INSN: %02x %02x -- (%02d %04d) : %s\n",
+ opc1, opc2, opc1, opc2, handler->oname);
+ }
+ }
+ }
+ } else {
+ if (handler->handler != &gen_invalid) {
+ printf("INSN: %02x -- -- (%02d ----) : %s\n",
+ opc1, opc1, handler->oname);
+ }
+ }
+ }
+}
+#endif
+
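+/* GDB coprocessor register sets: the FPU set exposes the 32 FPRs plus
+ * FPSCR (33 registers), the AltiVec set the 32 VRs plus VSCR and VRSAVE
+ * (34 registers), and the SPE set the 32 upper GPR halves plus the
+ * accumulator and SPEFSCR (34 registers).  These counts are what
+ * cpu_ppc_register_internal() passes to gdb_register_coprocessor() below.
+ */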
+static int gdb_get_float_reg(CPUState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+ stfq_p(mem_buf, env->fpr[n]);
+ return 8;
+ }
+ if (n == 32) {
+ /* FPSCR not implemented */
+ memset(mem_buf, 0, 4);
+ return 4;
+ }
+ return 0;
+}
+
+static int gdb_set_float_reg(CPUState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+ env->fpr[n] = ldfq_p(mem_buf);
+ return 8;
+ }
+ if (n == 32) {
+ /* FPSCR not implemented */
+ return 4;
+ }
+ return 0;
+}
+
+static int gdb_get_avr_reg(CPUState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+#ifdef HOST_WORDS_BIGENDIAN
+ stq_p(mem_buf, env->avr[n].u64[0]);
+ stq_p(mem_buf+8, env->avr[n].u64[1]);
+#else
+ stq_p(mem_buf, env->avr[n].u64[1]);
+ stq_p(mem_buf+8, env->avr[n].u64[0]);
+#endif
+ return 16;
+ }
+ if (n == 32) {
+ stl_p(mem_buf, env->vscr);
+ return 4;
+ }
+ if (n == 33) {
+ stl_p(mem_buf, (uint32_t)env->spr[SPR_VRSAVE]);
+ return 4;
+ }
+ return 0;
+}
+
+static int gdb_set_avr_reg(CPUState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+#ifdef HOST_WORDS_BIGENDIAN
+ env->avr[n].u64[0] = ldq_p(mem_buf);
+ env->avr[n].u64[1] = ldq_p(mem_buf+8);
+#else
+ env->avr[n].u64[1] = ldq_p(mem_buf);
+ env->avr[n].u64[0] = ldq_p(mem_buf+8);
+#endif
+ return 16;
+ }
+ if (n == 32) {
+ env->vscr = ldl_p(mem_buf);
+ return 4;
+ }
+ if (n == 33) {
+ env->spr[SPR_VRSAVE] = (target_ulong)ldl_p(mem_buf);
+ return 4;
+ }
+ return 0;
+}
+
+static int gdb_get_spe_reg(CPUState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+#if defined(TARGET_PPC64)
+ stl_p(mem_buf, env->gpr[n] >> 32);
+#else
+ stl_p(mem_buf, env->gprh[n]);
+#endif
+ return 4;
+ }
+ if (n == 32) {
+ stq_p(mem_buf, env->spe_acc);
+ return 8;
+ }
+ if (n == 33) {
+ stl_p(mem_buf, env->spe_fscr);
+ return 4;
+ }
+ return 0;
+}
+
+static int gdb_set_spe_reg(CPUState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+#if defined(TARGET_PPC64)
+ target_ulong lo = (uint32_t)env->gpr[n];
+ target_ulong hi = (target_ulong)ldl_p(mem_buf) << 32;
+ env->gpr[n] = lo | hi;
+#else
+ env->gprh[n] = ldl_p(mem_buf);
+#endif
+ return 4;
+ }
+ if (n == 32) {
+ env->spe_acc = ldq_p(mem_buf);
+ return 8;
+ }
+ if (n == 33) {
+ env->spe_fscr = ldl_p(mem_buf);
+ return 4;
+ }
+ return 0;
+}
+
+int cpu_ppc_register_internal (CPUPPCState *env, const ppc_def_t *def)
+{
+ env->msr_mask = def->msr_mask;
+ env->mmu_model = def->mmu_model;
+ env->excp_model = def->excp_model;
+ env->bus_model = def->bus_model;
+ env->insns_flags = def->insns_flags;
+ env->flags = def->flags;
+ env->bfd_mach = def->bfd_mach;
+ env->check_pow = def->check_pow;
+ if (create_ppc_opcodes(env, def) < 0)
+ return -1;
+ init_ppc_proc(env, def);
+
+ if (def->insns_flags & PPC_FLOAT) {
+ gdb_register_coprocessor(env, gdb_get_float_reg, gdb_set_float_reg,
+ 33, "power-fpu.xml", 0);
+ }
+ if (def->insns_flags & PPC_ALTIVEC) {
+ gdb_register_coprocessor(env, gdb_get_avr_reg, gdb_set_avr_reg,
+ 34, "power-altivec.xml", 0);
+ }
+ if (def->insns_flags & PPC_SPE) {
+ gdb_register_coprocessor(env, gdb_get_spe_reg, gdb_set_spe_reg,
+ 34, "power-spe.xml", 0);
+ }
+
+#if defined(PPC_DUMP_CPU)
+ {
+ const char *mmu_model, *excp_model, *bus_model;
+ switch (env->mmu_model) {
+ case POWERPC_MMU_32B:
+ mmu_model = "PowerPC 32";
+ break;
+ case POWERPC_MMU_SOFT_6xx:
+ mmu_model = "PowerPC 6xx/7xx with software driven TLBs";
+ break;
+ case POWERPC_MMU_SOFT_74xx:
+ mmu_model = "PowerPC 74xx with software driven TLBs";
+ break;
+ case POWERPC_MMU_SOFT_4xx:
+ mmu_model = "PowerPC 4xx with software driven TLBs";
+ break;
+ case POWERPC_MMU_SOFT_4xx_Z:
+ mmu_model = "PowerPC 4xx with software driven TLBs "
+ "and zones protections";
+ break;
+ case POWERPC_MMU_REAL:
+ mmu_model = "PowerPC real mode only";
+ break;
+ case POWERPC_MMU_MPC8xx:
+ mmu_model = "PowerPC MPC8xx";
+ break;
+ case POWERPC_MMU_BOOKE:
+ mmu_model = "PowerPC BookE";
+ break;
+ case POWERPC_MMU_BOOKE_FSL:
+ mmu_model = "PowerPC BookE FSL";
+ break;
+ case POWERPC_MMU_601:
+ mmu_model = "PowerPC 601";
+ break;
+#if defined (TARGET_PPC64)
+ case POWERPC_MMU_64B:
+ mmu_model = "PowerPC 64";
+ break;
+ case POWERPC_MMU_620:
+ mmu_model = "PowerPC 620";
+ break;
+#endif
+ default:
+ mmu_model = "Unknown or invalid";
+ break;
+ }
+ switch (env->excp_model) {
+ case POWERPC_EXCP_STD:
+ excp_model = "PowerPC";
+ break;
+ case POWERPC_EXCP_40x:
+ excp_model = "PowerPC 40x";
+ break;
+ case POWERPC_EXCP_601:
+ excp_model = "PowerPC 601";
+ break;
+ case POWERPC_EXCP_602:
+ excp_model = "PowerPC 602";
+ break;
+ case POWERPC_EXCP_603:
+ excp_model = "PowerPC 603";
+ break;
+ case POWERPC_EXCP_603E:
+ excp_model = "PowerPC 603e";
+ break;
+ case POWERPC_EXCP_604:
+ excp_model = "PowerPC 604";
+ break;
+ case POWERPC_EXCP_7x0:
+ excp_model = "PowerPC 740/750";
+ break;
+ case POWERPC_EXCP_7x5:
+ excp_model = "PowerPC 745/755";
+ break;
+ case POWERPC_EXCP_74xx:
+ excp_model = "PowerPC 74xx";
+ break;
+ case POWERPC_EXCP_BOOKE:
+ excp_model = "PowerPC BookE";
+ break;
+#if defined (TARGET_PPC64)
+ case POWERPC_EXCP_970:
+ excp_model = "PowerPC 970";
+ break;
+#endif
+ default:
+ excp_model = "Unknown or invalid";
+ break;
+ }
+ switch (env->bus_model) {
+ case PPC_FLAGS_INPUT_6xx:
+ bus_model = "PowerPC 6xx";
+ break;
+ case PPC_FLAGS_INPUT_BookE:
+ bus_model = "PowerPC BookE";
+ break;
+ case PPC_FLAGS_INPUT_405:
+ bus_model = "PowerPC 405";
+ break;
+ case PPC_FLAGS_INPUT_401:
+ bus_model = "PowerPC 401/403";
+ break;
+ case PPC_FLAGS_INPUT_RCPU:
+ bus_model = "RCPU / MPC8xx";
+ break;
+#if defined (TARGET_PPC64)
+ case PPC_FLAGS_INPUT_970:
+ bus_model = "PowerPC 970";
+ break;
+#endif
+ default:
+ bus_model = "Unknown or invalid";
+ break;
+ }
+ printf("PowerPC %-12s : PVR %08x MSR %016" PRIx64 "\n"
+ " MMU model : %s\n",
+ def->name, def->pvr, def->msr_mask, mmu_model);
+#if !defined(CONFIG_USER_ONLY)
+ if (env->tlb != NULL) {
+ printf(" %d %s TLB in %d ways\n",
+               env->nb_tlb, env->id_tlbs ? "split" : "merged",
+ env->nb_ways);
+ }
+#endif
+ printf(" Exceptions model : %s\n"
+ " Bus model : %s\n",
+ excp_model, bus_model);
+ printf(" MSR features :\n");
+ if (env->flags & POWERPC_FLAG_SPE)
+ printf(" signal processing engine enable"
+ "\n");
+ else if (env->flags & POWERPC_FLAG_VRE)
+ printf(" vector processor enable\n");
+ if (env->flags & POWERPC_FLAG_TGPR)
+ printf(" temporary GPRs\n");
+ else if (env->flags & POWERPC_FLAG_CE)
+ printf(" critical input enable\n");
+ if (env->flags & POWERPC_FLAG_SE)
+ printf(" single-step trace mode\n");
+ else if (env->flags & POWERPC_FLAG_DWE)
+ printf(" debug wait enable\n");
+ else if (env->flags & POWERPC_FLAG_UBLE)
+ printf(" user BTB lock enable\n");
+ if (env->flags & POWERPC_FLAG_BE)
+ printf(" branch-step trace mode\n");
+ else if (env->flags & POWERPC_FLAG_DE)
+ printf(" debug interrupt enable\n");
+ if (env->flags & POWERPC_FLAG_PX)
+ printf(" inclusive protection\n");
+ else if (env->flags & POWERPC_FLAG_PMM)
+ printf(" performance monitor mark\n");
+ if (env->flags == POWERPC_FLAG_NONE)
+ printf(" none\n");
+ printf(" Time-base/decrementer clock source: %s\n",
+ env->flags & POWERPC_FLAG_RTC_CLK ? "RTC clock" : "bus clock");
+ }
+ dump_ppc_insns(env);
+ dump_ppc_sprs(env);
+ fflush(stdout);
+#endif
+
+ return 0;
+}
+
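+/* Select the ppc_defs entry whose PVR best matches the requested one: the
+ * upper 16 bits must be identical, and a candidate is only considered if it
+ * matches the requested revision on every bit at or above the requested
+ * revision's lowest set bit.  Among the candidates, the one sharing the
+ * longest high-order prefix of the revision wins, ties going to the highest
+ * (i.e. last) revision in the table; e.g. a request whose revision field is
+ * zero accepts any revision of the family and ends up with the latest one
+ * listed.
+ */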
+static const ppc_def_t *ppc_find_by_pvr (uint32_t pvr)
+{
+ const ppc_def_t *ret;
+ uint32_t pvr_rev;
+ int i, best, match, best_match, max;
+
+ ret = NULL;
+ max = ARRAY_SIZE(ppc_defs);
+ best = -1;
+ pvr_rev = pvr & 0xFFFF;
+ /* We want all specified bits to match */
+ best_match = 32 - ctz32(pvr_rev);
+ for (i = 0; i < max; i++) {
+        /* Check that the upper 16 bits match, to make sure the right
+         * CPU model family is chosen.
+         */
+ if (((pvr ^ ppc_defs[i].pvr) >> 16) == 0) {
+            /* We want as many of the lower 16 bits as possible to match,
+             * but we allow inexact matches.
+             */
+ match = clz32(pvr_rev ^ (ppc_defs[i].pvr & 0xFFFF));
+            /* We check '>=' instead of '>' because the ppc_defs table
+             * is ordered by increasing revision, so we end up matching
+             * the highest revision compatible with the requested PVR.
+             */
+ if (match >= best_match) {
+ best = i;
+ best_match = match;
+ }
+ }
+ }
+ if (best != -1)
+ ret = &ppc_defs[best];
+
+ return ret;
+}
+
+#include <ctype.h>
+
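+/* Look up a CPU definition either by PVR (a bare or "0x"-prefixed string of
+ * eight hexadecimal digits) or by case-insensitive model name.
+ */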
+const ppc_def_t *cpu_ppc_find_by_name (const char *name)
+{
+ const ppc_def_t *ret;
+ const char *p;
+ int i, max, len;
+
+ /* Check if the given name is a PVR */
+ len = strlen(name);
+ if (len == 10 && name[0] == '0' && name[1] == 'x') {
+ p = name + 2;
+ goto check_pvr;
+ } else if (len == 8) {
+ p = name;
+ check_pvr:
+ for (i = 0; i < 8; i++) {
+ if (!qemu_isxdigit(*p++))
+ break;
+ }
+ if (i == 8)
+ return ppc_find_by_pvr(strtoul(name, NULL, 16));
+ }
+ ret = NULL;
+ max = ARRAY_SIZE(ppc_defs);
+ for (i = 0; i < max; i++) {
+ if (strcasecmp(name, ppc_defs[i].name) == 0) {
+ ret = &ppc_defs[i];
+ break;
+ }
+ }
+
+ return ret;
+}
+
+void ppc_cpu_list (FILE *f, fprintf_function cpu_fprintf)
+{
+ int i, max;
+
+ max = ARRAY_SIZE(ppc_defs);
+ for (i = 0; i < max; i++) {
+ (*cpu_fprintf)(f, "PowerPC %-16s PVR %08x\n",
+ ppc_defs[i].name, ppc_defs[i].pvr);
+ }
+}