Diffstat (limited to 'linux-user/main.c')
-rw-r--r--  linux-user/main.c  3375
1 file changed, 3375 insertions, 0 deletions
diff --git a/linux-user/main.c b/linux-user/main.c
new file mode 100644
index 0000000..0d627d6
--- /dev/null
+++ b/linux-user/main.c
@@ -0,0 +1,3375 @@
+/*
+ * qemu user main
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/resource.h>
+
+#include "qemu.h"
+#include "qemu-common.h"
+#include "cache-utils.h"
+/* For tb_lock */
+#include "exec-all.h"
+#include "tcg.h"
+#include "qemu-timer.h"
+#include "envlist.h"
+
+#define DEBUG_LOGFILE "/tmp/qemu.log"
+
+char *exec_path;
+
+int singlestep;
+unsigned long mmap_min_addr;
+#if defined(CONFIG_USE_GUEST_BASE)
+unsigned long guest_base;
+int have_guest_base;
+unsigned long reserved_va;
+#endif
+
+static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
+const char *qemu_uname_release = CONFIG_UNAME_RELEASE;
+
+/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
+ we allocate a bigger stack. Need a better solution, for example
+ by remapping the process stack directly at the right place */
+unsigned long guest_stack_size = 8 * 1024 * 1024UL;
+
+void gemu_log(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+}
+
+#if defined(TARGET_I386)
+int cpu_get_pic_interrupt(CPUState *env)
+{
+ return -1;
+}
+#endif
+
+/* timers for rdtsc */
+
+#if 0
+
+static uint64_t emu_time;
+
+int64_t cpu_get_real_ticks(void)
+{
+ return emu_time++;
+}
+
+#endif
+
+#if defined(CONFIG_USE_NPTL)
+/***********************************************************/
+/* Helper routines for implementing atomic operations. */
+
+/* To implement exclusive operations we force all cpus to synchronise.
+ We don't require a full sync, only that no cpus are executing guest code.
+ The alternative is to map target atomic ops onto host equivalents,
+ which requires quite a lot of per host/target work. */
+static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
+static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
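+/* Non-zero while an exclusive operation is in progress: 1 for the requesting
+   thread plus one for each CPU that still has to stop executing guest code. */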
+static int pending_cpus;
+
+/* Make sure everything is in a consistent state for calling fork(). */
+void fork_start(void)
+{
+ pthread_mutex_lock(&tb_lock);
+ pthread_mutex_lock(&exclusive_lock);
+ mmap_fork_start();
+}
+
+void fork_end(int child)
+{
+ mmap_fork_end(child);
+ if (child) {
+ /* Child processes created by fork() only have a single thread.
+ Discard information about the parent threads. */
+ first_cpu = thread_env;
+ thread_env->next_cpu = NULL;
+ pending_cpus = 0;
+ pthread_mutex_init(&exclusive_lock, NULL);
+ pthread_mutex_init(&cpu_list_mutex, NULL);
+ pthread_cond_init(&exclusive_cond, NULL);
+ pthread_cond_init(&exclusive_resume, NULL);
+ pthread_mutex_init(&tb_lock, NULL);
+ gdbserver_fork(thread_env);
+ } else {
+ pthread_mutex_unlock(&exclusive_lock);
+ pthread_mutex_unlock(&tb_lock);
+ }
+}
+
+/* Wait for pending exclusive operations to complete. The exclusive lock
+ must be held. */
+static inline void exclusive_idle(void)
+{
+ while (pending_cpus) {
+ pthread_cond_wait(&exclusive_resume, &exclusive_lock);
+ }
+}
+
+/* Start an exclusive operation.
+ Must only be called from outside cpu_arm_exec. */
+static inline void start_exclusive(void)
+{
+ CPUState *other;
+ pthread_mutex_lock(&exclusive_lock);
+ exclusive_idle();
+
+ pending_cpus = 1;
+ /* Make all other cpus stop executing. */
+ for (other = first_cpu; other; other = other->next_cpu) {
+ if (other->running) {
+ pending_cpus++;
+ cpu_exit(other);
+ }
+ }
+ if (pending_cpus > 1) {
+ pthread_cond_wait(&exclusive_cond, &exclusive_lock);
+ }
+}
+
+/* Finish an exclusive operation. */
+static inline void end_exclusive(void)
+{
+ pending_cpus = 0;
+ pthread_cond_broadcast(&exclusive_resume);
+ pthread_mutex_unlock(&exclusive_lock);
+}
+
+/* Wait for exclusive ops to finish, and begin cpu execution. */
+static inline void cpu_exec_start(CPUState *env)
+{
+ pthread_mutex_lock(&exclusive_lock);
+ exclusive_idle();
+ env->running = 1;
+ pthread_mutex_unlock(&exclusive_lock);
+}
+
+/* Mark cpu as not executing, and release pending exclusive ops. */
+static inline void cpu_exec_end(CPUState *env)
+{
+ pthread_mutex_lock(&exclusive_lock);
+ env->running = 0;
+ if (pending_cpus > 1) {
+ pending_cpus--;
+ if (pending_cpus == 1) {
+ pthread_cond_signal(&exclusive_cond);
+ }
+ }
+ exclusive_idle();
+ pthread_mutex_unlock(&exclusive_lock);
+}
+
+void cpu_list_lock(void)
+{
+ pthread_mutex_lock(&cpu_list_mutex);
+}
+
+void cpu_list_unlock(void)
+{
+ pthread_mutex_unlock(&cpu_list_mutex);
+}
+#else /* if !CONFIG_USE_NPTL */
+/* These are no-ops because we are not threadsafe. */
+static inline void cpu_exec_start(CPUState *env)
+{
+}
+
+static inline void cpu_exec_end(CPUState *env)
+{
+}
+
+static inline void start_exclusive(void)
+{
+}
+
+static inline void end_exclusive(void)
+{
+}
+
+void fork_start(void)
+{
+}
+
+void fork_end(int child)
+{
+ if (child) {
+ gdbserver_fork(thread_env);
+ }
+}
+
+void cpu_list_lock(void)
+{
+}
+
+void cpu_list_unlock(void)
+{
+}
+#endif
+
+
+#ifdef TARGET_I386
+/***********************************************************/
+/* CPUX86 core interface */
+
+void cpu_smm_update(CPUState *env)
+{
+}
+
+uint64_t cpu_get_tsc(CPUX86State *env)
+{
+ return cpu_get_real_ticks();
+}
+
+static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
+ int flags)
+{
+ unsigned int e1, e2;
+ uint32_t *p;
+ e1 = (addr << 16) | (limit & 0xffff);
+ e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
+ e2 |= flags;
+ p = ptr;
+ p[0] = tswap32(e1);
+ p[1] = tswap32(e2);
+}
+
+static uint64_t *idt_table;
+#ifdef TARGET_X86_64
+static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
+ uint64_t addr, unsigned int sel)
+{
+ uint32_t *p, e1, e2;
+ e1 = (addr & 0xffff) | (sel << 16);
+ e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
+ p = ptr;
+ p[0] = tswap32(e1);
+ p[1] = tswap32(e2);
+ p[2] = tswap32(addr >> 32);
+ p[3] = 0;
+}
+/* only dpl matters as we do only user space emulation */
+static void set_idt(int n, unsigned int dpl)
+{
+ set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
+}
+#else
+static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
+ uint32_t addr, unsigned int sel)
+{
+ uint32_t *p, e1, e2;
+ e1 = (addr & 0xffff) | (sel << 16);
+ e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
+ p = ptr;
+ p[0] = tswap32(e1);
+ p[1] = tswap32(e2);
+}
+
+/* only dpl matters as we do only user space emulation */
+static void set_idt(int n, unsigned int dpl)
+{
+ set_gate(idt_table + n, 0, dpl, 0, 0);
+}
+#endif
+
+void cpu_loop(CPUX86State *env)
+{
+ int trapnr;
+ abi_ulong pc;
+ target_siginfo_t info;
+
+ for(;;) {
+ trapnr = cpu_x86_exec(env);
+ switch(trapnr) {
+ case 0x80:
+ /* linux syscall from int $0x80 */
+ env->regs[R_EAX] = do_syscall(env,
+ env->regs[R_EAX],
+ env->regs[R_EBX],
+ env->regs[R_ECX],
+ env->regs[R_EDX],
+ env->regs[R_ESI],
+ env->regs[R_EDI],
+ env->regs[R_EBP]);
+ break;
+#ifndef TARGET_ABI32
+ case EXCP_SYSCALL:
+            /* linux syscall from syscall instruction */
+ env->regs[R_EAX] = do_syscall(env,
+ env->regs[R_EAX],
+ env->regs[R_EDI],
+ env->regs[R_ESI],
+ env->regs[R_EDX],
+ env->regs[10],
+ env->regs[8],
+ env->regs[9]);
+ env->eip = env->exception_next_eip;
+ break;
+#endif
+ case EXCP0B_NOSEG:
+ case EXCP0C_STACK:
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = TARGET_SI_KERNEL;
+ info._sifields._sigfault._addr = 0;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case EXCP0D_GPF:
+ /* XXX: potential problem if ABI32 */
+#ifndef TARGET_X86_64
+ if (env->eflags & VM_MASK) {
+ handle_vm86_fault(env);
+ } else
+#endif
+ {
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = TARGET_SI_KERNEL;
+ info._sifields._sigfault._addr = 0;
+ queue_signal(env, info.si_signo, &info);
+ }
+ break;
+ case EXCP0E_PAGE:
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ if (!(env->error_code & 1))
+ info.si_code = TARGET_SEGV_MAPERR;
+ else
+ info.si_code = TARGET_SEGV_ACCERR;
+ info._sifields._sigfault._addr = env->cr[2];
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case EXCP00_DIVZ:
+#ifndef TARGET_X86_64
+ if (env->eflags & VM_MASK) {
+ handle_vm86_trap(env, trapnr);
+ } else
+#endif
+ {
+ /* division by zero */
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_code = TARGET_FPE_INTDIV;
+ info._sifields._sigfault._addr = env->eip;
+ queue_signal(env, info.si_signo, &info);
+ }
+ break;
+ case EXCP01_DB:
+ case EXCP03_INT3:
+#ifndef TARGET_X86_64
+ if (env->eflags & VM_MASK) {
+ handle_vm86_trap(env, trapnr);
+ } else
+#endif
+ {
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ if (trapnr == EXCP01_DB) {
+ info.si_code = TARGET_TRAP_BRKPT;
+ info._sifields._sigfault._addr = env->eip;
+ } else {
+ info.si_code = TARGET_SI_KERNEL;
+ info._sifields._sigfault._addr = 0;
+ }
+ queue_signal(env, info.si_signo, &info);
+ }
+ break;
+ case EXCP04_INTO:
+ case EXCP05_BOUND:
+#ifndef TARGET_X86_64
+ if (env->eflags & VM_MASK) {
+ handle_vm86_trap(env, trapnr);
+ } else
+#endif
+ {
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = TARGET_SI_KERNEL;
+ info._sifields._sigfault._addr = 0;
+ queue_signal(env, info.si_signo, &info);
+ }
+ break;
+ case EXCP06_ILLOP:
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = TARGET_ILL_ILLOPN;
+ info._sifields._sigfault._addr = env->eip;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
+ break;
+ case EXCP_DEBUG:
+ {
+ int sig;
+
+ sig = gdb_handlesig (env, TARGET_SIGTRAP);
+ if (sig)
+ {
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = TARGET_TRAP_BRKPT;
+ queue_signal(env, info.si_signo, &info);
+ }
+ }
+ break;
+ default:
+ pc = env->segs[R_CS].base + env->eip;
+ fprintf(stderr, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
+ (long)pc, trapnr);
+ abort();
+ }
+ process_pending_signals(env);
+ }
+}
+#endif
+
+#ifdef TARGET_ARM
+
+static void arm_cache_flush(abi_ulong start, abi_ulong last)
+{
+ abi_ulong addr, last1;
+
+ if (last < start)
+ return;
+ addr = start;
+ for(;;) {
+ last1 = ((addr + TARGET_PAGE_SIZE) & TARGET_PAGE_MASK) - 1;
+ if (last1 > last)
+ last1 = last;
+ tb_invalidate_page_range(addr, last1 + 1);
+ if (last1 == last)
+ break;
+ addr = last1 + 1;
+ }
+}
+
+/* Handle a jump to the kernel code page. */
+static int
+do_kernel_trap(CPUARMState *env)
+{
+ uint32_t addr;
+ uint32_t cpsr;
+ uint32_t val;
+
+ switch (env->regs[15]) {
+ case 0xffff0fa0: /* __kernel_memory_barrier */
+ /* ??? No-op. Will need to do better for SMP. */
+ break;
+ case 0xffff0fc0: /* __kernel_cmpxchg */
+ /* XXX: This only works between threads, not between processes.
+ It's probably possible to implement this with native host
+ operations. However things like ldrex/strex are much harder so
+ there's not much point trying. */
+ start_exclusive();
+ cpsr = cpsr_read(env);
+ addr = env->regs[2];
+ /* FIXME: This should SEGV if the access fails. */
+ if (get_user_u32(val, addr))
+ val = ~env->regs[0];
+ if (val == env->regs[0]) {
+ val = env->regs[1];
+ /* FIXME: Check for segfaults. */
+ put_user_u32(val, addr);
+ env->regs[0] = 0;
+ cpsr |= CPSR_C;
+ } else {
+ env->regs[0] = -1;
+ cpsr &= ~CPSR_C;
+ }
+ cpsr_write(env, cpsr, CPSR_C);
+ end_exclusive();
+ break;
+ case 0xffff0fe0: /* __kernel_get_tls */
+ env->regs[0] = env->cp15.c13_tls2;
+ break;
+ default:
+ return 1;
+ }
+ /* Jump back to the caller. */
+ addr = env->regs[14];
+ if (addr & 1) {
+ env->thumb = 1;
+ addr &= ~1;
+ }
+ env->regs[15] = addr;
+
+ return 0;
+}
+
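+/* Emulate a trapped store-exclusive: re-read the exclusive address under
+   the global exclusive lock, compare it with the recorded value, and only
+   then perform the store, writing the success code to the destination
+   register.  Returns non-zero if a guest memory access faulted. */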
+static int do_strex(CPUARMState *env)
+{
+ uint32_t val;
+ int size;
+ int rc = 1;
+ int segv = 0;
+ uint32_t addr;
+ start_exclusive();
+ addr = env->exclusive_addr;
+ if (addr != env->exclusive_test) {
+ goto fail;
+ }
+ size = env->exclusive_info & 0xf;
+ switch (size) {
+ case 0:
+ segv = get_user_u8(val, addr);
+ break;
+ case 1:
+ segv = get_user_u16(val, addr);
+ break;
+ case 2:
+ case 3:
+ segv = get_user_u32(val, addr);
+ break;
+ default:
+ abort();
+ }
+ if (segv) {
+ env->cp15.c6_data = addr;
+ goto done;
+ }
+ if (val != env->exclusive_val) {
+ goto fail;
+ }
+ if (size == 3) {
+ segv = get_user_u32(val, addr + 4);
+ if (segv) {
+ env->cp15.c6_data = addr + 4;
+ goto done;
+ }
+ if (val != env->exclusive_high) {
+ goto fail;
+ }
+ }
+ val = env->regs[(env->exclusive_info >> 8) & 0xf];
+ switch (size) {
+ case 0:
+ segv = put_user_u8(val, addr);
+ break;
+ case 1:
+ segv = put_user_u16(val, addr);
+ break;
+ case 2:
+ case 3:
+ segv = put_user_u32(val, addr);
+ break;
+ }
+ if (segv) {
+ env->cp15.c6_data = addr;
+ goto done;
+ }
+ if (size == 3) {
+ val = env->regs[(env->exclusive_info >> 12) & 0xf];
+ segv = put_user_u32(val, addr + 4);
+ if (segv) {
+ env->cp15.c6_data = addr + 4;
+ goto done;
+ }
+ }
+ rc = 0;
+fail:
+ env->regs[15] += 4;
+ env->regs[(env->exclusive_info >> 4) & 0xf] = rc;
+done:
+ end_exclusive();
+ return segv;
+}
+
+void cpu_loop(CPUARMState *env)
+{
+ int trapnr;
+ unsigned int n, insn;
+ target_siginfo_t info;
+ uint32_t addr;
+
+ for(;;) {
+ cpu_exec_start(env);
+ trapnr = cpu_arm_exec(env);
+ cpu_exec_end(env);
+ switch(trapnr) {
+ case EXCP_UDEF:
+ {
+ TaskState *ts = env->opaque;
+ uint32_t opcode;
+ int rc;
+
+                /* We handle the FPU emulation here, as the Linux kernel does. */
+                /* Fetch the trapping opcode. */
+ /* FIXME - what to do if get_user() fails? */
+ get_user_u32(opcode, env->regs[15]);
+
+ rc = EmulateAll(opcode, &ts->fpa, env);
+ if (rc == 0) { /* illegal instruction */
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = TARGET_ILL_ILLOPN;
+ info._sifields._sigfault._addr = env->regs[15];
+ queue_signal(env, info.si_signo, &info);
+ } else if (rc < 0) { /* FP exception */
+ int arm_fpe=0;
+
+ /* translate softfloat flags to FPSR flags */
+ if (-rc & float_flag_invalid)
+ arm_fpe |= BIT_IOC;
+ if (-rc & float_flag_divbyzero)
+ arm_fpe |= BIT_DZC;
+ if (-rc & float_flag_overflow)
+ arm_fpe |= BIT_OFC;
+ if (-rc & float_flag_underflow)
+ arm_fpe |= BIT_UFC;
+ if (-rc & float_flag_inexact)
+ arm_fpe |= BIT_IXC;
+
+ FPSR fpsr = ts->fpa.fpsr;
+ //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
+
+ if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+
+ /* ordered by priority, least first */
+ if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES;
+ if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND;
+ if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF;
+ if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV;
+ if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV;
+
+ info._sifields._sigfault._addr = env->regs[15];
+ queue_signal(env, info.si_signo, &info);
+ } else {
+ env->regs[15] += 4;
+ }
+
+ /* accumulate unenabled exceptions */
+ if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC))
+ fpsr |= BIT_IXC;
+ if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC))
+ fpsr |= BIT_UFC;
+ if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC))
+ fpsr |= BIT_OFC;
+ if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC))
+ fpsr |= BIT_DZC;
+ if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC))
+ fpsr |= BIT_IOC;
+ ts->fpa.fpsr=fpsr;
+ } else { /* everything OK */
+ /* increment PC */
+ env->regs[15] += 4;
+ }
+ }
+ break;
+ case EXCP_SWI:
+ case EXCP_BKPT:
+ {
+ env->eabi = 1;
+ /* system call */
+ if (trapnr == EXCP_BKPT) {
+ if (env->thumb) {
+ /* FIXME - what to do if get_user() fails? */
+ get_user_u16(insn, env->regs[15]);
+ n = insn & 0xff;
+ env->regs[15] += 2;
+ } else {
+ /* FIXME - what to do if get_user() fails? */
+ get_user_u32(insn, env->regs[15]);
+ n = (insn & 0xf) | ((insn >> 4) & 0xff0);
+ env->regs[15] += 4;
+ }
+ } else {
+ if (env->thumb) {
+ /* FIXME - what to do if get_user() fails? */
+ get_user_u16(insn, env->regs[15] - 2);
+ n = insn & 0xff;
+ } else {
+ /* FIXME - what to do if get_user() fails? */
+ get_user_u32(insn, env->regs[15] - 4);
+ n = insn & 0xffffff;
+ }
+ }
+
+ if (n == ARM_NR_cacheflush) {
+ arm_cache_flush(env->regs[0], env->regs[1]);
+ } else if (n == ARM_NR_semihosting
+ || n == ARM_NR_thumb_semihosting) {
+ env->regs[0] = do_arm_semihosting (env);
+ } else if (n == 0 || n >= ARM_SYSCALL_BASE
+ || (env->thumb && n == ARM_THUMB_SYSCALL)) {
+ /* linux syscall */
+ if (env->thumb || n == 0) {
+ n = env->regs[7];
+ } else {
+ n -= ARM_SYSCALL_BASE;
+ env->eabi = 0;
+ }
+ if ( n > ARM_NR_BASE) {
+ switch (n) {
+ case ARM_NR_cacheflush:
+ arm_cache_flush(env->regs[0], env->regs[1]);
+ break;
+ case ARM_NR_set_tls:
+ cpu_set_tls(env, env->regs[0]);
+ env->regs[0] = 0;
+ break;
+ default:
+ gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
+ n);
+ env->regs[0] = -TARGET_ENOSYS;
+ break;
+ }
+ } else {
+ env->regs[0] = do_syscall(env,
+ n,
+ env->regs[0],
+ env->regs[1],
+ env->regs[2],
+ env->regs[3],
+ env->regs[4],
+ env->regs[5]);
+ }
+ } else {
+ goto error;
+ }
+ }
+ break;
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
+ break;
+ case EXCP_PREFETCH_ABORT:
+ addr = env->cp15.c6_insn;
+ goto do_segv;
+ case EXCP_DATA_ABORT:
+ addr = env->cp15.c6_data;
+ goto do_segv;
+ do_segv:
+ {
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* XXX: check env->error_code */
+ info.si_code = TARGET_SEGV_MAPERR;
+ info._sifields._sigfault._addr = addr;
+ queue_signal(env, info.si_signo, &info);
+ }
+ break;
+ case EXCP_DEBUG:
+ {
+ int sig;
+
+ sig = gdb_handlesig (env, TARGET_SIGTRAP);
+ if (sig)
+ {
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = TARGET_TRAP_BRKPT;
+ queue_signal(env, info.si_signo, &info);
+ }
+ }
+ break;
+ case EXCP_KERNEL_TRAP:
+ if (do_kernel_trap(env))
+ goto error;
+ break;
+ case EXCP_STREX:
+ if (do_strex(env)) {
+ addr = env->cp15.c6_data;
+ goto do_segv;
+ }
+ break;
+ default:
+ error:
+ fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
+ trapnr);
+ cpu_dump_state(env, stderr, fprintf, 0);
+ abort();
+ }
+ process_pending_signals(env);
+ }
+}
+
+#endif
+
+#ifdef TARGET_SPARC
+#define SPARC64_STACK_BIAS 2047
+
+//#define DEBUG_WIN
+
+/* WARNING: dealing with register windows _is_ complicated. More info
+ can be found at http://www.sics.se/~psm/sparcstack.html */
+static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
+{
+ index = (index + cwp * 16) % (16 * env->nwindows);
+    /* wrap handling: if cwp is on the last window, then we use the
+ registers 'after' the end */
+ if (index < 8 && env->cwp == env->nwindows - 1)
+ index += 16 * env->nwindows;
+ return index;
+}
+
+/* save the register window 'cwp1' */
+static inline void save_window_offset(CPUSPARCState *env, int cwp1)
+{
+ unsigned int i;
+ abi_ulong sp_ptr;
+
+ sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
+#ifdef TARGET_SPARC64
+ if (sp_ptr & 3)
+ sp_ptr += SPARC64_STACK_BIAS;
+#endif
+#if defined(DEBUG_WIN)
+ printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",
+ sp_ptr, cwp1);
+#endif
+ for(i = 0; i < 16; i++) {
+ /* FIXME - what to do if put_user() fails? */
+ put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
+ sp_ptr += sizeof(abi_ulong);
+ }
+}
+
+static void save_window(CPUSPARCState *env)
+{
+#ifndef TARGET_SPARC64
+ unsigned int new_wim;
+ new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) &
+ ((1LL << env->nwindows) - 1);
+ save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
+ env->wim = new_wim;
+#else
+ save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
+ env->cansave++;
+ env->canrestore--;
+#endif
+}
+
+static void restore_window(CPUSPARCState *env)
+{
+#ifndef TARGET_SPARC64
+ unsigned int new_wim;
+#endif
+ unsigned int i, cwp1;
+ abi_ulong sp_ptr;
+
+#ifndef TARGET_SPARC64
+ new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) &
+ ((1LL << env->nwindows) - 1);
+#endif
+
+ /* restore the invalid window */
+ cwp1 = cpu_cwp_inc(env, env->cwp + 1);
+ sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
+#ifdef TARGET_SPARC64
+ if (sp_ptr & 3)
+ sp_ptr += SPARC64_STACK_BIAS;
+#endif
+#if defined(DEBUG_WIN)
+ printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n",
+ sp_ptr, cwp1);
+#endif
+ for(i = 0; i < 16; i++) {
+ /* FIXME - what to do if get_user() fails? */
+ get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
+ sp_ptr += sizeof(abi_ulong);
+ }
+#ifdef TARGET_SPARC64
+ env->canrestore++;
+ if (env->cleanwin < env->nwindows - 1)
+ env->cleanwin++;
+ env->cansave--;
+#else
+ env->wim = new_wim;
+#endif
+}
+
+static void flush_windows(CPUSPARCState *env)
+{
+ int offset, cwp1;
+
+ offset = 1;
+ for(;;) {
+ /* if restore would invoke restore_window(), then we can stop */
+ cwp1 = cpu_cwp_inc(env, env->cwp + offset);
+#ifndef TARGET_SPARC64
+ if (env->wim & (1 << cwp1))
+ break;
+#else
+ if (env->canrestore == 0)
+ break;
+ env->cansave++;
+ env->canrestore--;
+#endif
+ save_window_offset(env, cwp1);
+ offset++;
+ }
+ cwp1 = cpu_cwp_inc(env, env->cwp + 1);
+#ifndef TARGET_SPARC64
+ /* set wim so that restore will reload the registers */
+ env->wim = 1 << cwp1;
+#endif
+#if defined(DEBUG_WIN)
+ printf("flush_windows: nb=%d\n", offset - 1);
+#endif
+}
+
+void cpu_loop (CPUSPARCState *env)
+{
+ int trapnr;
+ abi_long ret;
+ target_siginfo_t info;
+
+ while (1) {
+ trapnr = cpu_sparc_exec (env);
+
+ switch (trapnr) {
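+        /* software traps used by Linux for system calls */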
+#ifndef TARGET_SPARC64
+ case 0x88:
+ case 0x90:
+#else
+ case 0x110:
+ case 0x16d:
+#endif
+ ret = do_syscall (env, env->gregs[1],
+ env->regwptr[0], env->regwptr[1],
+ env->regwptr[2], env->regwptr[3],
+ env->regwptr[4], env->regwptr[5]);
+ if ((abi_ulong)ret >= (abi_ulong)(-515)) {
+#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
+ env->xcc |= PSR_CARRY;
+#else
+ env->psr |= PSR_CARRY;
+#endif
+ ret = -ret;
+ } else {
+#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
+ env->xcc &= ~PSR_CARRY;
+#else
+ env->psr &= ~PSR_CARRY;
+#endif
+ }
+ env->regwptr[0] = ret;
+ /* next instruction */
+ env->pc = env->npc;
+ env->npc = env->npc + 4;
+ break;
+ case 0x83: /* flush windows */
+#ifdef TARGET_ABI32
+ case 0x103:
+#endif
+ flush_windows(env);
+ /* next instruction */
+ env->pc = env->npc;
+ env->npc = env->npc + 4;
+ break;
+#ifndef TARGET_SPARC64
+ case TT_WIN_OVF: /* window overflow */
+ save_window(env);
+ break;
+ case TT_WIN_UNF: /* window underflow */
+ restore_window(env);
+ break;
+ case TT_TFAULT:
+ case TT_DFAULT:
+ {
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* XXX: check env->error_code */
+ info.si_code = TARGET_SEGV_MAPERR;
+ info._sifields._sigfault._addr = env->mmuregs[4];
+ queue_signal(env, info.si_signo, &info);
+ }
+ break;
+#else
+ case TT_SPILL: /* window overflow */
+ save_window(env);
+ break;
+ case TT_FILL: /* window underflow */
+ restore_window(env);
+ break;
+ case TT_TFAULT:
+ case TT_DFAULT:
+ {
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* XXX: check env->error_code */
+ info.si_code = TARGET_SEGV_MAPERR;
+ if (trapnr == TT_DFAULT)
+ info._sifields._sigfault._addr = env->dmmuregs[4];
+ else
+ info._sifields._sigfault._addr = cpu_tsptr(env)->tpc;
+ queue_signal(env, info.si_signo, &info);
+ }
+ break;
+#ifndef TARGET_ABI32
+ case 0x16e:
+ flush_windows(env);
+ sparc64_get_context(env);
+ break;
+ case 0x16f:
+ flush_windows(env);
+ sparc64_set_context(env);
+ break;
+#endif
+#endif
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
+ break;
+ case EXCP_DEBUG:
+ {
+ int sig;
+
+ sig = gdb_handlesig (env, TARGET_SIGTRAP);
+ if (sig)
+ {
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = TARGET_TRAP_BRKPT;
+ queue_signal(env, info.si_signo, &info);
+ }
+ }
+ break;
+ default:
+ printf ("Unhandled trap: 0x%x\n", trapnr);
+ cpu_dump_state(env, stderr, fprintf, 0);
+ exit (1);
+ }
+ process_pending_signals (env);
+ }
+}
+
+#endif
+
+#ifdef TARGET_PPC
+static inline uint64_t cpu_ppc_get_tb (CPUState *env)
+{
+ /* TO FIX */
+ return 0;
+}
+
+uint64_t cpu_ppc_load_tbl (CPUState *env)
+{
+ return cpu_ppc_get_tb(env);
+}
+
+uint32_t cpu_ppc_load_tbu (CPUState *env)
+{
+ return cpu_ppc_get_tb(env) >> 32;
+}
+
+uint64_t cpu_ppc_load_atbl (CPUState *env)
+{
+ return cpu_ppc_get_tb(env);
+}
+
+uint32_t cpu_ppc_load_atbu (CPUState *env)
+{
+ return cpu_ppc_get_tb(env) >> 32;
+}
+
+uint32_t cpu_ppc601_load_rtcu (CPUState *env)
+__attribute__ (( alias ("cpu_ppc_load_tbu") ));
+
+uint32_t cpu_ppc601_load_rtcl (CPUState *env)
+{
+ return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
+}
+
+/* XXX: to be fixed */
+int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
+{
+ return -1;
+}
+
+int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
+{
+ return -1;
+}
+
+#define EXCP_DUMP(env, fmt, ...) \
+do { \
+ fprintf(stderr, fmt , ## __VA_ARGS__); \
+ cpu_dump_state(env, stderr, fprintf, 0); \
+ qemu_log(fmt, ## __VA_ARGS__); \
+ if (logfile) \
+ log_cpu_state(env, 0); \
+} while (0)
+
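+/* Emulate a trapped stwcx./stdcx.: under the exclusive lock, re-check the
+   reservation address and value and only then perform the store, recording
+   success in CR0.  Returns non-zero if a guest memory access faulted. */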
+static int do_store_exclusive(CPUPPCState *env)
+{
+ target_ulong addr;
+ target_ulong page_addr;
+ target_ulong val;
+ int flags;
+ int segv = 0;
+
+ addr = env->reserve_ea;
+ page_addr = addr & TARGET_PAGE_MASK;
+ start_exclusive();
+ mmap_lock();
+ flags = page_get_flags(page_addr);
+ if ((flags & PAGE_READ) == 0) {
+ segv = 1;
+ } else {
+ int reg = env->reserve_info & 0x1f;
+ int size = (env->reserve_info >> 5) & 0xf;
+ int stored = 0;
+
+ if (addr == env->reserve_addr) {
+ switch (size) {
+ case 1: segv = get_user_u8(val, addr); break;
+ case 2: segv = get_user_u16(val, addr); break;
+ case 4: segv = get_user_u32(val, addr); break;
+#if defined(TARGET_PPC64)
+ case 8: segv = get_user_u64(val, addr); break;
+#endif
+ default: abort();
+ }
+ if (!segv && val == env->reserve_val) {
+ val = env->gpr[reg];
+ switch (size) {
+ case 1: segv = put_user_u8(val, addr); break;
+ case 2: segv = put_user_u16(val, addr); break;
+ case 4: segv = put_user_u32(val, addr); break;
+#if defined(TARGET_PPC64)
+ case 8: segv = put_user_u64(val, addr); break;
+#endif
+ default: abort();
+ }
+ if (!segv) {
+ stored = 1;
+ }
+ }
+ }
+ env->crf[0] = (stored << 1) | xer_so;
+ env->reserve_addr = (target_ulong)-1;
+ }
+ if (!segv) {
+ env->nip += 4;
+ }
+ mmap_unlock();
+ end_exclusive();
+ return segv;
+}
+
+void cpu_loop(CPUPPCState *env)
+{
+ target_siginfo_t info;
+ int trapnr;
+ uint32_t ret;
+
+ for(;;) {
+ cpu_exec_start(env);
+ trapnr = cpu_ppc_exec(env);
+ cpu_exec_end(env);
+ switch(trapnr) {
+ case POWERPC_EXCP_NONE:
+ /* Just go on */
+ break;
+ case POWERPC_EXCP_CRITICAL: /* Critical input */
+ cpu_abort(env, "Critical interrupt while in user mode. "
+ "Aborting\n");
+ break;
+ case POWERPC_EXCP_MCHECK: /* Machine check exception */
+ cpu_abort(env, "Machine check exception while in user mode. "
+ "Aborting\n");
+ break;
+ case POWERPC_EXCP_DSI: /* Data storage exception */
+ EXCP_DUMP(env, "Invalid data memory access: 0x" TARGET_FMT_lx "\n",
+ env->spr[SPR_DAR]);
+ /* XXX: check this. Seems bugged */
+ switch (env->error_code & 0xFF000000) {
+ case 0x40000000:
+ info.si_signo = TARGET_SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = TARGET_SEGV_MAPERR;
+ break;
+ case 0x04000000:
+ info.si_signo = TARGET_SIGILL;
+ info.si_errno = 0;
+ info.si_code = TARGET_ILL_ILLADR;
+ break;
+ case 0x08000000:
+ info.si_signo = TARGET_SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = TARGET_SEGV_ACCERR;
+ break;
+ default:
+ /* Let's send a regular segfault... */
+ EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
+ env->error_code);
+ info.si_signo = TARGET_SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = TARGET_SEGV_MAPERR;
+ break;
+ }
+ info._sifields._sigfault._addr = env->nip;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case POWERPC_EXCP_ISI: /* Instruction storage exception */
+ EXCP_DUMP(env, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
+ "\n", env->spr[SPR_SRR0]);
+ /* XXX: check this */
+ switch (env->error_code & 0xFF000000) {
+ case 0x40000000:
+ info.si_signo = TARGET_SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = TARGET_SEGV_MAPERR;
+ break;
+ case 0x10000000:
+ case 0x08000000:
+ info.si_signo = TARGET_SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = TARGET_SEGV_ACCERR;
+ break;
+ default:
+ /* Let's send a regular segfault... */
+ EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
+ env->error_code);
+ info.si_signo = TARGET_SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = TARGET_SEGV_MAPERR;
+ break;
+ }
+ info._sifields._sigfault._addr = env->nip - 4;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case POWERPC_EXCP_EXTERNAL: /* External input */
+ cpu_abort(env, "External interrupt while in user mode. "
+ "Aborting\n");
+ break;
+ case POWERPC_EXCP_ALIGN: /* Alignment exception */
+ EXCP_DUMP(env, "Unaligned memory access\n");
+ /* XXX: check this */
+ info.si_signo = TARGET_SIGBUS;
+ info.si_errno = 0;
+ info.si_code = TARGET_BUS_ADRALN;
+ info._sifields._sigfault._addr = env->nip - 4;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case POWERPC_EXCP_PROGRAM: /* Program exception */
+ /* XXX: check this */
+ switch (env->error_code & ~0xF) {
+ case POWERPC_EXCP_FP:
+ EXCP_DUMP(env, "Floating point program exception\n");
+ info.si_signo = TARGET_SIGFPE;
+ info.si_errno = 0;
+ switch (env->error_code & 0xF) {
+ case POWERPC_EXCP_FP_OX:
+ info.si_code = TARGET_FPE_FLTOVF;
+ break;
+ case POWERPC_EXCP_FP_UX:
+ info.si_code = TARGET_FPE_FLTUND;
+ break;
+ case POWERPC_EXCP_FP_ZX:
+ case POWERPC_EXCP_FP_VXZDZ:
+ info.si_code = TARGET_FPE_FLTDIV;
+ break;
+ case POWERPC_EXCP_FP_XX:
+ info.si_code = TARGET_FPE_FLTRES;
+ break;
+ case POWERPC_EXCP_FP_VXSOFT:
+ info.si_code = TARGET_FPE_FLTINV;
+ break;
+ case POWERPC_EXCP_FP_VXSNAN:
+ case POWERPC_EXCP_FP_VXISI:
+ case POWERPC_EXCP_FP_VXIDI:
+ case POWERPC_EXCP_FP_VXIMZ:
+ case POWERPC_EXCP_FP_VXVC:
+ case POWERPC_EXCP_FP_VXSQRT:
+ case POWERPC_EXCP_FP_VXCVI:
+ info.si_code = TARGET_FPE_FLTSUB;
+ break;
+ default:
+ EXCP_DUMP(env, "Unknown floating point exception (%02x)\n",
+ env->error_code);
+ break;
+ }
+ break;
+ case POWERPC_EXCP_INVAL:
+ EXCP_DUMP(env, "Invalid instruction\n");
+ info.si_signo = TARGET_SIGILL;
+ info.si_errno = 0;
+ switch (env->error_code & 0xF) {
+ case POWERPC_EXCP_INVAL_INVAL:
+ info.si_code = TARGET_ILL_ILLOPC;
+ break;
+ case POWERPC_EXCP_INVAL_LSWX:
+ info.si_code = TARGET_ILL_ILLOPN;
+ break;
+ case POWERPC_EXCP_INVAL_SPR:
+ info.si_code = TARGET_ILL_PRVREG;
+ break;
+ case POWERPC_EXCP_INVAL_FP:
+ info.si_code = TARGET_ILL_COPROC;
+ break;
+ default:
+ EXCP_DUMP(env, "Unknown invalid operation (%02x)\n",
+ env->error_code & 0xF);
+ info.si_code = TARGET_ILL_ILLADR;
+ break;
+ }
+ break;
+ case POWERPC_EXCP_PRIV:
+ EXCP_DUMP(env, "Privilege violation\n");
+ info.si_signo = TARGET_SIGILL;
+ info.si_errno = 0;
+ switch (env->error_code & 0xF) {
+ case POWERPC_EXCP_PRIV_OPC:
+ info.si_code = TARGET_ILL_PRVOPC;
+ break;
+ case POWERPC_EXCP_PRIV_REG:
+ info.si_code = TARGET_ILL_PRVREG;
+ break;
+ default:
+ EXCP_DUMP(env, "Unknown privilege violation (%02x)\n",
+ env->error_code & 0xF);
+ info.si_code = TARGET_ILL_PRVOPC;
+ break;
+ }
+ break;
+ case POWERPC_EXCP_TRAP:
+ cpu_abort(env, "Tried to call a TRAP\n");
+ break;
+ default:
+ /* Should not happen ! */
+ cpu_abort(env, "Unknown program exception (%02x)\n",
+ env->error_code);
+ break;
+ }
+ info._sifields._sigfault._addr = env->nip - 4;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
+ EXCP_DUMP(env, "No floating point allowed\n");
+ info.si_signo = TARGET_SIGILL;
+ info.si_errno = 0;
+ info.si_code = TARGET_ILL_COPROC;
+ info._sifields._sigfault._addr = env->nip - 4;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case POWERPC_EXCP_SYSCALL: /* System call exception */
+ cpu_abort(env, "Syscall exception while in user mode. "
+ "Aborting\n");
+ break;
+ case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
+ EXCP_DUMP(env, "No APU instruction allowed\n");
+ info.si_signo = TARGET_SIGILL;
+ info.si_errno = 0;
+ info.si_code = TARGET_ILL_COPROC;
+ info._sifields._sigfault._addr = env->nip - 4;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case POWERPC_EXCP_DECR: /* Decrementer exception */
+ cpu_abort(env, "Decrementer interrupt while in user mode. "
+ "Aborting\n");
+ break;
+ case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
+ cpu_abort(env, "Fix interval timer interrupt while in user mode. "
+ "Aborting\n");
+ break;
+ case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
+ cpu_abort(env, "Watchdog timer interrupt while in user mode. "
+ "Aborting\n");
+ break;
+ case POWERPC_EXCP_DTLB: /* Data TLB error */
+ cpu_abort(env, "Data TLB exception while in user mode. "
+ "Aborting\n");
+ break;
+ case POWERPC_EXCP_ITLB: /* Instruction TLB error */
+ cpu_abort(env, "Instruction TLB exception while in user mode. "
+ "Aborting\n");
+ break;
+ case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */
+ EXCP_DUMP(env, "No SPE/floating-point instruction allowed\n");
+ info.si_signo = TARGET_SIGILL;
+ info.si_errno = 0;
+ info.si_code = TARGET_ILL_COPROC;
+ info._sifields._sigfault._addr = env->nip - 4;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */
+ cpu_abort(env, "Embedded floating-point data IRQ not handled\n");
+ break;
+ case POWERPC_EXCP_EFPRI: /* Embedded floating-point round IRQ */
+ cpu_abort(env, "Embedded floating-point round IRQ not handled\n");
+ break;
+ case POWERPC_EXCP_EPERFM: /* Embedded performance monitor IRQ */
+ cpu_abort(env, "Performance monitor exception not handled\n");
+ break;
+ case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
+ cpu_abort(env, "Doorbell interrupt while in user mode. "
+ "Aborting\n");
+ break;
+ case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
+ cpu_abort(env, "Doorbell critical interrupt while in user mode. "
+ "Aborting\n");
+ break;
+ case POWERPC_EXCP_RESET: /* System reset exception */
+ cpu_abort(env, "Reset interrupt while in user mode. "
+ "Aborting\n");
+ break;
+ case POWERPC_EXCP_DSEG: /* Data segment exception */
+ cpu_abort(env, "Data segment exception while in user mode. "
+ "Aborting\n");
+ break;
+ case POWERPC_EXCP_ISEG: /* Instruction segment exception */
+ cpu_abort(env, "Instruction segment exception "
+ "while in user mode. Aborting\n");
+ break;
+ /* PowerPC 64 with hypervisor mode support */
+ case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
+ cpu_abort(env, "Hypervisor decrementer interrupt "
+ "while in user mode. Aborting\n");
+ break;
+ case POWERPC_EXCP_TRACE: /* Trace exception */
+ /* Nothing to do:
+ * we use this exception to emulate step-by-step execution mode.
+ */
+ break;
+ /* PowerPC 64 with hypervisor mode support */
+ case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
+ cpu_abort(env, "Hypervisor data storage exception "
+ "while in user mode. Aborting\n");
+ break;
+ case POWERPC_EXCP_HISI: /* Hypervisor instruction storage excp */
+ cpu_abort(env, "Hypervisor instruction storage exception "
+ "while in user mode. Aborting\n");
+ break;
+ case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
+ cpu_abort(env, "Hypervisor data segment exception "
+ "while in user mode. Aborting\n");
+ break;
+ case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment excp */
+ cpu_abort(env, "Hypervisor instruction segment exception "
+ "while in user mode. Aborting\n");
+ break;
+ case POWERPC_EXCP_VPU: /* Vector unavailable exception */
+ EXCP_DUMP(env, "No Altivec instructions allowed\n");
+ info.si_signo = TARGET_SIGILL;
+ info.si_errno = 0;
+ info.si_code = TARGET_ILL_COPROC;
+ info._sifields._sigfault._addr = env->nip - 4;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case POWERPC_EXCP_PIT: /* Programmable interval timer IRQ */
+ cpu_abort(env, "Programable interval timer interrupt "
+ "while in user mode. Aborting\n");
+ break;
+ case POWERPC_EXCP_IO: /* IO error exception */
+ cpu_abort(env, "IO error exception while in user mode. "
+ "Aborting\n");
+ break;
+ case POWERPC_EXCP_RUNM: /* Run mode exception */
+ cpu_abort(env, "Run mode exception while in user mode. "
+ "Aborting\n");
+ break;
+ case POWERPC_EXCP_EMUL: /* Emulation trap exception */
+ cpu_abort(env, "Emulation trap exception not handled\n");
+ break;
+ case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
+ cpu_abort(env, "Instruction fetch TLB exception "
+ "while in user-mode. Aborting");
+ break;
+ case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
+ cpu_abort(env, "Data load TLB exception while in user-mode. "
+ "Aborting");
+ break;
+ case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
+ cpu_abort(env, "Data store TLB exception while in user-mode. "
+ "Aborting");
+ break;
+ case POWERPC_EXCP_FPA: /* Floating-point assist exception */
+ cpu_abort(env, "Floating-point assist exception not handled\n");
+ break;
+ case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
+ cpu_abort(env, "Instruction address breakpoint exception "
+ "not handled\n");
+ break;
+ case POWERPC_EXCP_SMI: /* System management interrupt */
+ cpu_abort(env, "System management interrupt while in user mode. "
+ "Aborting\n");
+ break;
+ case POWERPC_EXCP_THERM: /* Thermal interrupt */
+ cpu_abort(env, "Thermal interrupt interrupt while in user mode. "
+ "Aborting\n");
+ break;
+ case POWERPC_EXCP_PERFM: /* Embedded performance monitor IRQ */
+ cpu_abort(env, "Performance monitor exception not handled\n");
+ break;
+ case POWERPC_EXCP_VPUA: /* Vector assist exception */
+ cpu_abort(env, "Vector assist exception not handled\n");
+ break;
+ case POWERPC_EXCP_SOFTP: /* Soft patch exception */
+ cpu_abort(env, "Soft patch exception not handled\n");
+ break;
+ case POWERPC_EXCP_MAINT: /* Maintenance exception */
+ cpu_abort(env, "Maintenance exception while in user mode. "
+ "Aborting\n");
+ break;
+ case POWERPC_EXCP_STOP: /* stop translation */
+ /* We did invalidate the instruction cache. Go on */
+ break;
+ case POWERPC_EXCP_BRANCH: /* branch instruction: */
+ /* We just stopped because of a branch. Go on */
+ break;
+ case POWERPC_EXCP_SYSCALL_USER:
+ /* system call in user-mode emulation */
+ /* WARNING:
+ * PPC ABI uses overflow flag in cr0 to signal an error
+ * in syscalls.
+ */
+#if 0
+ printf("syscall %d 0x%08x 0x%08x 0x%08x 0x%08x\n", env->gpr[0],
+ env->gpr[3], env->gpr[4], env->gpr[5], env->gpr[6]);
+#endif
+ env->crf[0] &= ~0x1;
+ ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4],
+ env->gpr[5], env->gpr[6], env->gpr[7],
+ env->gpr[8]);
+ if (ret == (uint32_t)(-TARGET_QEMU_ESIGRETURN)) {
+ /* Returning from a successful sigreturn syscall.
+ Avoid corrupting register state. */
+ break;
+ }
+ if (ret > (uint32_t)(-515)) {
+ env->crf[0] |= 0x1;
+ ret = -ret;
+ }
+ env->gpr[3] = ret;
+#if 0
+ printf("syscall returned 0x%08x (%d)\n", ret, ret);
+#endif
+ break;
+ case POWERPC_EXCP_STCX:
+ if (do_store_exclusive(env)) {
+ info.si_signo = TARGET_SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = TARGET_SEGV_MAPERR;
+ info._sifields._sigfault._addr = env->nip;
+ queue_signal(env, info.si_signo, &info);
+ }
+ break;
+ case EXCP_DEBUG:
+ {
+ int sig;
+
+ sig = gdb_handlesig(env, TARGET_SIGTRAP);
+ if (sig) {
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = TARGET_TRAP_BRKPT;
+ queue_signal(env, info.si_signo, &info);
+ }
+ }
+ break;
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
+ break;
+ default:
+ cpu_abort(env, "Unknown exception 0x%d. Aborting\n", trapnr);
+ break;
+ }
+ process_pending_signals(env);
+ }
+}
+#endif
+
+#ifdef TARGET_MIPS
+
+#define MIPS_SYS(name, args) args,
+
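+/* Argument count for each o32 syscall, indexed by syscall number minus
+   4000; used below to decide how many extra arguments to read from the
+   guest stack. */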
+static const uint8_t mips_syscall_args[] = {
+ MIPS_SYS(sys_syscall , 0) /* 4000 */
+ MIPS_SYS(sys_exit , 1)
+ MIPS_SYS(sys_fork , 0)
+ MIPS_SYS(sys_read , 3)
+ MIPS_SYS(sys_write , 3)
+ MIPS_SYS(sys_open , 3) /* 4005 */
+ MIPS_SYS(sys_close , 1)
+ MIPS_SYS(sys_waitpid , 3)
+ MIPS_SYS(sys_creat , 2)
+ MIPS_SYS(sys_link , 2)
+ MIPS_SYS(sys_unlink , 1) /* 4010 */
+ MIPS_SYS(sys_execve , 0)
+ MIPS_SYS(sys_chdir , 1)
+ MIPS_SYS(sys_time , 1)
+ MIPS_SYS(sys_mknod , 3)
+ MIPS_SYS(sys_chmod , 2) /* 4015 */
+ MIPS_SYS(sys_lchown , 3)
+ MIPS_SYS(sys_ni_syscall , 0)
+ MIPS_SYS(sys_ni_syscall , 0) /* was sys_stat */
+ MIPS_SYS(sys_lseek , 3)
+ MIPS_SYS(sys_getpid , 0) /* 4020 */
+ MIPS_SYS(sys_mount , 5)
+ MIPS_SYS(sys_oldumount , 1)
+ MIPS_SYS(sys_setuid , 1)
+ MIPS_SYS(sys_getuid , 0)
+ MIPS_SYS(sys_stime , 1) /* 4025 */
+ MIPS_SYS(sys_ptrace , 4)
+ MIPS_SYS(sys_alarm , 1)
+ MIPS_SYS(sys_ni_syscall , 0) /* was sys_fstat */
+ MIPS_SYS(sys_pause , 0)
+ MIPS_SYS(sys_utime , 2) /* 4030 */
+ MIPS_SYS(sys_ni_syscall , 0)
+ MIPS_SYS(sys_ni_syscall , 0)
+ MIPS_SYS(sys_access , 2)
+ MIPS_SYS(sys_nice , 1)
+ MIPS_SYS(sys_ni_syscall , 0) /* 4035 */
+ MIPS_SYS(sys_sync , 0)
+ MIPS_SYS(sys_kill , 2)
+ MIPS_SYS(sys_rename , 2)
+ MIPS_SYS(sys_mkdir , 2)
+ MIPS_SYS(sys_rmdir , 1) /* 4040 */
+ MIPS_SYS(sys_dup , 1)
+ MIPS_SYS(sys_pipe , 0)
+ MIPS_SYS(sys_times , 1)
+ MIPS_SYS(sys_ni_syscall , 0)
+ MIPS_SYS(sys_brk , 1) /* 4045 */
+ MIPS_SYS(sys_setgid , 1)
+ MIPS_SYS(sys_getgid , 0)
+ MIPS_SYS(sys_ni_syscall , 0) /* was signal(2) */
+ MIPS_SYS(sys_geteuid , 0)
+ MIPS_SYS(sys_getegid , 0) /* 4050 */
+ MIPS_SYS(sys_acct , 0)
+ MIPS_SYS(sys_umount , 2)
+ MIPS_SYS(sys_ni_syscall , 0)
+ MIPS_SYS(sys_ioctl , 3)
+ MIPS_SYS(sys_fcntl , 3) /* 4055 */
+ MIPS_SYS(sys_ni_syscall , 2)
+ MIPS_SYS(sys_setpgid , 2)
+ MIPS_SYS(sys_ni_syscall , 0)
+ MIPS_SYS(sys_olduname , 1)
+ MIPS_SYS(sys_umask , 1) /* 4060 */
+ MIPS_SYS(sys_chroot , 1)
+ MIPS_SYS(sys_ustat , 2)
+ MIPS_SYS(sys_dup2 , 2)
+ MIPS_SYS(sys_getppid , 0)
+ MIPS_SYS(sys_getpgrp , 0) /* 4065 */
+ MIPS_SYS(sys_setsid , 0)
+ MIPS_SYS(sys_sigaction , 3)
+ MIPS_SYS(sys_sgetmask , 0)
+ MIPS_SYS(sys_ssetmask , 1)
+ MIPS_SYS(sys_setreuid , 2) /* 4070 */
+ MIPS_SYS(sys_setregid , 2)
+ MIPS_SYS(sys_sigsuspend , 0)
+ MIPS_SYS(sys_sigpending , 1)
+ MIPS_SYS(sys_sethostname , 2)
+ MIPS_SYS(sys_setrlimit , 2) /* 4075 */
+ MIPS_SYS(sys_getrlimit , 2)
+ MIPS_SYS(sys_getrusage , 2)
+ MIPS_SYS(sys_gettimeofday, 2)
+ MIPS_SYS(sys_settimeofday, 2)
+ MIPS_SYS(sys_getgroups , 2) /* 4080 */
+ MIPS_SYS(sys_setgroups , 2)
+ MIPS_SYS(sys_ni_syscall , 0) /* old_select */
+ MIPS_SYS(sys_symlink , 2)
+ MIPS_SYS(sys_ni_syscall , 0) /* was sys_lstat */
+ MIPS_SYS(sys_readlink , 3) /* 4085 */
+ MIPS_SYS(sys_uselib , 1)
+ MIPS_SYS(sys_swapon , 2)
+ MIPS_SYS(sys_reboot , 3)
+ MIPS_SYS(old_readdir , 3)
+ MIPS_SYS(old_mmap , 6) /* 4090 */
+ MIPS_SYS(sys_munmap , 2)
+ MIPS_SYS(sys_truncate , 2)
+ MIPS_SYS(sys_ftruncate , 2)
+ MIPS_SYS(sys_fchmod , 2)
+ MIPS_SYS(sys_fchown , 3) /* 4095 */
+ MIPS_SYS(sys_getpriority , 2)
+ MIPS_SYS(sys_setpriority , 3)
+ MIPS_SYS(sys_ni_syscall , 0)
+ MIPS_SYS(sys_statfs , 2)
+ MIPS_SYS(sys_fstatfs , 2) /* 4100 */
+ MIPS_SYS(sys_ni_syscall , 0) /* was ioperm(2) */
+ MIPS_SYS(sys_socketcall , 2)
+ MIPS_SYS(sys_syslog , 3)
+ MIPS_SYS(sys_setitimer , 3)
+ MIPS_SYS(sys_getitimer , 2) /* 4105 */
+ MIPS_SYS(sys_newstat , 2)
+ MIPS_SYS(sys_newlstat , 2)
+ MIPS_SYS(sys_newfstat , 2)
+ MIPS_SYS(sys_uname , 1)
+ MIPS_SYS(sys_ni_syscall , 0) /* 4110 was iopl(2) */
+ MIPS_SYS(sys_vhangup , 0)
+ MIPS_SYS(sys_ni_syscall , 0) /* was sys_idle() */
+ MIPS_SYS(sys_ni_syscall , 0) /* was sys_vm86 */
+ MIPS_SYS(sys_wait4 , 4)
+ MIPS_SYS(sys_swapoff , 1) /* 4115 */
+ MIPS_SYS(sys_sysinfo , 1)
+ MIPS_SYS(sys_ipc , 6)
+ MIPS_SYS(sys_fsync , 1)
+ MIPS_SYS(sys_sigreturn , 0)
+ MIPS_SYS(sys_clone , 6) /* 4120 */
+ MIPS_SYS(sys_setdomainname, 2)
+ MIPS_SYS(sys_newuname , 1)
+ MIPS_SYS(sys_ni_syscall , 0) /* sys_modify_ldt */
+ MIPS_SYS(sys_adjtimex , 1)
+ MIPS_SYS(sys_mprotect , 3) /* 4125 */
+ MIPS_SYS(sys_sigprocmask , 3)
+ MIPS_SYS(sys_ni_syscall , 0) /* was create_module */
+ MIPS_SYS(sys_init_module , 5)
+ MIPS_SYS(sys_delete_module, 1)
+ MIPS_SYS(sys_ni_syscall , 0) /* 4130 was get_kernel_syms */
+ MIPS_SYS(sys_quotactl , 0)
+ MIPS_SYS(sys_getpgid , 1)
+ MIPS_SYS(sys_fchdir , 1)
+ MIPS_SYS(sys_bdflush , 2)
+ MIPS_SYS(sys_sysfs , 3) /* 4135 */
+ MIPS_SYS(sys_personality , 1)
+ MIPS_SYS(sys_ni_syscall , 0) /* for afs_syscall */
+ MIPS_SYS(sys_setfsuid , 1)
+ MIPS_SYS(sys_setfsgid , 1)
+ MIPS_SYS(sys_llseek , 5) /* 4140 */
+ MIPS_SYS(sys_getdents , 3)
+ MIPS_SYS(sys_select , 5)
+ MIPS_SYS(sys_flock , 2)
+ MIPS_SYS(sys_msync , 3)
+ MIPS_SYS(sys_readv , 3) /* 4145 */
+ MIPS_SYS(sys_writev , 3)
+ MIPS_SYS(sys_cacheflush , 3)
+ MIPS_SYS(sys_cachectl , 3)
+ MIPS_SYS(sys_sysmips , 4)
+ MIPS_SYS(sys_ni_syscall , 0) /* 4150 */
+ MIPS_SYS(sys_getsid , 1)
+ MIPS_SYS(sys_fdatasync , 0)
+ MIPS_SYS(sys_sysctl , 1)
+ MIPS_SYS(sys_mlock , 2)
+ MIPS_SYS(sys_munlock , 2) /* 4155 */
+ MIPS_SYS(sys_mlockall , 1)
+ MIPS_SYS(sys_munlockall , 0)
+ MIPS_SYS(sys_sched_setparam, 2)
+ MIPS_SYS(sys_sched_getparam, 2)
+ MIPS_SYS(sys_sched_setscheduler, 3) /* 4160 */
+ MIPS_SYS(sys_sched_getscheduler, 1)
+ MIPS_SYS(sys_sched_yield , 0)
+ MIPS_SYS(sys_sched_get_priority_max, 1)
+ MIPS_SYS(sys_sched_get_priority_min, 1)
+ MIPS_SYS(sys_sched_rr_get_interval, 2) /* 4165 */
+ MIPS_SYS(sys_nanosleep, 2)
+ MIPS_SYS(sys_mremap , 4)
+ MIPS_SYS(sys_accept , 3)
+ MIPS_SYS(sys_bind , 3)
+ MIPS_SYS(sys_connect , 3) /* 4170 */
+ MIPS_SYS(sys_getpeername , 3)
+ MIPS_SYS(sys_getsockname , 3)
+ MIPS_SYS(sys_getsockopt , 5)
+ MIPS_SYS(sys_listen , 2)
+ MIPS_SYS(sys_recv , 4) /* 4175 */
+ MIPS_SYS(sys_recvfrom , 6)
+ MIPS_SYS(sys_recvmsg , 3)
+ MIPS_SYS(sys_send , 4)
+ MIPS_SYS(sys_sendmsg , 3)
+ MIPS_SYS(sys_sendto , 6) /* 4180 */
+ MIPS_SYS(sys_setsockopt , 5)
+ MIPS_SYS(sys_shutdown , 2)
+ MIPS_SYS(sys_socket , 3)
+ MIPS_SYS(sys_socketpair , 4)
+ MIPS_SYS(sys_setresuid , 3) /* 4185 */
+ MIPS_SYS(sys_getresuid , 3)
+ MIPS_SYS(sys_ni_syscall , 0) /* was sys_query_module */
+ MIPS_SYS(sys_poll , 3)
+ MIPS_SYS(sys_nfsservctl , 3)
+ MIPS_SYS(sys_setresgid , 3) /* 4190 */
+ MIPS_SYS(sys_getresgid , 3)
+ MIPS_SYS(sys_prctl , 5)
+ MIPS_SYS(sys_rt_sigreturn, 0)
+ MIPS_SYS(sys_rt_sigaction, 4)
+ MIPS_SYS(sys_rt_sigprocmask, 4) /* 4195 */
+ MIPS_SYS(sys_rt_sigpending, 2)
+ MIPS_SYS(sys_rt_sigtimedwait, 4)
+ MIPS_SYS(sys_rt_sigqueueinfo, 3)
+ MIPS_SYS(sys_rt_sigsuspend, 0)
+ MIPS_SYS(sys_pread64 , 6) /* 4200 */
+ MIPS_SYS(sys_pwrite64 , 6)
+ MIPS_SYS(sys_chown , 3)
+ MIPS_SYS(sys_getcwd , 2)
+ MIPS_SYS(sys_capget , 2)
+ MIPS_SYS(sys_capset , 2) /* 4205 */
+ MIPS_SYS(sys_sigaltstack , 0)
+ MIPS_SYS(sys_sendfile , 4)
+ MIPS_SYS(sys_ni_syscall , 0)
+ MIPS_SYS(sys_ni_syscall , 0)
+ MIPS_SYS(sys_mmap2 , 6) /* 4210 */
+ MIPS_SYS(sys_truncate64 , 4)
+ MIPS_SYS(sys_ftruncate64 , 4)
+ MIPS_SYS(sys_stat64 , 2)
+ MIPS_SYS(sys_lstat64 , 2)
+ MIPS_SYS(sys_fstat64 , 2) /* 4215 */
+ MIPS_SYS(sys_pivot_root , 2)
+ MIPS_SYS(sys_mincore , 3)
+ MIPS_SYS(sys_madvise , 3)
+ MIPS_SYS(sys_getdents64 , 3)
+ MIPS_SYS(sys_fcntl64 , 3) /* 4220 */
+ MIPS_SYS(sys_ni_syscall , 0)
+ MIPS_SYS(sys_gettid , 0)
+ MIPS_SYS(sys_readahead , 5)
+ MIPS_SYS(sys_setxattr , 5)
+ MIPS_SYS(sys_lsetxattr , 5) /* 4225 */
+ MIPS_SYS(sys_fsetxattr , 5)
+ MIPS_SYS(sys_getxattr , 4)
+ MIPS_SYS(sys_lgetxattr , 4)
+ MIPS_SYS(sys_fgetxattr , 4)
+ MIPS_SYS(sys_listxattr , 3) /* 4230 */
+ MIPS_SYS(sys_llistxattr , 3)
+ MIPS_SYS(sys_flistxattr , 3)
+ MIPS_SYS(sys_removexattr , 2)
+ MIPS_SYS(sys_lremovexattr, 2)
+ MIPS_SYS(sys_fremovexattr, 2) /* 4235 */
+ MIPS_SYS(sys_tkill , 2)
+ MIPS_SYS(sys_sendfile64 , 5)
+ MIPS_SYS(sys_futex , 2)
+ MIPS_SYS(sys_sched_setaffinity, 3)
+ MIPS_SYS(sys_sched_getaffinity, 3) /* 4240 */
+ MIPS_SYS(sys_io_setup , 2)
+ MIPS_SYS(sys_io_destroy , 1)
+ MIPS_SYS(sys_io_getevents, 5)
+ MIPS_SYS(sys_io_submit , 3)
+ MIPS_SYS(sys_io_cancel , 3) /* 4245 */
+ MIPS_SYS(sys_exit_group , 1)
+ MIPS_SYS(sys_lookup_dcookie, 3)
+ MIPS_SYS(sys_epoll_create, 1)
+ MIPS_SYS(sys_epoll_ctl , 4)
+ MIPS_SYS(sys_epoll_wait , 3) /* 4250 */
+ MIPS_SYS(sys_remap_file_pages, 5)
+ MIPS_SYS(sys_set_tid_address, 1)
+ MIPS_SYS(sys_restart_syscall, 0)
+ MIPS_SYS(sys_fadvise64_64, 7)
+ MIPS_SYS(sys_statfs64 , 3) /* 4255 */
+ MIPS_SYS(sys_fstatfs64 , 2)
+ MIPS_SYS(sys_timer_create, 3)
+ MIPS_SYS(sys_timer_settime, 4)
+ MIPS_SYS(sys_timer_gettime, 2)
+ MIPS_SYS(sys_timer_getoverrun, 1) /* 4260 */
+ MIPS_SYS(sys_timer_delete, 1)
+ MIPS_SYS(sys_clock_settime, 2)
+ MIPS_SYS(sys_clock_gettime, 2)
+ MIPS_SYS(sys_clock_getres, 2)
+ MIPS_SYS(sys_clock_nanosleep, 4) /* 4265 */
+ MIPS_SYS(sys_tgkill , 3)
+ MIPS_SYS(sys_utimes , 2)
+ MIPS_SYS(sys_mbind , 4)
+ MIPS_SYS(sys_ni_syscall , 0) /* sys_get_mempolicy */
+ MIPS_SYS(sys_ni_syscall , 0) /* 4270 sys_set_mempolicy */
+ MIPS_SYS(sys_mq_open , 4)
+ MIPS_SYS(sys_mq_unlink , 1)
+ MIPS_SYS(sys_mq_timedsend, 5)
+ MIPS_SYS(sys_mq_timedreceive, 5)
+ MIPS_SYS(sys_mq_notify , 2) /* 4275 */
+ MIPS_SYS(sys_mq_getsetattr, 3)
+ MIPS_SYS(sys_ni_syscall , 0) /* sys_vserver */
+ MIPS_SYS(sys_waitid , 4)
+ MIPS_SYS(sys_ni_syscall , 0) /* available, was setaltroot */
+ MIPS_SYS(sys_add_key , 5)
+ MIPS_SYS(sys_request_key, 4)
+ MIPS_SYS(sys_keyctl , 5)
+ MIPS_SYS(sys_set_thread_area, 1)
+ MIPS_SYS(sys_inotify_init, 0)
+ MIPS_SYS(sys_inotify_add_watch, 3) /* 4285 */
+ MIPS_SYS(sys_inotify_rm_watch, 2)
+ MIPS_SYS(sys_migrate_pages, 4)
+ MIPS_SYS(sys_openat, 4)
+ MIPS_SYS(sys_mkdirat, 3)
+ MIPS_SYS(sys_mknodat, 4) /* 4290 */
+ MIPS_SYS(sys_fchownat, 5)
+ MIPS_SYS(sys_futimesat, 3)
+ MIPS_SYS(sys_fstatat64, 4)
+ MIPS_SYS(sys_unlinkat, 3)
+ MIPS_SYS(sys_renameat, 4) /* 4295 */
+ MIPS_SYS(sys_linkat, 5)
+ MIPS_SYS(sys_symlinkat, 3)
+ MIPS_SYS(sys_readlinkat, 4)
+ MIPS_SYS(sys_fchmodat, 3)
+ MIPS_SYS(sys_faccessat, 3) /* 4300 */
+ MIPS_SYS(sys_pselect6, 6)
+ MIPS_SYS(sys_ppoll, 5)
+ MIPS_SYS(sys_unshare, 1)
+ MIPS_SYS(sys_splice, 4)
+ MIPS_SYS(sys_sync_file_range, 7) /* 4305 */
+ MIPS_SYS(sys_tee, 4)
+ MIPS_SYS(sys_vmsplice, 4)
+ MIPS_SYS(sys_move_pages, 6)
+ MIPS_SYS(sys_set_robust_list, 2)
+ MIPS_SYS(sys_get_robust_list, 3) /* 4310 */
+ MIPS_SYS(sys_kexec_load, 4)
+ MIPS_SYS(sys_getcpu, 3)
+ MIPS_SYS(sys_epoll_pwait, 6)
+ MIPS_SYS(sys_ioprio_set, 3)
+ MIPS_SYS(sys_ioprio_get, 2)
+};
+
+#undef MIPS_SYS
+
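+/* Emulate a trapped sc/scd: under the exclusive lock, re-read the lladdr
+   location and store llnewval only if it still holds llval, then write the
+   success flag to the destination register.  Returns non-zero if a guest
+   memory access faulted. */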
+static int do_store_exclusive(CPUMIPSState *env)
+{
+ target_ulong addr;
+ target_ulong page_addr;
+ target_ulong val;
+ int flags;
+ int segv = 0;
+ int reg;
+ int d;
+
+ addr = env->lladdr;
+ page_addr = addr & TARGET_PAGE_MASK;
+ start_exclusive();
+ mmap_lock();
+ flags = page_get_flags(page_addr);
+ if ((flags & PAGE_READ) == 0) {
+ segv = 1;
+ } else {
+ reg = env->llreg & 0x1f;
+ d = (env->llreg & 0x20) != 0;
+ if (d) {
+ segv = get_user_s64(val, addr);
+ } else {
+ segv = get_user_s32(val, addr);
+ }
+ if (!segv) {
+ if (val != env->llval) {
+ env->active_tc.gpr[reg] = 0;
+ } else {
+ if (d) {
+ segv = put_user_u64(env->llnewval, addr);
+ } else {
+ segv = put_user_u32(env->llnewval, addr);
+ }
+ if (!segv) {
+ env->active_tc.gpr[reg] = 1;
+ }
+ }
+ }
+ }
+ env->lladdr = -1;
+ if (!segv) {
+ env->active_tc.PC += 4;
+ }
+ mmap_unlock();
+ end_exclusive();
+ return segv;
+}
+
+void cpu_loop(CPUMIPSState *env)
+{
+ target_siginfo_t info;
+ int trapnr, ret;
+ unsigned int syscall_num;
+
+ for(;;) {
+ cpu_exec_start(env);
+ trapnr = cpu_mips_exec(env);
+ cpu_exec_end(env);
+ switch(trapnr) {
+ case EXCP_SYSCALL:
+ syscall_num = env->active_tc.gpr[2] - 4000;
+ env->active_tc.PC += 4;
+ if (syscall_num >= sizeof(mips_syscall_args)) {
+ ret = -ENOSYS;
+ } else {
+ int nb_args;
+ abi_ulong sp_reg;
+ abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;
+
+ nb_args = mips_syscall_args[syscall_num];
+ sp_reg = env->active_tc.gpr[29];
+ switch (nb_args) {
+ /* these arguments are taken from the stack */
+ /* FIXME - what to do if get_user() fails? */
+ case 8: get_user_ual(arg8, sp_reg + 28);
+ case 7: get_user_ual(arg7, sp_reg + 24);
+ case 6: get_user_ual(arg6, sp_reg + 20);
+ case 5: get_user_ual(arg5, sp_reg + 16);
+ default:
+ break;
+ }
+ ret = do_syscall(env, env->active_tc.gpr[2],
+ env->active_tc.gpr[4],
+ env->active_tc.gpr[5],
+ env->active_tc.gpr[6],
+ env->active_tc.gpr[7],
+ arg5, arg6/*, arg7, arg8*/);
+ }
+ if (ret == -TARGET_QEMU_ESIGRETURN) {
+ /* Returning from a successful sigreturn syscall.
+ Avoid clobbering register state. */
+ break;
+ }
+ if ((unsigned int)ret >= (unsigned int)(-1133)) {
+ env->active_tc.gpr[7] = 1; /* error flag */
+ ret = -ret;
+ } else {
+ env->active_tc.gpr[7] = 0; /* error flag */
+ }
+ env->active_tc.gpr[2] = ret;
+ break;
+ case EXCP_TLBL:
+ case EXCP_TLBS:
+ info.si_signo = TARGET_SIGSEGV;
+ info.si_errno = 0;
+ /* XXX: check env->error_code */
+ info.si_code = TARGET_SEGV_MAPERR;
+ info._sifields._sigfault._addr = env->CP0_BadVAddr;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case EXCP_CpU:
+ case EXCP_RI:
+ info.si_signo = TARGET_SIGILL;
+ info.si_errno = 0;
+ info.si_code = 0;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
+ break;
+ case EXCP_DEBUG:
+ {
+ int sig;
+
+ sig = gdb_handlesig (env, TARGET_SIGTRAP);
+ if (sig)
+ {
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = TARGET_TRAP_BRKPT;
+ queue_signal(env, info.si_signo, &info);
+ }
+ }
+ break;
+ case EXCP_SC:
+ if (do_store_exclusive(env)) {
+ info.si_signo = TARGET_SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = TARGET_SEGV_MAPERR;
+ info._sifields._sigfault._addr = env->active_tc.PC;
+ queue_signal(env, info.si_signo, &info);
+ }
+ break;
+ default:
+ // error:
+ fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
+ trapnr);
+ cpu_dump_state(env, stderr, fprintf, 0);
+ abort();
+ }
+ process_pending_signals(env);
+ }
+}
+#endif
+
+#ifdef TARGET_SH4
+void cpu_loop (CPUState *env)
+{
+ int trapnr, ret;
+ target_siginfo_t info;
+
+ while (1) {
+ trapnr = cpu_sh4_exec (env);
+
+ switch (trapnr) {
+ case 0x160:
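+            /* linux syscall */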
+ env->pc += 2;
+ ret = do_syscall(env,
+ env->gregs[3],
+ env->gregs[4],
+ env->gregs[5],
+ env->gregs[6],
+ env->gregs[7],
+ env->gregs[0],
+ env->gregs[1]);
+ env->gregs[0] = ret;
+ break;
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
+ break;
+ case EXCP_DEBUG:
+ {
+ int sig;
+
+ sig = gdb_handlesig (env, TARGET_SIGTRAP);
+ if (sig)
+ {
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = TARGET_TRAP_BRKPT;
+ queue_signal(env, info.si_signo, &info);
+ }
+ }
+ break;
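+        /* memory faults: deliver SIGSEGV with the faulting address */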
+ case 0xa0:
+ case 0xc0:
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = TARGET_SEGV_MAPERR;
+ info._sifields._sigfault._addr = env->tea;
+ queue_signal(env, info.si_signo, &info);
+ break;
+
+ default:
+ printf ("Unhandled trap: 0x%x\n", trapnr);
+ cpu_dump_state(env, stderr, fprintf, 0);
+ exit (1);
+ }
+ process_pending_signals (env);
+ }
+}
+#endif
+
+#ifdef TARGET_CRIS
+void cpu_loop (CPUState *env)
+{
+ int trapnr, ret;
+ target_siginfo_t info;
+
+ while (1) {
+ trapnr = cpu_cris_exec (env);
+ switch (trapnr) {
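+        /* memory fault: deliver SIGSEGV with the faulting address (PR_EDA) */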
+ case 0xaa:
+ {
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* XXX: check env->error_code */
+ info.si_code = TARGET_SEGV_MAPERR;
+ info._sifields._sigfault._addr = env->pregs[PR_EDA];
+ queue_signal(env, info.si_signo, &info);
+ }
+ break;
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
+ break;
+ case EXCP_BREAK:
+ ret = do_syscall(env,
+ env->regs[9],
+ env->regs[10],
+ env->regs[11],
+ env->regs[12],
+ env->regs[13],
+ env->pregs[7],
+ env->pregs[11]);
+ env->regs[10] = ret;
+ break;
+ case EXCP_DEBUG:
+ {
+ int sig;
+
+ sig = gdb_handlesig (env, TARGET_SIGTRAP);
+ if (sig)
+ {
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = TARGET_TRAP_BRKPT;
+ queue_signal(env, info.si_signo, &info);
+ }
+ }
+ break;
+ default:
+ printf ("Unhandled trap: 0x%x\n", trapnr);
+ cpu_dump_state(env, stderr, fprintf, 0);
+ exit (1);
+ }
+ process_pending_signals (env);
+ }
+}
+#endif
+
+#ifdef TARGET_MICROBLAZE
+void cpu_loop (CPUState *env)
+{
+ int trapnr, ret;
+ target_siginfo_t info;
+
+ while (1) {
+ trapnr = cpu_mb_exec (env);
+ switch (trapnr) {
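+        /* report a memory fault to the guest as SIGSEGV */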
+ case 0xaa:
+ {
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* XXX: check env->error_code */
+ info.si_code = TARGET_SEGV_MAPERR;
+ info._sifields._sigfault._addr = 0;
+ queue_signal(env, info.si_signo, &info);
+ }
+ break;
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
+ break;
+ case EXCP_BREAK:
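+ /* System call: r12 carries the syscall number and r5-r10 the
+ arguments; the result goes to r3 and execution resumes at the
+ adjusted return address in r14. */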
+ /* Return address is 4 bytes after the call. */
+ env->regs[14] += 4;
+ ret = do_syscall(env,
+ env->regs[12],
+ env->regs[5],
+ env->regs[6],
+ env->regs[7],
+ env->regs[8],
+ env->regs[9],
+ env->regs[10]);
+ env->regs[3] = ret;
+ env->sregs[SR_PC] = env->regs[14];
+ break;
+ case EXCP_HW_EXCP:
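+ /* Hardware exception: decode the cause from ESR. Only FPU
+ exceptions are turned into a signal (SIGFPE); anything else
+ is fatal. */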
+ env->regs[17] = env->sregs[SR_PC] + 4;
+ if (env->iflags & D_FLAG) {
+ env->sregs[SR_ESR] |= 1 << 12;
+ env->sregs[SR_PC] -= 4;
+ /* FIXME: if the branch had an immediate, replay the imm as well. */
+ }
+
+ env->iflags &= ~(IMM_FLAG | D_FLAG);
+
+ switch (env->sregs[SR_ESR] & 31) {
+ case ESR_EC_FPU:
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ if (env->sregs[SR_FSR] & FSR_IO) {
+ info.si_code = TARGET_FPE_FLTINV;
+ }
+ if (env->sregs[SR_FSR] & FSR_DZ) {
+ info.si_code = TARGET_FPE_FLTDIV;
+ }
+ info._sifields._sigfault._addr = 0;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ default:
+ printf ("Unhandled hw-exception: 0x%x\n",
+ env->sregs[SR_ESR] & 31);
+ cpu_dump_state(env, stderr, fprintf, 0);
+ exit (1);
+ break;
+ }
+ break;
+ case EXCP_DEBUG:
+ {
+ int sig;
+
+ sig = gdb_handlesig (env, TARGET_SIGTRAP);
+ if (sig)
+ {
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = TARGET_TRAP_BRKPT;
+ queue_signal(env, info.si_signo, &info);
+ }
+ }
+ break;
+ default:
+ printf ("Unhandled trap: 0x%x\n", trapnr);
+ cpu_dump_state(env, stderr, fprintf, 0);
+ exit (1);
+ }
+ process_pending_signals (env);
+ }
+}
+#endif
+
+#ifdef TARGET_M68K
+
+void cpu_loop(CPUM68KState *env)
+{
+ int trapnr;
+ unsigned int n;
+ target_siginfo_t info;
+ TaskState *ts = env->opaque;
+
+ for(;;) {
+ trapnr = cpu_m68k_exec(env);
+ switch(trapnr) {
+ case EXCP_ILLEGAL:
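+ /* For binaries run with simulator syscalls enabled, an illegal
+ instruction encodes a simcall whose 16-bit number follows the
+ opcode; otherwise an illegal instruction raises SIGILL. */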
+ {
+ if (ts->sim_syscalls) {
+ uint16_t nr;
+ nr = lduw(env->pc + 2);
+ env->pc += 4;
+ do_m68k_simcall(env, nr);
+ } else {
+ goto do_sigill;
+ }
+ }
+ break;
+ case EXCP_HALT_INSN:
+ /* Semihosting syscall. */
+ env->pc += 4;
+ do_m68k_semihosting(env, env->dregs[0]);
+ break;
+ case EXCP_LINEA:
+ case EXCP_LINEF:
+ case EXCP_UNSUPPORTED:
+ do_sigill:
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = TARGET_ILL_ILLOPN;
+ info._sifields._sigfault._addr = env->pc;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case EXCP_TRAP0:
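+ /* trap #0 system call: d0 holds the syscall number, d1-d5 and
+ a0 the arguments, and the result is returned in d0. */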
+ {
+ ts->sim_syscalls = 0;
+ n = env->dregs[0];
+ env->pc += 2;
+ env->dregs[0] = do_syscall(env,
+ n,
+ env->dregs[1],
+ env->dregs[2],
+ env->dregs[3],
+ env->dregs[4],
+ env->dregs[5],
+ env->aregs[0]);
+ }
+ break;
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
+ break;
+ case EXCP_ACCESS:
+ {
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* XXX: check env->error_code */
+ info.si_code = TARGET_SEGV_MAPERR;
+ info._sifields._sigfault._addr = env->mmu.ar;
+ queue_signal(env, info.si_signo, &info);
+ }
+ break;
+ case EXCP_DEBUG:
+ {
+ int sig;
+
+ sig = gdb_handlesig (env, TARGET_SIGTRAP);
+ if (sig)
+ {
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = TARGET_TRAP_BRKPT;
+ queue_signal(env, info.si_signo, &info);
+ }
+ }
+ break;
+ default:
+ fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
+ trapnr);
+ cpu_dump_state(env, stderr, fprintf, 0);
+ abort();
+ }
+ process_pending_signals(env);
+ }
+}
+#endif /* TARGET_M68K */
+
+#ifdef TARGET_ALPHA
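+/* Emulate Alpha stl_c/stq_c: the conditional store succeeds (the
+ result register is set to 1) only if the store address still
+ matches the locked address and memory still holds the value
+ recorded by the preceding load-locked (lock_addr/lock_value);
+ otherwise the register is set to 0. A faulting access raises
+ SIGSEGV instead. */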
+static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)
+{
+ target_ulong addr, val, tmp;
+ target_siginfo_t info;
+ int ret = 0;
+
+ addr = env->lock_addr;
+ tmp = env->lock_st_addr;
+ env->lock_addr = -1;
+ env->lock_st_addr = 0;
+
+ start_exclusive();
+ mmap_lock();
+
+ if (addr == tmp) {
+ if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
+ goto do_sigsegv;
+ }
+
+ if (val == env->lock_value) {
+ tmp = env->ir[reg];
+ if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {
+ goto do_sigsegv;
+ }
+ ret = 1;
+ }
+ }
+ env->ir[reg] = ret;
+ env->pc += 4;
+
+ mmap_unlock();
+ end_exclusive();
+ return;
+
+ do_sigsegv:
+ mmap_unlock();
+ end_exclusive();
+
+ info.si_signo = TARGET_SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = TARGET_SEGV_MAPERR;
+ info._sifields._sigfault._addr = addr;
+ queue_signal(env, TARGET_SIGSEGV, &info);
+}
+
+void cpu_loop (CPUState *env)
+{
+ int trapnr;
+ target_siginfo_t info;
+ abi_long sysret;
+
+ while (1) {
+ trapnr = cpu_alpha_exec (env);
+
+ /* All of these traps imply a transition through PALcode, which
+ in turn implies that an REI instruction has been executed, so
+ the intr_flag should be cleared. */
+ env->intr_flag = 0;
+
+ switch (trapnr) {
+ case EXCP_RESET:
+ fprintf(stderr, "Reset requested. Exit\n");
+ exit(1);
+ break;
+ case EXCP_MCHK:
+ fprintf(stderr, "Machine check exception. Exit\n");
+ exit(1);
+ break;
+ case EXCP_ARITH:
+ env->lock_addr = -1;
+ info.si_signo = TARGET_SIGFPE;
+ info.si_errno = 0;
+ info.si_code = TARGET_FPE_FLTINV;
+ info._sifields._sigfault._addr = env->pc;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case EXCP_HW_INTERRUPT:
+ fprintf(stderr, "External interrupt. Exit\n");
+ exit(1);
+ break;
+ case EXCP_DFAULT:
+ env->lock_addr = -1;
+ info.si_signo = TARGET_SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = (page_get_flags(env->ipr[IPR_EXC_ADDR]) & PAGE_VALID
+ ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
+ info._sifields._sigfault._addr = env->ipr[IPR_EXC_ADDR];
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case EXCP_DTB_MISS_PAL:
+ fprintf(stderr, "MMU data TLB miss in PALcode\n");
+ exit(1);
+ break;
+ case EXCP_ITB_MISS:
+ fprintf(stderr, "MMU instruction TLB miss\n");
+ exit(1);
+ break;
+ case EXCP_ITB_ACV:
+ fprintf(stderr, "MMU instruction access violation\n");
+ exit(1);
+ break;
+ case EXCP_DTB_MISS_NATIVE:
+ fprintf(stderr, "MMU data TLB miss\n");
+ exit(1);
+ break;
+ case EXCP_UNALIGN:
+ env->lock_addr = -1;
+ info.si_signo = TARGET_SIGBUS;
+ info.si_errno = 0;
+ info.si_code = TARGET_BUS_ADRALN;
+ info._sifields._sigfault._addr = env->ipr[IPR_EXC_ADDR];
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case EXCP_OPCDEC:
+ do_sigill:
+ env->lock_addr = -1;
+ info.si_signo = TARGET_SIGILL;
+ info.si_errno = 0;
+ info.si_code = TARGET_ILL_ILLOPC;
+ info._sifields._sigfault._addr = env->pc;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case EXCP_FEN:
+ /* No-op. Linux simply re-enables the FPU. */
+ break;
+ case EXCP_CALL_PAL ... (EXCP_CALL_PALP - 1):
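+ /* Unprivileged CALL_PAL: recover the PAL function code from the
+ trap number and dispatch on it (bpt, bugchk, callsys, ...). */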
+ env->lock_addr = -1;
+ switch ((trapnr >> 6) | 0x80) {
+ case 0x80:
+ /* BPT */
+ info.si_signo = TARGET_SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TARGET_TRAP_BRKPT;
+ info._sifields._sigfault._addr = env->pc;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case 0x81:
+ /* BUGCHK */
+ info.si_signo = TARGET_SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = 0;
+ info._sifields._sigfault._addr = env->pc;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case 0x83:
+ /* CALLSYS */
+ trapnr = env->ir[IR_V0];
+ sysret = do_syscall(env, trapnr,
+ env->ir[IR_A0], env->ir[IR_A1],
+ env->ir[IR_A2], env->ir[IR_A3],
+ env->ir[IR_A4], env->ir[IR_A5]);
+ if (trapnr == TARGET_NR_sigreturn
+ || trapnr == TARGET_NR_rt_sigreturn) {
+ break;
+ }
+ /* A syscall that wrote 0 to V0 bypasses the error check,
+ similar to how this is handled inside the Linux kernel. */
+ if (env->ir[IR_V0] == 0) {
+ env->ir[IR_V0] = sysret;
+ } else {
+ env->ir[IR_V0] = (sysret < 0 ? -sysret : sysret);
+ env->ir[IR_A3] = (sysret < 0);
+ }
+ break;
+ case 0x86:
+ /* IMB */
+ /* ??? We can probably elide the code using page_unprotect
+ that is checking for self-modifying code. Instead we
+ could simply call tb_flush here. Until we work out the
+ changes required to turn off the extra write protection,
+ this can be a no-op. */
+ break;
+ case 0x9E:
+ /* RDUNIQUE */
+ /* Handled in the translator for usermode. */
+ abort();
+ case 0x9F:
+ /* WRUNIQUE */
+ /* Handled in the translator for usermode. */
+ abort();
+ case 0xAA:
+ /* GENTRAP */
+ info.si_signo = TARGET_SIGFPE;
+ switch (env->ir[IR_A0]) {
+ case TARGET_GEN_INTOVF:
+ info.si_code = TARGET_FPE_INTOVF;
+ break;
+ case TARGET_GEN_INTDIV:
+ info.si_code = TARGET_FPE_INTDIV;
+ break;
+ case TARGET_GEN_FLTOVF:
+ info.si_code = TARGET_FPE_FLTOVF;
+ break;
+ case TARGET_GEN_FLTUND:
+ info.si_code = TARGET_FPE_FLTUND;
+ break;
+ case TARGET_GEN_FLTINV:
+ info.si_code = TARGET_FPE_FLTINV;
+ break;
+ case TARGET_GEN_FLTINE:
+ info.si_code = TARGET_FPE_FLTRES;
+ break;
+ case TARGET_GEN_ROPRAND:
+ info.si_code = 0;
+ break;
+ default:
+ info.si_signo = TARGET_SIGTRAP;
+ info.si_code = 0;
+ break;
+ }
+ info.si_errno = 0;
+ info._sifields._sigfault._addr = env->pc;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ default:
+ goto do_sigill;
+ }
+ break;
+ case EXCP_CALL_PALP ... (EXCP_CALL_PALE - 1):
+ goto do_sigill;
+ case EXCP_DEBUG:
+ info.si_signo = gdb_handlesig (env, TARGET_SIGTRAP);
+ if (info.si_signo) {
+ env->lock_addr = -1;
+ info.si_errno = 0;
+ info.si_code = TARGET_TRAP_BRKPT;
+ queue_signal(env, info.si_signo, &info);
+ }
+ break;
+ case EXCP_STL_C:
+ case EXCP_STQ_C:
+ do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
+ break;
+ default:
+ printf ("Unhandled trap: 0x%x\n", trapnr);
+ cpu_dump_state(env, stderr, fprintf, 0);
+ exit (1);
+ }
+ process_pending_signals (env);
+ }
+}
+#endif /* TARGET_ALPHA */
+
+static void usage(void)
+{
+ printf("qemu-" TARGET_ARCH " version " QEMU_VERSION QEMU_PKGVERSION ", Copyright (c) 2003-2008 Fabrice Bellard\n"
+ "usage: qemu-" TARGET_ARCH " [options] program [arguments...]\n"
+ "Linux CPU emulator (compiled for %s emulation)\n"
+ "\n"
+ "Standard options:\n"
+ "-h print this help\n"
+ "-g port wait gdb connection to port\n"
+ "-L path set the elf interpreter prefix (default=%s)\n"
+ "-s size set the stack size in bytes (default=%ld)\n"
+ "-cpu model select CPU (-cpu ? for list)\n"
+ "-drop-ld-preload drop LD_PRELOAD for target process\n"
+ "-E var=value sets/modifies targets environment variable(s)\n"
+ "-U var unsets targets environment variable(s)\n"
+ "-0 argv0 forces target process argv[0] to be argv0\n"
+#if defined(CONFIG_USE_GUEST_BASE)
+ "-B address set guest_base address to address\n"
+ "-R size reserve size bytes for guest virtual address space\n"
+#endif
+ "\n"
+ "Debug options:\n"
+ "-d options activate log (logfile=%s)\n"
+ "-p pagesize set the host page size to 'pagesize'\n"
+ "-singlestep always run in singlestep mode\n"
+ "-strace log system calls\n"
+ "\n"
+ "Environment variables:\n"
+ "QEMU_STRACE Print system calls and arguments similar to the\n"
+ " 'strace' program. Enable by setting to any value.\n"
+ "You can use -E and -U options to set/unset environment variables\n"
+ "for target process. It is possible to provide several variables\n"
+ "by repeating the option. For example:\n"
+ " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
+ "Note that if you provide several changes to single variable\n"
+ "last change will stay in effect.\n"
+ ,
+ TARGET_ARCH,
+ interp_prefix,
+ guest_stack_size,
+ DEBUG_LOGFILE);
+ exit(1);
+}
+
+THREAD CPUState *thread_env;
+
+void task_settid(TaskState *ts)
+{
+ if (ts->ts_tid == 0) {
+#ifdef CONFIG_USE_NPTL
+ ts->ts_tid = (pid_t)syscall(SYS_gettid);
+#else
+ /* when no threads are used, tid becomes pid */
+ ts->ts_tid = getpid();
+#endif
+ }
+}
+
+void stop_all_tasks(void)
+{
+ /*
+ * We trust that when using NPTL, start_exclusive()
+ * handles thread stopping correctly.
+ */
+ start_exclusive();
+}
+
+/* Assumes contents are already zeroed. */
+void init_task_state(TaskState *ts)
+{
+ int i;
+
+ ts->used = 1;
+ ts->first_free = ts->sigqueue_table;
+ for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
+ ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1];
+ }
+ ts->sigqueue_table[i].next = NULL;
+}
+
+int main(int argc, char **argv, char **envp)
+{
+ const char *filename;
+ const char *cpu_model;
+ struct target_pt_regs regs1, *regs = &regs1;
+ struct image_info info1, *info = &info1;
+ struct linux_binprm bprm;
+ TaskState *ts;
+ CPUState *env;
+ int optind;
+ const char *r;
+ int gdbstub_port = 0;
+ char **target_environ, **wrk;
+ char **target_argv;
+ int target_argc;
+ envlist_t *envlist = NULL;
+ const char *argv0 = NULL;
+ int i;
+ int ret;
+
+ if (argc <= 1)
+ usage();
+
+ qemu_cache_utils_init(envp);
+
+ /* init debug */
+ cpu_set_log_filename(DEBUG_LOGFILE);
+
+ if ((envlist = envlist_create()) == NULL) {
+ (void) fprintf(stderr, "Unable to allocate envlist\n");
+ exit(1);
+ }
+
+ /* add current environment into the list */
+ for (wrk = environ; *wrk != NULL; wrk++) {
+ (void) envlist_setenv(envlist, *wrk);
+ }
+
+ /* Read the stack limit from the kernel. If it's "unlimited",
+ then we can do little else besides use the default. */
+ {
+ struct rlimit lim;
+ if (getrlimit(RLIMIT_STACK, &lim) == 0
+ && lim.rlim_cur != RLIM_INFINITY
+ && lim.rlim_cur == (target_long)lim.rlim_cur) {
+ guest_stack_size = lim.rlim_cur;
+ }
+ }
+
+ cpu_model = NULL;
+#if defined(cpudef_setup)
+ cpudef_setup(); /* parse cpu definitions in target config file (TBD) */
+#endif
+
+ optind = 1;
+ for(;;) {
+ if (optind >= argc)
+ break;
+ r = argv[optind];
+ if (r[0] != '-')
+ break;
+ optind++;
+ r++;
+ if (!strcmp(r, "-")) {
+ break;
+ } else if (!strcmp(r, "d")) {
+ int mask;
+ const CPULogItem *item;
+
+ if (optind >= argc)
+ break;
+
+ r = argv[optind++];
+ mask = cpu_str_to_log_mask(r);
+ if (!mask) {
+ printf("Log items (comma separated):\n");
+ for(item = cpu_log_items; item->mask != 0; item++) {
+ printf("%-10s %s\n", item->name, item->help);
+ }
+ exit(1);
+ }
+ cpu_set_log(mask);
+ } else if (!strcmp(r, "E")) {
+ r = argv[optind++];
+ if (envlist_setenv(envlist, r) != 0)
+ usage();
+ } else if (!strcmp(r, "ignore-environment")) {
+ envlist_free(envlist);
+ if ((envlist = envlist_create()) == NULL) {
+ (void) fprintf(stderr, "Unable to allocate envlist\n");
+ exit(1);
+ }
+ } else if (!strcmp(r, "U")) {
+ r = argv[optind++];
+ if (envlist_unsetenv(envlist, r) != 0)
+ usage();
+ } else if (!strcmp(r, "0")) {
+ r = argv[optind++];
+ argv0 = r;
+ } else if (!strcmp(r, "s")) {
+ if (optind >= argc)
+ break;
+ r = argv[optind++];
+ guest_stack_size = strtoul(r, (char **)&r, 0);
+ if (guest_stack_size == 0)
+ usage();
+ if (*r == 'M')
+ guest_stack_size *= 1024 * 1024;
+ else if (*r == 'k' || *r == 'K')
+ guest_stack_size *= 1024;
+ } else if (!strcmp(r, "L")) {
+ interp_prefix = argv[optind++];
+ } else if (!strcmp(r, "p")) {
+ if (optind >= argc)
+ break;
+ qemu_host_page_size = atoi(argv[optind++]);
+ if (qemu_host_page_size == 0 ||
+ (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
+ fprintf(stderr, "page size must be a power of two\n");
+ exit(1);
+ }
+ } else if (!strcmp(r, "g")) {
+ if (optind >= argc)
+ break;
+ gdbstub_port = atoi(argv[optind++]);
+ } else if (!strcmp(r, "r")) {
+ qemu_uname_release = argv[optind++];
+ } else if (!strcmp(r, "cpu")) {
+ cpu_model = argv[optind++];
+ if (cpu_model == NULL || strcmp(cpu_model, "?") == 0) {
+/* XXX: implement xxx_cpu_list for targets that still lack it */
+#if defined(cpu_list_id)
+ cpu_list_id(stdout, &fprintf, "");
+#elif defined(cpu_list)
+ cpu_list(stdout, &fprintf); /* deprecated */
+#endif
+ exit(1);
+ }
+#if defined(CONFIG_USE_GUEST_BASE)
+ } else if (!strcmp(r, "B")) {
+ guest_base = strtoul(argv[optind++], NULL, 0);
+ have_guest_base = 1;
+ } else if (!strcmp(r, "R")) {
+ char *p;
+ int shift = 0;
+ reserved_va = strtoul(argv[optind++], &p, 0);
+ switch (*p) {
+ case 'k':
+ case 'K':
+ shift = 10;
+ break;
+ case 'M':
+ shift = 20;
+ break;
+ case 'G':
+ shift = 30;
+ break;
+ }
+ if (shift) {
+ unsigned long unshifted = reserved_va;
+ p++;
+ reserved_va <<= shift;
+ if (((reserved_va >> shift) != unshifted)
+#if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
+ || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
+#endif
+ ) {
+ fprintf(stderr, "Reserved virtual address too big\n");
+ exit(1);
+ }
+ }
+ if (*p) {
+ fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
+ exit(1);
+ }
+#endif
+ } else if (!strcmp(r, "drop-ld-preload")) {
+ (void) envlist_unsetenv(envlist, "LD_PRELOAD");
+ } else if (!strcmp(r, "singlestep")) {
+ singlestep = 1;
+ } else if (!strcmp(r, "strace")) {
+ do_strace = 1;
+ } else {
+ usage();
+ }
+ }
+ if (optind >= argc)
+ usage();
+ filename = argv[optind];
+ exec_path = argv[optind];
+
+ /* Zero out regs */
+ memset(regs, 0, sizeof(struct target_pt_regs));
+
+ /* Zero out image_info */
+ memset(info, 0, sizeof(struct image_info));
+
+ memset(&bprm, 0, sizeof (bprm));
+
+ /* Scan interp_prefix dir for replacement files. */
+ init_paths(interp_prefix);
+
+ if (cpu_model == NULL) {
+#if defined(TARGET_I386)
+#ifdef TARGET_X86_64
+ cpu_model = "qemu64";
+#else
+ cpu_model = "qemu32";
+#endif
+#elif defined(TARGET_ARM)
+ cpu_model = "any";
+#elif defined(TARGET_M68K)
+ cpu_model = "any";
+#elif defined(TARGET_SPARC)
+#ifdef TARGET_SPARC64
+ cpu_model = "TI UltraSparc II";
+#else
+ cpu_model = "Fujitsu MB86904";
+#endif
+#elif defined(TARGET_MIPS)
+#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
+ cpu_model = "20Kc";
+#else
+ cpu_model = "24Kf";
+#endif
+#elif defined(TARGET_PPC)
+#ifdef TARGET_PPC64
+ cpu_model = "970fx";
+#else
+ cpu_model = "750";
+#endif
+#else
+ cpu_model = "any";
+#endif
+ }
+ cpu_exec_init_all(0);
+ /* NOTE: we need to init the CPU at this stage to get
+ qemu_host_page_size */
+ env = cpu_init(cpu_model);
+ if (!env) {
+ fprintf(stderr, "Unable to find CPU definition\n");
+ exit(1);
+ }
+#if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
+ cpu_reset(env);
+#endif
+
+ thread_env = env;
+
+ if (getenv("QEMU_STRACE")) {
+ do_strace = 1;
+ }
+
+ target_environ = envlist_to_environ(envlist, NULL);
+ envlist_free(envlist);
+
+#if defined(CONFIG_USE_GUEST_BASE)
+ /*
+ * Now that page sizes are configured in cpu_init() we can do
+ * proper page alignment for guest_base.
+ */
+ guest_base = HOST_PAGE_ALIGN(guest_base);
+
+ if (reserved_va) {
+ void *p;
+ int flags;
+
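+ /* Reserve the whole guest address range with an anonymous
+ PROT_NONE, MAP_NORESERVE mapping; guest mappings are later
+ placed inside this reservation. */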
+ flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
+ if (have_guest_base) {
+ flags |= MAP_FIXED;
+ }
+ p = mmap((void *)guest_base, reserved_va, PROT_NONE, flags, -1, 0);
+ if (p == MAP_FAILED) {
+ fprintf(stderr, "Unable to reserve guest address space\n");
+ exit(1);
+ }
+ guest_base = (unsigned long)p;
+ /* Make sure the address is properly aligned. */
+ if (guest_base & ~qemu_host_page_mask) {
+ munmap(p, reserved_va);
+ p = mmap((void *)guest_base, reserved_va + qemu_host_page_size,
+ PROT_NONE, flags, -1, 0);
+ if (p == MAP_FAILED) {
+ fprintf(stderr, "Unable to reserve guest address space\n");
+ exit(1);
+ }
+ guest_base = HOST_PAGE_ALIGN((unsigned long)p);
+ }
+ qemu_log("Reserved 0x%lx bytes of guest address space\n", reserved_va);
+ }
+#endif /* CONFIG_USE_GUEST_BASE */
+
+ /*
+ * Read in the mmap_min_addr kernel parameter. This value is used
+ * when loading the ELF image to determine whether guest_base
+ * is needed. It is also used in mmap_find_vma.
+ */
+ {
+ FILE *fp;
+
+ if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
+ unsigned long tmp;
+ if (fscanf(fp, "%lu", &tmp) == 1) {
+ mmap_min_addr = tmp;
+ qemu_log("host mmap_min_addr=0x%lx\n", mmap_min_addr);
+ }
+ fclose(fp);
+ }
+ }
+
+ /*
+ * Prepare copy of argv vector for target.
+ */
+ target_argc = argc - optind;
+ target_argv = calloc(target_argc + 1, sizeof (char *));
+ if (target_argv == NULL) {
+ (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");
+ exit(1);
+ }
+
+ /*
+ * If argv0 is specified (using the '-0' switch), we replace
+ * the argv[0] pointer with the given one.
+ */
+ i = 0;
+ if (argv0 != NULL) {
+ target_argv[i++] = strdup(argv0);
+ }
+ for (; i < target_argc; i++) {
+ target_argv[i] = strdup(argv[optind + i]);
+ }
+ target_argv[target_argc] = NULL;
+
+ ts = qemu_mallocz (sizeof(TaskState));
+ init_task_state(ts);
+ /* build Task State */
+ ts->info = info;
+ ts->bprm = &bprm;
+ env->opaque = ts;
+ task_settid(ts);
+
+ ret = loader_exec(filename, target_argv, target_environ, regs,
+ info, &bprm);
+ if (ret != 0) {
+ printf("Error %d while loading %s\n", ret, filename);
+ _exit(1);
+ }
+
+ for (i = 0; i < target_argc; i++) {
+ free(target_argv[i]);
+ }
+ free(target_argv);
+
+ for (wrk = target_environ; *wrk; wrk++) {
+ free(*wrk);
+ }
+
+ free(target_environ);
+
+ if (qemu_log_enabled()) {
+#if defined(CONFIG_USE_GUEST_BASE)
+ qemu_log("guest_base 0x%lx\n", guest_base);
+#endif
+ log_page_dump();
+
+ qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
+ qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code);
+ qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n",
+ info->start_code);
+ qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n",
+ info->start_data);
+ qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data);
+ qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
+ info->start_stack);
+ qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk);
+ qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry);
+ }
+
+ target_set_brk(info->brk);
+ syscall_init();
+ signal_init();
+
+#if defined(CONFIG_USE_GUEST_BASE)
+ /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
+ generating the prologue until now so that the prologue can take
+ the real value of GUEST_BASE into account. */
+ tcg_prologue_init(&tcg_ctx);
+#endif
+
+#if defined(TARGET_I386)
+ cpu_x86_set_cpl(env, 3);
+
+ env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
+ env->hflags |= HF_PE_MASK;
+ if (env->cpuid_features & CPUID_SSE) {
+ env->cr[4] |= CR4_OSFXSR_MASK;
+ env->hflags |= HF_OSFXSR_MASK;
+ }
+#ifndef TARGET_ABI32
+ /* enable 64 bit mode if possible */
+ if (!(env->cpuid_ext2_features & CPUID_EXT2_LM)) {
+ fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
+ exit(1);
+ }
+ env->cr[4] |= CR4_PAE_MASK;
+ env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
+ env->hflags |= HF_LMA_MASK;
+#endif
+
+ /* flags setup : we activate the IRQs by default as in user mode */
+ env->eflags |= IF_MASK;
+
+ /* linux register setup */
+#ifndef TARGET_ABI32
+ env->regs[R_EAX] = regs->rax;
+ env->regs[R_EBX] = regs->rbx;
+ env->regs[R_ECX] = regs->rcx;
+ env->regs[R_EDX] = regs->rdx;
+ env->regs[R_ESI] = regs->rsi;
+ env->regs[R_EDI] = regs->rdi;
+ env->regs[R_EBP] = regs->rbp;
+ env->regs[R_ESP] = regs->rsp;
+ env->eip = regs->rip;
+#else
+ env->regs[R_EAX] = regs->eax;
+ env->regs[R_EBX] = regs->ebx;
+ env->regs[R_ECX] = regs->ecx;
+ env->regs[R_EDX] = regs->edx;
+ env->regs[R_ESI] = regs->esi;
+ env->regs[R_EDI] = regs->edi;
+ env->regs[R_EBP] = regs->ebp;
+ env->regs[R_ESP] = regs->esp;
+ env->eip = regs->eip;
+#endif
+
+ /* linux interrupt setup */
+#ifndef TARGET_ABI32
+ env->idt.limit = 511;
+#else
+ env->idt.limit = 255;
+#endif
+ env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
+ PROT_READ|PROT_WRITE,
+ MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ idt_table = g2h(env->idt.base);
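+ /* Gates 3 (breakpoint), 4 (overflow) and 0x80 (the Linux
+ syscall vector) are installed with privilege level 3 so they
+ can be raised from user mode; all other gates stay at DPL 0. */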
+ set_idt(0, 0);
+ set_idt(1, 0);
+ set_idt(2, 0);
+ set_idt(3, 3);
+ set_idt(4, 3);
+ set_idt(5, 0);
+ set_idt(6, 0);
+ set_idt(7, 0);
+ set_idt(8, 0);
+ set_idt(9, 0);
+ set_idt(10, 0);
+ set_idt(11, 0);
+ set_idt(12, 0);
+ set_idt(13, 0);
+ set_idt(14, 0);
+ set_idt(15, 0);
+ set_idt(16, 0);
+ set_idt(17, 0);
+ set_idt(18, 0);
+ set_idt(19, 0);
+ set_idt(0x80, 3);
+
+ /* linux segment setup */
+ {
+ uint64_t *gdt_table;
+ env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
+ PROT_READ|PROT_WRITE,
+ MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
+ gdt_table = g2h(env->gdt.base);
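+ /* Flat user segments: base 0, 4 GiB limit, DPL 3; the 64-bit
+ code segment additionally sets the L bit. */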
+#ifdef TARGET_ABI32
+ write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
+ (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
+#else
+ /* 64 bit code segment */
+ write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
+ DESC_L_MASK |
+ (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
+#endif
+ write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
+ (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
+ }
+ cpu_x86_load_seg(env, R_CS, __USER_CS);
+ cpu_x86_load_seg(env, R_SS, __USER_DS);
+#ifdef TARGET_ABI32
+ cpu_x86_load_seg(env, R_DS, __USER_DS);
+ cpu_x86_load_seg(env, R_ES, __USER_DS);
+ cpu_x86_load_seg(env, R_FS, __USER_DS);
+ cpu_x86_load_seg(env, R_GS, __USER_DS);
+ /* This hack makes Wine work... */
+ env->segs[R_FS].selector = 0;
+#else
+ cpu_x86_load_seg(env, R_DS, 0);
+ cpu_x86_load_seg(env, R_ES, 0);
+ cpu_x86_load_seg(env, R_FS, 0);
+ cpu_x86_load_seg(env, R_GS, 0);
+#endif
+#elif defined(TARGET_ARM)
+ {
+ int i;
+ cpsr_write(env, regs->uregs[16], 0xffffffff);
+ for(i = 0; i < 16; i++) {
+ env->regs[i] = regs->uregs[i];
+ }
+ }
+#elif defined(TARGET_SPARC)
+ {
+ int i;
+ env->pc = regs->pc;
+ env->npc = regs->npc;
+ env->y = regs->y;
+ for(i = 0; i < 8; i++)
+ env->gregs[i] = regs->u_regs[i];
+ for(i = 0; i < 8; i++)
+ env->regwptr[i] = regs->u_regs[i + 8];
+ }
+#elif defined(TARGET_PPC)
+ {
+ int i;
+
+#if defined(TARGET_PPC64)
+#if defined(TARGET_ABI32)
+ env->msr &= ~((target_ulong)1 << MSR_SF);
+#else
+ env->msr |= (target_ulong)1 << MSR_SF;
+#endif
+#endif
+ env->nip = regs->nip;
+ for(i = 0; i < 32; i++) {
+ env->gpr[i] = regs->gpr[i];
+ }
+ }
+#elif defined(TARGET_M68K)
+ {
+ env->pc = regs->pc;
+ env->dregs[0] = regs->d0;
+ env->dregs[1] = regs->d1;
+ env->dregs[2] = regs->d2;
+ env->dregs[3] = regs->d3;
+ env->dregs[4] = regs->d4;
+ env->dregs[5] = regs->d5;
+ env->dregs[6] = regs->d6;
+ env->dregs[7] = regs->d7;
+ env->aregs[0] = regs->a0;
+ env->aregs[1] = regs->a1;
+ env->aregs[2] = regs->a2;
+ env->aregs[3] = regs->a3;
+ env->aregs[4] = regs->a4;
+ env->aregs[5] = regs->a5;
+ env->aregs[6] = regs->a6;
+ env->aregs[7] = regs->usp;
+ env->sr = regs->sr;
+ ts->sim_syscalls = 1;
+ }
+#elif defined(TARGET_MICROBLAZE)
+ {
+ env->regs[0] = regs->r0;
+ env->regs[1] = regs->r1;
+ env->regs[2] = regs->r2;
+ env->regs[3] = regs->r3;
+ env->regs[4] = regs->r4;
+ env->regs[5] = regs->r5;
+ env->regs[6] = regs->r6;
+ env->regs[7] = regs->r7;
+ env->regs[8] = regs->r8;
+ env->regs[9] = regs->r9;
+ env->regs[10] = regs->r10;
+ env->regs[11] = regs->r11;
+ env->regs[12] = regs->r12;
+ env->regs[13] = regs->r13;
+ env->regs[14] = regs->r14;
+ env->regs[15] = regs->r15;
+ env->regs[16] = regs->r16;
+ env->regs[17] = regs->r17;
+ env->regs[18] = regs->r18;
+ env->regs[19] = regs->r19;
+ env->regs[20] = regs->r20;
+ env->regs[21] = regs->r21;
+ env->regs[22] = regs->r22;
+ env->regs[23] = regs->r23;
+ env->regs[24] = regs->r24;
+ env->regs[25] = regs->r25;
+ env->regs[26] = regs->r26;
+ env->regs[27] = regs->r27;
+ env->regs[28] = regs->r28;
+ env->regs[29] = regs->r29;
+ env->regs[30] = regs->r30;
+ env->regs[31] = regs->r31;
+ env->sregs[SR_PC] = regs->pc;
+ }
+#elif defined(TARGET_MIPS)
+ {
+ int i;
+
+ for(i = 0; i < 32; i++) {
+ env->active_tc.gpr[i] = regs->regs[i];
+ }
+ env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
+ if (regs->cp0_epc & 1) {
+ env->hflags |= MIPS_HFLAG_M16;
+ }
+ }
+#elif defined(TARGET_SH4)
+ {
+ int i;
+
+ for(i = 0; i < 16; i++) {
+ env->gregs[i] = regs->regs[i];
+ }
+ env->pc = regs->pc;
+ }
+#elif defined(TARGET_ALPHA)
+ {
+ int i;
+
+ for(i = 0; i < 28; i++) {
+ env->ir[i] = ((abi_ulong *)regs)[i];
+ }
+ env->ir[IR_SP] = regs->usp;
+ env->pc = regs->pc;
+ }
+#elif defined(TARGET_CRIS)
+ {
+ env->regs[0] = regs->r0;
+ env->regs[1] = regs->r1;
+ env->regs[2] = regs->r2;
+ env->regs[3] = regs->r3;
+ env->regs[4] = regs->r4;
+ env->regs[5] = regs->r5;
+ env->regs[6] = regs->r6;
+ env->regs[7] = regs->r7;
+ env->regs[8] = regs->r8;
+ env->regs[9] = regs->r9;
+ env->regs[10] = regs->r10;
+ env->regs[11] = regs->r11;
+ env->regs[12] = regs->r12;
+ env->regs[13] = regs->r13;
+ env->regs[14] = info->start_stack;
+ env->regs[15] = regs->acr;
+ env->pc = regs->erp;
+ }
+#else
+#error unsupported target CPU
+#endif
+
+#if defined(TARGET_ARM) || defined(TARGET_M68K)
+ ts->stack_base = info->start_stack;
+ ts->heap_base = info->brk;
+ /* This will be filled in on the first SYS_HEAPINFO call. */
+ ts->heap_limit = 0;
+#endif
+
+ if (gdbstub_port) {
+ gdbserver_start (gdbstub_port);
+ gdb_handlesig(env, 0);
+ }
+ cpu_loop(env);
+ /* never exits */
+ return 0;
+}