Diffstat (limited to 'src/recompiler/exec-all.h')
-rw-r--r--  src/recompiler/exec-all.h  |  275
1 file changed, 102 insertions(+), 173 deletions(-)
diff --git a/src/recompiler/exec-all.h b/src/recompiler/exec-all.h
index 5e3af3382..c8e4db05d 100644
--- a/src/recompiler/exec-all.h
+++ b/src/recompiler/exec-all.h
@@ -14,8 +14,7 @@
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -27,14 +26,14 @@
* of the LGPL is applied is otherwise unspecified.
*/
-/* allow to see translation results - the slowdown should be negligible, so we leave it */
-#ifndef VBOX
-#define DEBUG_DISAS
-#endif
+#ifndef _EXEC_ALL_H_
+#define _EXEC_ALL_H_
+#include "qemu-common.h"
#ifdef VBOX
# include <VBox/vmm/tm.h>
# include <VBox/vmm/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
+# include <VBox/vmm/em.h> /* EMRemIsLockOwner */
# ifndef LOG_GROUP
# define LOG_GROUP LOG_GROUP_REM
# endif
@@ -43,6 +42,20 @@
# include <VBox/vmm/vm.h>
#endif /* VBOX */
+/* allow to see translation results - the slowdown should be negligible, so we leave it */
+#ifndef VBOX
+#define DEBUG_DISAS
+#endif /* !VBOX */
+
+/* Page tracking code uses ram addresses in system mode, and virtual
+ addresses in userspace mode. Define tb_page_addr_t to be an appropriate
+ type. */
+#if defined(CONFIG_USER_ONLY)
+typedef abi_ulong tb_page_addr_t;
+#else
+typedef ram_addr_t tb_page_addr_t;
+#endif
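Note: in a CONFIG_USER_ONLY build the guest shares the host address space, so a guest virtual address (abi_ulong) is enough to identify a code page; in system mode, code pages are tracked by their offset into guest RAM (ram_addr_t), which stays stable even if the guest remaps the page.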
+
/* is_jmp field values */
#define DISAS_NEXT 0 /* next instruction can be analyzed */
#define DISAS_JUMP 1 /* only pc was modified dynamically */
@@ -52,32 +65,35 @@
typedef struct TranslationBlock TranslationBlock;
/* XXX: make safe guess about sizes */
-#define MAX_OP_PER_INSTR 64
-/* A Call op needs up to 6 + 2N parameters (N = number of arguments). */
-#define MAX_OPC_PARAM 10
-#define OPC_BUF_SIZE 512
+#define MAX_OP_PER_INSTR 96
+
+#if HOST_LONG_BITS == 32
+#define MAX_OPC_PARAM_PER_ARG 2
+#else
+#define MAX_OPC_PARAM_PER_ARG 1
+#endif
+#define MAX_OPC_PARAM_IARGS 4
+#define MAX_OPC_PARAM_OARGS 1
+#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
+
+/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
+ * and up to 4 + N parameters on 64-bit archs
+ * (N = number of input arguments + output arguments). */
+#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
+#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
/* Maximum size a TCG op can expand to. This is complicated because a
single op may require several host instructions and register reloads.
- For now take a wild guess at 128 bytes, which should allow at least
+ For now take a wild guess at 192 bytes, which should allow at least
a couple of fixup instructions per argument. */
-#define TCG_MAX_OP_SIZE 128
+#define TCG_MAX_OP_SIZE 192
#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
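Worked out, the new parameter budget is 4 + 2 * (4 + 1) = 14 slots per call op on a 32-bit host (each argument may be a 64-bit value split across two slots) and 4 + 1 * (4 + 1) = 9 on a 64-bit host; the old flat MAX_OPC_PARAM of 10 was too small for the 32-bit case under this accounting.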
extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
-extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
-extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
extern uint16_t gen_opc_icount[OPC_BUF_SIZE];
-extern target_ulong gen_opc_jump_pc[2];
-extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];
-
-typedef void (GenOpFunc)(void);
-typedef void (GenOpFunc1)(long);
-typedef void (GenOpFunc2)(long, long);
-typedef void (GenOpFunc3)(long, long, long);
#include "qemu-log.h"
@@ -86,45 +102,30 @@ void gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void gen_pc_load(CPUState *env, struct TranslationBlock *tb,
unsigned long searched_pc, int pc_pos, void *puc);
-unsigned long code_gen_max_block_size(void);
void cpu_gen_init(void);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
CPUState *env, unsigned long searched_pc,
void *puc);
-int cpu_restore_state_copy(struct TranslationBlock *tb,
- CPUState *env, unsigned long searched_pc,
- void *puc);
void cpu_resume_from_signal(CPUState *env1, void *puc);
void cpu_io_recompile(CPUState *env, void *retaddr);
TranslationBlock *tb_gen_code(CPUState *env,
target_ulong pc, target_ulong cs_base, int flags,
int cflags);
void cpu_exec_init(CPUState *env);
+void QEMU_NORETURN cpu_loop_exit(void);
int page_unprotect(target_ulong address, unsigned long pc, void *puc);
-void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
-int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
- target_phys_addr_t paddr, int prot,
- int mmu_idx, int is_softmmu);
-#ifndef VBOX
-static inline int tlb_set_page(CPUState *env1, target_ulong vaddr,
- target_phys_addr_t paddr, int prot,
- int mmu_idx, int is_softmmu)
-#else
-DECLINLINE(int) tlb_set_page(CPUState *env1, target_ulong vaddr,
- target_phys_addr_t paddr, int prot,
- int mmu_idx, int is_softmmu)
+#if !defined(CONFIG_USER_ONLY)
+void tlb_set_page(CPUState *env, target_ulong vaddr,
+ target_phys_addr_t paddr, int prot,
+ int mmu_idx, target_ulong size);
#endif
-{
- if (prot & PAGE_READ)
- prot |= PAGE_EXEC;
- return tlb_set_page_exec(env1, vaddr, paddr, prot, mmu_idx, is_softmmu);
-}
#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
@@ -142,15 +143,12 @@ DECLINLINE(int) tlb_set_page(CPUState *env1, target_ulong vaddr,
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif
-#if defined(__powerpc__) || defined(__x86_64__) || defined(__arm__)
-#define USE_DIRECT_JUMP
-#endif
-#if defined(__i386__) && !defined(_WIN32)
+#if defined(_ARCH_PPC) || defined(__x86_64__) || defined(__arm__) || defined(__i386__)
#define USE_DIRECT_JUMP
#endif
#ifdef VBOX /* bird: not safe in next step because of threading & cpu_interrupt. */
-#undef USE_DIRECT_JUMP
+# undef USE_DIRECT_JUMP
#endif /* VBOX */
struct TranslationBlock {
@@ -162,9 +160,8 @@ struct TranslationBlock {
uint16_t cflags; /* compile flags */
#define CF_COUNT_MASK 0x7fff
#define CF_LAST_IO 0x8000 /* Last insn may be an IO access. */
-
#ifdef VBOX
-#define CF_RAW_MODE 0x0010 /* block was generated in raw mode */
+# define CF_RAW_MODE 0x0010 /* block was generated in raw mode */
#endif
uint8_t *tc_ptr; /* pointer to the translated code */
@@ -173,13 +170,13 @@ struct TranslationBlock {
/* first and second physical page containing code. The lower bit
of the pointer tells the index in page_next[] */
struct TranslationBlock *page_next[2];
- target_ulong page_addr[2];
+ tb_page_addr_t page_addr[2];
/* the following data are used to directly call another TB from
the code of this one. */
uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
- uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
+ uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
#else
unsigned long tb_next[2]; /* address of jump generated code */
#endif
@@ -192,35 +189,22 @@ struct TranslationBlock {
uint32_t icount;
};
-#ifndef VBOX
static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
-#else
-DECLINLINE(unsigned int) tb_jmp_cache_hash_page(target_ulong pc)
-#endif
{
target_ulong tmp;
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
- return (tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK;
+ return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}
-#ifndef VBOX
static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
-#else
-DECLINLINE(unsigned int) tb_jmp_cache_hash_func(target_ulong pc)
-#endif
-
{
target_ulong tmp;
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
- return (((tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK) |
- (tmp & TB_JMP_ADDR_MASK));
+ return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
+ | (tmp & TB_JMP_ADDR_MASK));
}
-#ifndef VBOX
-static inline unsigned int tb_phys_hash_func(unsigned long pc)
-#else
-DECLINLINE(unsigned int) tb_phys_hash_func(unsigned long pc)
-#endif
+static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
{
return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
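For reference, both jump-cache helpers now derive the page bits with the same shift. A minimal standalone sketch of the combined hash, with assumed constants mirroring QEMU's cpu-defs.h of this era (TB_JMP_CACHE_BITS = 12, hence TB_JMP_PAGE_BITS = 6, and TARGET_PAGE_BITS = 12); not part of the patch:

    /* sketch with assumed constants; not part of the patch */
    #define TARGET_PAGE_BITS  12
    #define TB_JMP_CACHE_BITS 12
    #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
    #define TB_JMP_PAGE_BITS  (TB_JMP_CACHE_BITS / 2)
    #define TB_JMP_PAGE_SIZE  (1 << TB_JMP_PAGE_BITS)
    #define TB_JMP_ADDR_MASK  (TB_JMP_PAGE_SIZE - 1)
    #define TB_JMP_PAGE_MASK  (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)

    static unsigned int jmp_cache_hash(unsigned long pc)
    {
        unsigned long tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
        return ((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
               | (tmp & TB_JMP_ADDR_MASK);
    }

With these assumed values the old shift (TB_JMP_PAGE_BITS) and the new one (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS) both equal 6, so the rewrite changes nothing here; it only matters for targets whose TARGET_PAGE_BITS differs from 12.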
@@ -228,40 +212,49 @@ DECLINLINE(unsigned int) tb_phys_hash_func(unsigned long pc)
TranslationBlock *tb_alloc(target_ulong pc);
void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *env);
-void tb_link_phys(TranslationBlock *tb,
- target_ulong phys_pc, target_ulong phys_page2);
-void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr);
+void tb_link_page(TranslationBlock *tb,
+ tb_page_addr_t phys_pc, tb_page_addr_t phys_page2);
+void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
-extern uint8_t *code_gen_ptr;
-extern int code_gen_max_blocks;
-
#if defined(USE_DIRECT_JUMP)
-#if defined(__powerpc__)
+#if defined(_ARCH_PPC)
+extern void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
+#define tb_set_jmp_target1 ppc_tb_set_jmp_target
+#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
- uint32_t val, *ptr;
-
/* patch the branch destination */
- ptr = (uint32_t *)jmp_addr;
- val = *ptr;
- val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc);
- *ptr = val;
- /* flush icache */
- asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
- asm volatile ("sync" : : : "memory");
- asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
- asm volatile ("sync" : : : "memory");
- asm volatile ("isync" : : : "memory");
+ *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
+ /* no need to flush icache explicitly */
}
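The x86 path stores the rel32 operand of a jmp instruction, which is measured from the end of the 4-byte displacement, hence addr - (jmp_addr + 4): for jmp_addr = 0x1000 and addr = 0x1100 the stored value is 0x1100 - 0x1004 = 0xfc. x86 keeps the instruction cache coherent with data writes, so no explicit flush is required.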
-#elif defined(__i386__)
+#elif defined(__arm__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
- /* patch the branch destination */
- *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
- /* no need to flush icache explicitely */
+#if QEMU_GNUC_PREREQ(4, 1)
+ void __clear_cache(char *beg, char *end);
+#else
+ register unsigned long _beg __asm ("a1");
+ register unsigned long _end __asm ("a2");
+ register unsigned long _flg __asm ("a3");
+#endif
+
+ /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
+ *(uint32_t *)jmp_addr =
+ (*(uint32_t *)jmp_addr & ~0xffffff)
+ | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);
+
+#if QEMU_GNUC_PREREQ(4, 1)
+ __clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
+#else
+ /* flush icache */
+ _beg = jmp_addr;
+ _end = jmp_addr + 4;
+ _flg = 0;
+ __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
+#endif
}
#endif
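The ARM path instead rewrites the signed 24-bit immediate of a B instruction: the offset is counted in words (the >> 2) relative to the pipeline PC, which reads as the instruction address plus 8. ARM's split caches do require an explicit flush, done via GCC's __clear_cache() builtin when available (GCC >= 4.1) and otherwise via the Linux-specific cacheflush syscall (swi 0x9f0002).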
@@ -272,34 +265,21 @@ static inline void tb_set_jmp_target(TranslationBlock *tb,
offset = tb->tb_jmp_offset[n];
tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
- offset = tb->tb_jmp_offset[n + 2];
- if (offset != 0xffff)
- tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}
#else
/* set the jump target */
-#ifndef VBOX
static inline void tb_set_jmp_target(TranslationBlock *tb,
int n, unsigned long addr)
-#else
-DECLINLINE(void) tb_set_jmp_target(TranslationBlock *tb,
- int n, unsigned long addr)
-#endif
{
tb->tb_next[n] = addr;
}
#endif
-#ifndef VBOX
static inline void tb_add_jump(TranslationBlock *tb, int n,
TranslationBlock *tb_next)
-#else
-DECLINLINE(void) tb_add_jump(TranslationBlock *tb, int n,
- TranslationBlock *tb_next)
-#endif
{
/* NOTE: this test is only needed for thread safety */
if (!tb->jmp_next[n]) {
@@ -314,28 +294,6 @@ DECLINLINE(void) tb_add_jump(TranslationBlock *tb, int n,
TranslationBlock *tb_find_pc(unsigned long pc_ptr);
-#ifndef offsetof
-#define offsetof(type, field) ((size_t) &((type *)0)->field)
-#endif
-
-#if defined(_WIN32)
-#define ASM_DATA_SECTION ".section \".data\"\n"
-#define ASM_PREVIOUS_SECTION ".section .text\n"
-#elif defined(__APPLE__)
-#define ASM_DATA_SECTION ".data\n"
-#define ASM_PREVIOUS_SECTION ".text\n"
-#else
-#define ASM_DATA_SECTION ".section \".data\"\n"
-#define ASM_PREVIOUS_SECTION ".previous\n"
-#endif
-
-#define ASM_OP_LABEL_NAME(n, opname) \
- ASM_NAME(__op_label) #n "." ASM_NAME(opname)
-
-extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
-extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
-extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
-
#include "qemu-lock.h"
extern spinlock_t tb_lock;
@@ -344,6 +302,10 @@ extern int tb_invalidated_flag;
#if !defined(CONFIG_USER_ONLY)
+extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
+extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
+extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
+
void tlb_fill(target_ulong addr, int is_write, int mmu_idx,
void *retaddr);
@@ -372,7 +334,7 @@ void tlb_fill(target_ulong addr, int is_write, int mmu_idx,
#endif
#if defined(CONFIG_USER_ONLY)
-static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
+static inline tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
return addr;
}
@@ -383,13 +345,10 @@ target_ulong remR3PhysGetPhysicalAddressCode(CPUState *env, target_ulong addr, C
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
is the offset relative to phys_ram_base */
-#ifndef VBOX
-static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
-#else
-DECLINLINE(target_ulong) get_phys_addr_code(CPUState *env1, target_ulong addr)
-#endif
+static inline tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
int mmu_idx, page_index, pd;
+ void *p;
page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
mmu_idx = cpu_mmu_index(env1);
@@ -410,60 +369,30 @@ DECLINLINE(target_ulong) get_phys_addr_code(CPUState *env1, target_ulong addr)
cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
}
-
# if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
return addr + env1->tlb_table[mmu_idx][page_index].addend;
# elif defined(VBOX)
Assert(env1->phys_addends[mmu_idx][page_index] != -1);
return addr + env1->phys_addends[mmu_idx][page_index];
# else
- return addr + env1->tlb_table[mmu_idx][page_index].addend - (unsigned long)phys_ram_base;
+ p = (void *)(unsigned long)addr
+ + env1->tlb_table[mmu_idx][page_index].addend;
+ return qemu_ram_addr_from_host(p);
# endif
}
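In the plain QEMU path, the TLB addend converts a guest virtual address into a host pointer; the removed code then subtracted phys_ram_base, which is only valid while guest RAM is a single contiguous host allocation. qemu_ram_addr_from_host() instead looks the host pointer up among the registered RAM blocks and returns its ram_addr_t offset, matching the function's new tb_page_addr_t return type.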
-
-
-/* Deterministic execution requires that IO only be performed on the last
- instruction of a TB so that interrupts take effect immediately. */
-#ifndef VBOX
-static inline int can_do_io(CPUState *env)
-#else
-DECLINLINE(int) can_do_io(CPUState *env)
#endif
-{
- if (!use_icount)
- return 1;
-
- /* If not executing code then assume we are ok. */
- if (!env->current_tb)
- return 1;
- return env->can_do_io != 0;
-}
-#endif
+typedef void (CPUDebugExcpHandler)(CPUState *env);
+CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);
-#ifdef USE_KQEMU
-#define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))
+#ifndef VBOX
+/* vl.c */
+extern int singlestep;
-int kqemu_init(CPUState *env);
-int kqemu_cpu_exec(CPUState *env);
-void kqemu_flush_page(CPUState *env, target_ulong addr);
-void kqemu_flush(CPUState *env, int global);
-void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
-void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
-void kqemu_cpu_interrupt(CPUState *env);
-void kqemu_record_dump(void);
+/* cpu-exec.c */
+extern volatile sig_atomic_t exit_request;
+#endif /*!VBOX*/
-static inline int kqemu_is_ok(CPUState *env)
-{
- return(env->kqemu_enabled &&
- (env->cr[0] & CR0_PE_MASK) &&
- !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
- (env->eflags & IF_MASK) &&
- !(env->eflags & VM_MASK) &&
- (env->kqemu_enabled == 2 ||
- ((env->hflags & HF_CPL_MASK) == 3 &&
- (env->eflags & IOPL_MASK) != IOPL_MASK)));
-}
#endif