x86_64 target support

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1197 c046a42c-6fe2-441c-8c8c-71466251a162
bellard 2005-01-03 23:50:08 +00:00
parent c46878786a
commit 14ce26e755
11 changed files with 3686 additions and 1308 deletions

@ -20,7 +20,13 @@
#ifndef CPU_I386_H
#define CPU_I386_H
#include "config.h"
#ifdef TARGET_X86_64
#define TARGET_LONG_BITS 64
#else
#define TARGET_LONG_BITS 32
#endif
/* target supports implicit self modifying code */
#define TARGET_HAS_SMC
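For illustration, a minimal standalone sketch of how the target word width follows TARGET_LONG_BITS; the target_ulong typedef and TARGET_FMT_lx macro here are simplified stand-ins for QEMU's real common definitions, used only to show the 32/64 bit split:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define TARGET_X86_64 1                 /* pick the 64 bit branch for the demo */

#ifdef TARGET_X86_64
#define TARGET_LONG_BITS 64
typedef uint64_t target_ulong;
#define TARGET_FMT_lx "%016" PRIx64
#else
#define TARGET_LONG_BITS 32
typedef uint32_t target_ulong;
#define TARGET_FMT_lx "%08" PRIx32
#endif

int main(void)
{
    target_ulong addr = (target_ulong)-1;   /* widest representable address */
    printf("TARGET_LONG_BITS=%d addr=" TARGET_FMT_lx "\n", TARGET_LONG_BITS, addr);
    return 0;
}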
@ -63,6 +69,8 @@
#define DESC_G_MASK (1 << 23)
#define DESC_B_SHIFT 22
#define DESC_B_MASK (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */
#define DESC_L_MASK (1 << DESC_L_SHIFT)
#define DESC_AVL_MASK (1 << 20)
#define DESC_P_MASK (1 << 15)
#define DESC_DPL_SHIFT 13
@ -125,6 +133,8 @@
#define HF_EM_SHIFT 10
#define HF_TS_SHIFT 11
#define HF_IOPL_SHIFT 12 /* must be same as eflags */
#define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */
#define HF_VM_SHIFT 17 /* must be same as eflags */
#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
@ -138,6 +148,8 @@
#define HF_MP_MASK (1 << HF_MP_SHIFT)
#define HF_EM_MASK (1 << HF_EM_SHIFT)
#define HF_TS_MASK (1 << HF_TS_SHIFT)
#define HF_LMA_MASK (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK (1 << HF_CS64_SHIFT)
#define CR0_PE_MASK (1 << 0)
#define CR0_MP_MASK (1 << 1)
@ -156,6 +168,9 @@
#define CR4_PSE_MASK (1 << 4)
#define CR4_PAE_MASK (1 << 5)
#define CR4_PGE_MASK (1 << 7)
#define CR4_PCE_MASK (1 << 8)
#define CR4_OSFXSR_MASK (1 << 9)
#define CR4_OSXMMEXCPT_MASK (1 << 10)
#define PG_PRESENT_BIT 0
#define PG_RW_BIT 1
@ -193,6 +208,44 @@
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176
#define MSR_EFER 0xc0000080
#define MSR_EFER_SCE (1 << 0)
#define MSR_EFER_LME (1 << 8)
#define MSR_EFER_LMA (1 << 10)
#define MSR_EFER_NXE (1 << 11)
#define MSR_EFER_FFXSR (1 << 14)
#define MSR_STAR 0xc0000081
#define MSR_LSTAR 0xc0000082
#define MSR_CSTAR 0xc0000083
#define MSR_FMASK 0xc0000084
#define MSR_FSBASE 0xc0000100
#define MSR_GSBASE 0xc0000101
#define MSR_KERNELGSBASE 0xc0000102
/* cpuid_features bits */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME (1 << 1)
#define CPUID_DE (1 << 2)
#define CPUID_PSE (1 << 3)
#define CPUID_TSC (1 << 4)
#define CPUID_MSR (1 << 5)
#define CPUID_PAE (1 << 6)
#define CPUID_MCE (1 << 7)
#define CPUID_CX8 (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE (1 << 13)
#define CPUID_MCA (1 << 14)
#define CPUID_CMOV (1 << 15)
/* ... */
#define CPUID_MMX (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE (1 << 25)
#define CPUID_SSE2 (1 << 26)
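A small usage sketch of the new cpuid feature masks; the feature word below is a made-up value, only the bit tests come from the defines above:

#include <stdint.h>
#include <stdio.h>

#define CPUID_SEP  (1 << 11)
#define CPUID_CMOV (1 << 15)
#define CPUID_SSE2 (1 << 26)

/* report a few bits of an EDX-style feature word */
static void dump_features(uint32_t features)
{
    printf("sep=%d cmov=%d sse2=%d\n",
           !!(features & CPUID_SEP),
           !!(features & CPUID_CMOV),
           !!(features & CPUID_SSE2));
}

int main(void)
{
    dump_features(CPUID_CMOV | CPUID_SSE2);   /* hypothetical feature word */
    return 0;
}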
#define EXCP00_DIVZ 0
#define EXCP01_SSTP 1
#define EXCP02_NMI 2
@ -219,42 +272,52 @@ enum {
CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
CC_OP_MULW,
CC_OP_MULL,
CC_OP_MULQ,
CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
CC_OP_ADDW,
CC_OP_ADDL,
CC_OP_ADDQ,
CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
CC_OP_ADCW,
CC_OP_ADCL,
CC_OP_ADCQ,
CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
CC_OP_SUBW,
CC_OP_SUBL,
CC_OP_SUBQ,
CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
CC_OP_SBBW,
CC_OP_SBBL,
CC_OP_SBBQ,
CC_OP_LOGICB, /* modify all flags, CC_DST = res */
CC_OP_LOGICW,
CC_OP_LOGICL,
CC_OP_LOGICQ,
CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */
CC_OP_INCW,
CC_OP_INCL,
CC_OP_INCQ,
CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C */
CC_OP_DECW,
CC_OP_DECL,
CC_OP_DECQ,
CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
CC_OP_SHLW,
CC_OP_SHLL,
CC_OP_SHLQ,
CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
CC_OP_SARW,
CC_OP_SARL,
CC_OP_SARQ,
CC_OP_NB,
};
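The enum now carries four size variants (B, W, L, Q) per group, so the distance from one group to the next is 4; the "cf * 4" adjustments further down in the op templates rely on this. A minimal sketch of the indexing convention (enum trimmed to two groups, names reused from above):

#include <stdio.h>

enum {
    CC_OP_ADDB, CC_OP_ADDW, CC_OP_ADDL, CC_OP_ADDQ,
    CC_OP_ADCB, CC_OP_ADCW, CC_OP_ADCL, CC_OP_ADCQ,
};

/* SHIFT encodes the operand size (0=8, 1=16, 2=32, 3=64 bit); adding
   cf * 4 moves from the ADD group to the ADC group of the same size */
static int add_cc_op(int shift, int carry_in)
{
    return CC_OP_ADDB + shift + carry_in * 4;
}

int main(void)
{
    printf("%d\n", add_cc_op(3, 1) == CC_OP_ADCQ);   /* prints 1 */
    return 0;
}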
@ -271,22 +334,42 @@ typedef double CPU86_LDouble;
typedef struct SegmentCache {
uint32_t selector;
uint8_t *base;
target_ulong base;
uint32_t limit;
uint32_t flags;
} SegmentCache;
typedef struct {
union {
uint8_t b[16];
uint16_t w[8];
uint32_t l[4];
uint64_t q[2];
} u;
} XMMReg;
#ifdef TARGET_X86_64
#define CPU_NB_REGS 16
#else
#define CPU_NB_REGS 8
#endif
typedef struct CPUX86State {
#if TARGET_LONG_BITS > HOST_LONG_BITS
/* temporaries if we cannot store them in host registers */
target_ulong t0, t1, t2;
#endif
/* standard registers */
uint32_t regs[8];
uint32_t eip;
uint32_t eflags; /* eflags register. During CPU emulation, CC
target_ulong regs[CPU_NB_REGS];
target_ulong eip;
target_ulong eflags; /* eflags register. During CPU emulation, CC
flags and DF are set to zero because they are
stored elsewhere */
/* emulator internal eflags handling */
uint32_t cc_src;
uint32_t cc_dst;
target_ulong cc_src;
target_ulong cc_dst;
uint32_t cc_op;
int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
uint32_t hflags; /* hidden flags, see HF_xxx constants */
@ -314,10 +397,21 @@ typedef struct CPUX86State {
SegmentCache gdt; /* only base and limit are used */
SegmentCache idt; /* only base and limit are used */
XMMReg xmm_regs[CPU_NB_REGS];
XMMReg xmm_t0;
/* sysenter registers */
uint32_t sysenter_cs;
uint32_t sysenter_esp;
uint32_t sysenter_eip;
#ifdef TARGET_X86_64
target_ulong efer;
target_ulong star;
target_ulong lstar;
target_ulong cstar;
target_ulong fmask;
target_ulong kernelgsbase;
#endif
/* temporary data for USE_CODE_COPY mode */
#ifdef USE_CODE_COPY
@ -333,8 +427,8 @@ typedef struct CPUX86State {
int exception_is_int;
int exception_next_eip;
struct TranslationBlock *current_tb; /* currently executing TB */
uint32_t cr[5]; /* NOTE: cr1 is unused */
uint32_t dr[8]; /* debug registers */
target_ulong cr[5]; /* NOTE: cr1 is unused */
target_ulong dr[8]; /* debug registers */
int interrupt_request;
int user_mode_only; /* user mode only simulation */
@ -346,18 +440,28 @@ typedef struct CPUX86State {
context) */
unsigned long mem_write_pc; /* host pc at which the memory was
written */
unsigned long mem_write_vaddr; /* target virtual addr at which the
memory was written */
target_ulong mem_write_vaddr; /* target virtual addr at which the
memory was written */
/* 0 = kernel, 1 = user */
CPUTLBEntry tlb_read[2][CPU_TLB_SIZE];
CPUTLBEntry tlb_write[2][CPU_TLB_SIZE];
/* from this point: preserved by CPU reset */
/* ice debug support */
uint32_t breakpoints[MAX_BREAKPOINTS];
target_ulong breakpoints[MAX_BREAKPOINTS];
int nb_breakpoints;
int singlestep_enabled;
/* processor features (e.g. for CPUID insn) */
uint32_t cpuid_vendor1;
uint32_t cpuid_vendor2;
uint32_t cpuid_vendor3;
uint32_t cpuid_version;
uint32_t cpuid_features;
/* in order to simplify APIC support, we leave this pointer to the
user */
struct APICState *apic_state;
/* user data */
void *opaque;
} CPUX86State;
@ -382,7 +486,7 @@ void cpu_set_ferr(CPUX86State *s);
cache: it synchronizes the hflags with the segment cache values */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
int seg_reg, unsigned int selector,
uint8_t *base, unsigned int limit,
uint32_t base, unsigned int limit,
unsigned int flags)
{
SegmentCache *sc;
@ -395,27 +499,45 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env,
sc->flags = flags;
/* update the hidden flags */
new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
>> (DESC_B_SHIFT - HF_CS32_SHIFT);
new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
>> (DESC_B_SHIFT - HF_SS32_SHIFT);
if (!(env->cr[0] & CR0_PE_MASK) ||
(env->eflags & VM_MASK) ||
!(new_hflags & HF_CS32_MASK)) {
/* XXX: try to avoid this test. The problem comes from the
fact that in real mode or vm86 mode we only modify the
'base' and 'selector' fields of the segment cache to go
faster. A solution may be to force addseg to one in
translate-i386.c. */
new_hflags |= HF_ADDSEG_MASK;
} else {
new_hflags |= (((unsigned long)env->segs[R_DS].base |
(unsigned long)env->segs[R_ES].base |
(unsigned long)env->segs[R_SS].base) != 0) <<
HF_ADDSEG_SHIFT;
{
if (seg_reg == R_CS) {
#ifdef TARGET_X86_64
if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
/* long mode */
env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
env->hflags &= ~(HF_ADDSEG_MASK);
} else
#endif
{
/* legacy / compatibility case */
new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
>> (DESC_B_SHIFT - HF_CS32_SHIFT);
env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
new_hflags;
}
}
new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
>> (DESC_B_SHIFT - HF_SS32_SHIFT);
if (env->hflags & HF_CS64_MASK) {
/* zero base assumed for DS, ES and SS in long mode */
} else if (!(env->cr[0] & CR0_PE_MASK) ||
(env->eflags & VM_MASK) ||
!(new_hflags & HF_CS32_MASK)) {
/* XXX: try to avoid this test. The problem comes from the
fact that in real mode or vm86 mode we only modify the
'base' and 'selector' fields of the segment cache to go
faster. A solution may be to force addseg to one in
translate-i386.c. */
new_hflags |= HF_ADDSEG_MASK;
} else {
new_hflags |= (((unsigned long)env->segs[R_DS].base |
(unsigned long)env->segs[R_ES].base |
(unsigned long)env->segs[R_SS].base) != 0) <<
HF_ADDSEG_SHIFT;
}
env->hflags = (env->hflags &
~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
}
env->hflags = (env->hflags &
~(HF_CS32_MASK | HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
}
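A standalone sketch of the CS classification this helper now performs: in long mode a descriptor with the L bit set selects a 64 bit code segment, otherwise the B bit chooses between 16 and 32 bit. Only the descriptor bit values come from the header above; the enum and function are illustrative:

#include <stdint.h>
#include <stdio.h>

#define DESC_B_MASK (1u << 22)
#define DESC_L_MASK (1u << 21)

enum { CS_16, CS_32, CS_64 };

static int classify_cs(int long_mode_active, uint32_t desc_flags)
{
    if (long_mode_active && (desc_flags & DESC_L_MASK))
        return CS_64;                            /* 64 bit code segment */
    return (desc_flags & DESC_B_MASK) ? CS_32 : CS_16;
}

int main(void)
{
    printf("%d %d %d\n",
           classify_cs(1, DESC_L_MASK),          /* CS_64 */
           classify_cs(0, DESC_B_MASK),          /* CS_32 */
           classify_cs(0, 0));                   /* CS_16 */
    return 0;
}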
/* wrapper, just in case memory mappings must be changed */
@ -448,6 +570,9 @@ void cpu_x86_set_a20(CPUX86State *env, int a20_state);
uint64_t cpu_get_tsc(CPUX86State *env);
void cpu_set_apic_base(CPUX86State *env, uint64_t val);
uint64_t cpu_get_apic_base(CPUX86State *env);
/* will be suppressed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);

@ -20,14 +20,29 @@
#include "config.h"
#include "dyngen-exec.h"
/* XXX: factorize this mess */
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
#define HOST_LONG_BITS 64
#else
#define HOST_LONG_BITS 32
#endif
#ifdef TARGET_X86_64
#define TARGET_LONG_BITS 64
#else
#define TARGET_LONG_BITS 32
#endif
/* at least 4 register variables are defined */
register struct CPUX86State *env asm(AREG0);
/* XXX: use 64 bit regs if HOST_LONG_BITS == 64 */
#if TARGET_LONG_BITS == 32
register uint32_t T0 asm(AREG1);
register uint32_t T1 asm(AREG2);
register uint32_t T2 asm(AREG3);
#define A0 T2
/* if more registers are available, we define some registers too */
#ifdef AREG4
register uint32_t EAX asm(AREG4);
@ -69,6 +84,17 @@ register uint32_t EDI asm(AREG11);
#define reg_EDI
#endif
#else
/* no registers can be used */
#define T0 (env->t0)
#define T1 (env->t1)
#define T2 (env->t2)
#endif
#define A0 T2
extern FILE *logfile;
extern int loglevel;
@ -136,26 +162,24 @@ void helper_movl_crN_T0(int reg);
void helper_movl_drN_T0(int reg);
void helper_invlpg(unsigned int addr);
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
void cpu_x86_update_cr3(CPUX86State *env, uint32_t new_cr3);
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr);
int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr,
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
int is_write, int is_user, int is_softmmu);
void tlb_fill(unsigned long addr, int is_write, int is_user,
void tlb_fill(target_ulong addr, int is_write, int is_user,
void *retaddr);
void __hidden cpu_lock(void);
void __hidden cpu_unlock(void);
void do_interrupt(int intno, int is_int, int error_code,
unsigned int next_eip, int is_hw);
target_ulong next_eip, int is_hw);
void do_interrupt_user(int intno, int is_int, int error_code,
unsigned int next_eip);
target_ulong next_eip);
void raise_interrupt(int intno, int is_int, int error_code,
unsigned int next_eip);
void raise_exception_err(int exception_index, int error_code);
void raise_exception(int exception_index);
void __hidden cpu_loop_exit(void);
void helper_fsave(uint8_t *ptr, int data32);
void helper_frstor(uint8_t *ptr, int data32);
void OPPROTO op_movl_eflags_T0(void);
void OPPROTO op_movl_T0_eflags(void);
@ -163,13 +187,20 @@ void raise_interrupt(int intno, int is_int, int error_code,
unsigned int next_eip);
void raise_exception_err(int exception_index, int error_code);
void raise_exception(int exception_index);
void helper_divl_EAX_T0(uint32_t eip);
void helper_idivl_EAX_T0(uint32_t eip);
void helper_divl_EAX_T0(void);
void helper_idivl_EAX_T0(void);
void helper_mulq_EAX_T0(void);
void helper_imulq_EAX_T0(void);
void helper_imulq_T0_T1(void);
void helper_divq_EAX_T0(void);
void helper_idivq_EAX_T0(void);
void helper_cmpxchg8b(void);
void helper_cpuid(void);
void helper_enter_level(int level, int data32);
void helper_sysenter(void);
void helper_sysexit(void);
void helper_syscall(void);
void helper_sysret(int dflag);
void helper_rdtsc(void);
void helper_rdmsr(void);
void helper_wrmsr(void);
@ -252,7 +283,7 @@ void check_iol_DX(void);
#define stl(p, v) stl_data(p, v)
#define stq(p, v) stq_data(p, v)
static inline double ldfq(void *ptr)
static inline double ldfq(target_ulong ptr)
{
union {
double d;
@ -262,7 +293,7 @@ static inline double ldfq(void *ptr)
return u.d;
}
static inline void stfq(void *ptr, double v)
static inline void stfq(target_ulong ptr, double v)
{
union {
double d;
@ -272,7 +303,7 @@ static inline void stfq(void *ptr, double v)
stq(ptr, u.i);
}
static inline float ldfl(void *ptr)
static inline float ldfl(target_ulong ptr)
{
union {
float f;
@ -282,7 +313,7 @@ static inline float ldfl(void *ptr)
return u.f;
}
static inline void stfl(void *ptr, float v)
static inline void stfl(target_ulong ptr, float v)
{
union {
float f;
@ -411,7 +442,7 @@ static inline void fpop(void)
}
#ifndef USE_X86LDOUBLE
static inline CPU86_LDouble helper_fldt(uint8_t *ptr)
static inline CPU86_LDouble helper_fldt(target_ulong ptr)
{
CPU86_LDoubleU temp;
int upper, e;
@ -451,12 +482,12 @@ static inline void helper_fstt(CPU86_LDouble f, uint8_t *ptr)
#ifdef CONFIG_USER_ONLY
static inline CPU86_LDouble helper_fldt(uint8_t *ptr)
static inline CPU86_LDouble helper_fldt(target_ulong ptr)
{
return *(CPU86_LDouble *)ptr;
}
static inline void helper_fstt(CPU86_LDouble f, uint8_t *ptr)
static inline void helper_fstt(CPU86_LDouble f, target_ulong ptr)
{
*(CPU86_LDouble *)ptr = f;
}
@ -465,7 +496,7 @@ static inline void helper_fstt(CPU86_LDouble f, uint8_t *ptr)
/* we use memory access macros */
static inline CPU86_LDouble helper_fldt(uint8_t *ptr)
static inline CPU86_LDouble helper_fldt(target_ulong ptr)
{
CPU86_LDoubleU temp;
@ -474,7 +505,7 @@ static inline CPU86_LDouble helper_fldt(uint8_t *ptr)
return temp.d;
}
static inline void helper_fstt(CPU86_LDouble f, uint8_t *ptr)
static inline void helper_fstt(CPU86_LDouble f, target_ulong ptr)
{
CPU86_LDoubleU temp;
@ -522,10 +553,12 @@ void helper_fscale(void);
void helper_fsin(void);
void helper_fcos(void);
void helper_fxam_ST0(void);
void helper_fstenv(uint8_t *ptr, int data32);
void helper_fldenv(uint8_t *ptr, int data32);
void helper_fsave(uint8_t *ptr, int data32);
void helper_frstor(uint8_t *ptr, int data32);
void helper_fstenv(target_ulong ptr, int data32);
void helper_fldenv(target_ulong ptr, int data32);
void helper_fsave(target_ulong ptr, int data32);
void helper_frstor(target_ulong ptr, int data32);
void helper_fxsave(target_ulong ptr, int data64);
void helper_fxrstor(target_ulong ptr, int data64);
void restore_native_fp_state(CPUState *env);
void save_native_fp_state(CPUState *env);

File diff suppressed because it is too large

@ -77,6 +77,41 @@ CPUX86State *cpu_x86_init(void)
asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
}
#endif
{
int family, model, stepping;
#ifdef TARGET_X86_64
env->cpuid_vendor1 = 0x68747541; /* "Auth" */
env->cpuid_vendor2 = 0x69746e65; /* "enti" */
env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
family = 6;
model = 2;
stepping = 3;
#else
env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#if 0
/* pentium 75-200 */
family = 5;
model = 2;
stepping = 11;
#else
/* pentium pro */
family = 6;
model = 1;
stepping = 3;
#endif
#endif
env->cpuid_version = (family << 8) | (model << 4) | stepping;
env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
CPUID_TSC | CPUID_MSR | CPUID_MCE |
CPUID_CX8 | CPUID_PGE | CPUID_CMOV);
#ifdef TARGET_X86_64
/* currently not enabled for std i386 because not fully tested */
env->cpuid_features |= CPUID_APIC | CPUID_FXSR | CPUID_PAE |
CPUID_SSE | CPUID_SSE2;
#endif
}
cpu_single_env = env;
cpu_reset(env);
return env;
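The three vendor dwords set in cpu_x86_init above are the 12-character CPUID vendor string packed in little-endian order; a quick standalone check using the x86_64 values from the hunk (prints AuthenticAMD on a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint32_t v[3] = { 0x68747541, 0x69746e65, 0x444d4163 };   /* "Auth" "enti" "cAMD" */
    char vendor[13];

    memcpy(vendor, v, 12);     /* consecutive little-endian dwords form the string */
    vendor[12] = '\0';
    printf("%s\n", vendor);
    return 0;
}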
@ -107,12 +142,12 @@ void cpu_reset(CPUX86State *env)
env->tr.limit = 0xffff;
env->tr.flags = DESC_P_MASK;
cpu_x86_load_seg_cache(env, R_CS, 0xf000, (uint8_t *)0xffff0000, 0xffff, 0);
cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0xffff, 0);
cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0xffff, 0);
cpu_x86_load_seg_cache(env, R_SS, 0, NULL, 0xffff, 0);
cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0xffff, 0);
cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0xffff, 0);
cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);
env->eip = 0xfff0;
env->regs[R_EDX] = 0x600; /* indicate P6 processor */
@ -136,36 +171,56 @@ void cpu_x86_close(CPUX86State *env)
static const char *cc_op_str[] = {
"DYNAMIC",
"EFLAGS",
"MULB",
"MULW",
"MULL",
"MULQ",
"ADDB",
"ADDW",
"ADDL",
"ADDQ",
"ADCB",
"ADCW",
"ADCL",
"ADCQ",
"SUBB",
"SUBW",
"SUBL",
"SUBQ",
"SBBB",
"SBBW",
"SBBL",
"SBBQ",
"LOGICB",
"LOGICW",
"LOGICL",
"LOGICQ",
"INCB",
"INCW",
"INCL",
"INCQ",
"DECB",
"DECW",
"DECL",
"DECQ",
"SHLB",
"SHLW",
"SHLL",
"SHLQ",
"SARB",
"SARW",
"SARL",
"SARQ",
};
void cpu_dump_state(CPUState *env, FILE *f,
@ -177,55 +232,147 @@ void cpu_dump_state(CPUState *env, FILE *f,
static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
eflags = env->eflags;
cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
"ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
"EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
env->regs[R_EAX], env->regs[R_EBX], env->regs[R_ECX], env->regs[R_EDX],
env->regs[R_ESI], env->regs[R_EDI], env->regs[R_EBP], env->regs[R_ESP],
env->eip, eflags,
eflags & DF_MASK ? 'D' : '-',
eflags & CC_O ? 'O' : '-',
eflags & CC_S ? 'S' : '-',
eflags & CC_Z ? 'Z' : '-',
eflags & CC_A ? 'A' : '-',
eflags & CC_P ? 'P' : '-',
eflags & CC_C ? 'C' : '-',
env->hflags & HF_CPL_MASK,
(env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
(env->a20_mask >> 20) & 1);
for(i = 0; i < 6; i++) {
SegmentCache *sc = &env->segs[i];
cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
seg_name[i],
sc->selector,
(int)sc->base,
sc->limit,
sc->flags);
#ifdef TARGET_X86_64
if (env->hflags & HF_CS64_MASK) {
cpu_fprintf(f,
"RAX=%016llx RBX=%016llx RCX=%016llx RDX=%016llx\n"
"RSI=%016llx RDI=%016llx RBP=%016llx RSP=%016llx\n"
"R8 =%016llx R9 =%016llx R10=%016llx R11=%016llx\n"
"R12=%016llx R13=%016llx R14=%016llx R15=%016llx\n"
"RIP=%016llx RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
env->regs[R_EAX],
env->regs[R_EBX],
env->regs[R_ECX],
env->regs[R_EDX],
env->regs[R_ESI],
env->regs[R_EDI],
env->regs[R_EBP],
env->regs[R_ESP],
env->regs[8],
env->regs[9],
env->regs[10],
env->regs[11],
env->regs[12],
env->regs[13],
env->regs[14],
env->regs[15],
env->eip, eflags,
eflags & DF_MASK ? 'D' : '-',
eflags & CC_O ? 'O' : '-',
eflags & CC_S ? 'S' : '-',
eflags & CC_Z ? 'Z' : '-',
eflags & CC_A ? 'A' : '-',
eflags & CC_P ? 'P' : '-',
eflags & CC_C ? 'C' : '-',
env->hflags & HF_CPL_MASK,
(env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
(env->a20_mask >> 20) & 1);
} else
#endif
{
cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
"ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
"EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
(uint32_t)env->regs[R_EAX],
(uint32_t)env->regs[R_EBX],
(uint32_t)env->regs[R_ECX],
(uint32_t)env->regs[R_EDX],
(uint32_t)env->regs[R_ESI],
(uint32_t)env->regs[R_EDI],
(uint32_t)env->regs[R_EBP],
(uint32_t)env->regs[R_ESP],
(uint32_t)env->eip, eflags,
eflags & DF_MASK ? 'D' : '-',
eflags & CC_O ? 'O' : '-',
eflags & CC_S ? 'S' : '-',
eflags & CC_Z ? 'Z' : '-',
eflags & CC_A ? 'A' : '-',
eflags & CC_P ? 'P' : '-',
eflags & CC_C ? 'C' : '-',
env->hflags & HF_CPL_MASK,
(env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
(env->a20_mask >> 20) & 1);
}
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
for(i = 0; i < 6; i++) {
SegmentCache *sc = &env->segs[i];
cpu_fprintf(f, "%s =%04x %016llx %08x %08x\n",
seg_name[i],
sc->selector,
sc->base,
sc->limit,
sc->flags);
}
cpu_fprintf(f, "LDT=%04x %016llx %08x %08x\n",
env->ldt.selector,
env->ldt.base,
env->ldt.limit,
env->ldt.flags);
cpu_fprintf(f, "TR =%04x %016llx %08x %08x\n",
env->tr.selector,
env->tr.base,
env->tr.limit,
env->tr.flags);
cpu_fprintf(f, "GDT= %016llx %08x\n",
env->gdt.base, env->gdt.limit);
cpu_fprintf(f, "IDT= %016llx %08x\n",
env->idt.base, env->idt.limit);
cpu_fprintf(f, "CR0=%08x CR2=%016llx CR3=%016llx CR4=%08x\n",
(uint32_t)env->cr[0],
env->cr[2],
env->cr[3],
(uint32_t)env->cr[4]);
} else
#endif
{
for(i = 0; i < 6; i++) {
SegmentCache *sc = &env->segs[i];
cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
seg_name[i],
sc->selector,
(uint32_t)sc->base,
sc->limit,
sc->flags);
}
cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
env->ldt.selector,
(uint32_t)env->ldt.base,
env->ldt.limit,
env->ldt.flags);
cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
env->tr.selector,
(uint32_t)env->tr.base,
env->tr.limit,
env->tr.flags);
cpu_fprintf(f, "GDT= %08x %08x\n",
(uint32_t)env->gdt.base, env->gdt.limit);
cpu_fprintf(f, "IDT= %08x %08x\n",
(uint32_t)env->idt.base, env->idt.limit);
cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
(uint32_t)env->cr[0],
(uint32_t)env->cr[2],
(uint32_t)env->cr[3],
(uint32_t)env->cr[4]);
}
cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
env->ldt.selector,
(int)env->ldt.base,
env->ldt.limit,
env->ldt.flags);
cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
env->tr.selector,
(int)env->tr.base,
env->tr.limit,
env->tr.flags);
cpu_fprintf(f, "GDT= %08x %08x\n",
(int)env->gdt.base, env->gdt.limit);
cpu_fprintf(f, "IDT= %08x %08x\n",
(int)env->idt.base, env->idt.limit);
cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
env->cr[0], env->cr[2], env->cr[3], env->cr[4]);
if (flags & X86_DUMP_CCOP) {
if ((unsigned)env->cc_op < CC_OP_NB)
snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
else
snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
env->cc_src, env->cc_dst, cc_op_name);
#ifdef TARGET_X86_64
if (env->hflags & HF_CS64_MASK) {
cpu_fprintf(f, "CCS=%016llx CCD=%016llx CCO=%-8s\n",
env->cc_src, env->cc_dst,
cc_op_name);
} else
#endif
{
cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
(uint32_t)env->cc_src, (uint32_t)env->cc_dst,
cc_op_name);
}
}
if (flags & X86_DUMP_FPU) {
cpu_fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n",
@ -274,6 +421,24 @@ void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
(env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
tlb_flush(env, 1);
}
#ifdef TARGET_X86_64
if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
(env->efer & MSR_EFER_LME)) {
/* enter in long mode */
/* XXX: generate an exception */
if (!(env->cr[4] & CR4_PAE_MASK))
return;
env->efer |= MSR_EFER_LMA;
env->hflags |= HF_LMA_MASK;
} else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
(env->efer & MSR_EFER_LMA)) {
/* exit long mode */
env->efer &= ~MSR_EFER_LMA;
env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
env->eip &= 0xffffffff;
}
#endif
env->cr[0] = new_cr0 | CR0_ET_MASK;
/* update PE flag in hidden flags */
@ -286,12 +451,12 @@ void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
void cpu_x86_update_cr3(CPUX86State *env, uint32_t new_cr3)
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
env->cr[3] = new_cr3;
if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
printf("CR3 update: CR3=%08x\n", new_cr3);
printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
tlb_flush(env, 0);
}
@ -300,7 +465,7 @@ void cpu_x86_update_cr3(CPUX86State *env, uint32_t new_cr3)
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
printf("CR4 update: CR4=%08x\n", env->cr[4]);
printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
(env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
@ -315,22 +480,51 @@ void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
tlb_flush_page(env, addr);
}
static inline uint8_t *get_phys_mem_ptr(target_phys_addr_t addr)
{
/* XXX: incorrect */
return phys_ram_base + addr;
}
/* WARNING: addr must be aligned */
uint32_t ldl_phys_aligned(target_phys_addr_t addr)
{
uint8_t *ptr;
uint32_t val;
ptr = get_phys_mem_ptr(addr);
if (!ptr)
val = 0;
else
val = ldl_raw(ptr);
return val;
}
void stl_phys_aligned(target_phys_addr_t addr, uint32_t val)
{
uint8_t *ptr;
ptr = get_phys_mem_ptr(addr);
if (!ptr)
return;
stl_raw(ptr, val);
}
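A self-contained sketch of the read-modify-write pattern the page table walk below uses through these aligned accessors, with a plain array standing in for guest RAM; PG_ACCESSED_MASK is assumed to be the standard accessed bit (bit 5):

#include <stdint.h>
#include <stdio.h>

#define PG_PRESENT_MASK  (1u << 0)
#define PG_ACCESSED_MASK (1u << 5)     /* standard x86 accessed bit, assumed */

static uint32_t fake_ram[1024];        /* stand-in for phys_ram_base */

static uint32_t ldl_phys_aligned(uint32_t addr) { return fake_ram[addr >> 2]; }
static void stl_phys_aligned(uint32_t addr, uint32_t val) { fake_ram[addr >> 2] = val; }

int main(void)
{
    fake_ram[1] = PG_PRESENT_MASK;     /* a present, not-yet-accessed entry at addr 4 */

    uint32_t pde = ldl_phys_aligned(4);
    if ((pde & PG_PRESENT_MASK) && !(pde & PG_ACCESSED_MASK)) {
        pde |= PG_ACCESSED_MASK;       /* mark accessed and write back */
        stl_phys_aligned(4, pde);
    }
    printf("pde=%08x\n", fake_ram[1]); /* prints pde=00000021 */
    return 0;
}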
/* return value:
-1 = cannot handle fault
0 = nothing more to do
1 = generate PF fault
2 = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr,
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
int is_write, int is_user, int is_softmmu)
{
uint8_t *pde_ptr, *pte_ptr;
uint32_t pde, pte, virt_addr, ptep;
uint32_t pdpe_addr, pde_addr, pte_addr;
uint32_t pde, pte, ptep, pdpe;
int error_code, is_dirty, prot, page_size, ret;
unsigned long paddr, vaddr, page_offset;
unsigned long paddr, page_offset;
target_ulong vaddr, virt_addr;
#if defined(DEBUG_MMU)
printf("MMU fault: addr=0x%08x w=%d u=%d eip=%08x\n",
printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
addr, is_write, is_user, env->eip);
#endif
is_write &= 1;
@ -349,90 +543,166 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr,
goto do_mapping;
}
/* page directory entry */
pde_ptr = phys_ram_base +
(((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask);
pde = ldl_raw(pde_ptr);
if (!(pde & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
/* if PSE bit is set, then we use a 4MB page */
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
if (is_user) {
if (!(pde & PG_USER_MASK))
goto do_fault_protect;
if (is_write && !(pde & PG_RW_MASK))
goto do_fault_protect;
} else {
if ((env->cr[0] & CR0_WP_MASK) &&
is_write && !(pde & PG_RW_MASK))
goto do_fault_protect;
}
is_dirty = is_write && !(pde & PG_DIRTY_MASK);
if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
pde |= PG_ACCESSED_MASK;
if (is_dirty)
pde |= PG_DIRTY_MASK;
stl_raw(pde_ptr, pde);
}
pte = pde & ~0x003ff000; /* align to 4MB */
ptep = pte;
page_size = 4096 * 1024;
virt_addr = addr & ~0x003fffff;
} else {
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
stl_raw(pde_ptr, pde);
if (env->cr[4] & CR4_PAE_MASK) {
/* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
uint32_t pml4e_addr, pml4e;
int32_t sext;
/* XXX: handle user + rw rights */
/* XXX: handle NX flag */
/* test virtual address sign extension */
sext = (int64_t)addr >> 47;
if (sext != 0 && sext != -1) {
error_code = 0;
goto do_fault;
}
pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
env->a20_mask;
pml4e = ldl_phys_aligned(pml4e_addr);
if (!(pml4e & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
if (!(pml4e & PG_ACCESSED_MASK)) {
pml4e |= PG_ACCESSED_MASK;
stl_phys_aligned(pml4e_addr, pml4e);
}
pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
env->a20_mask;
pdpe = ldl_phys_aligned(pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
if (!(pdpe & PG_ACCESSED_MASK)) {
pdpe |= PG_ACCESSED_MASK;
stl_phys_aligned(pdpe_addr, pdpe);
}
} else
#endif
{
pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
env->a20_mask;
pdpe = ldl_phys_aligned(pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
}
/* page directory entry */
pte_ptr = phys_ram_base +
(((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask);
pte = ldl_raw(pte_ptr);
if (!(pte & PG_PRESENT_MASK)) {
pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
env->a20_mask;
pde = ldl_phys_aligned(pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
/* combine pde and pte user and rw protections */
ptep = pte & pde;
if (is_user) {
if (!(ptep & PG_USER_MASK))
goto do_fault_protect;
if (is_write && !(ptep & PG_RW_MASK))
goto do_fault_protect;
if (pde & PG_PSE_MASK) {
/* 2 MB page */
page_size = 2048 * 1024;
goto handle_big_page;
} else {
if ((env->cr[0] & CR0_WP_MASK) &&
is_write && !(ptep & PG_RW_MASK))
goto do_fault_protect;
/* 4 KB page */
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
stl_phys_aligned(pde_addr, pde);
}
pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
env->a20_mask;
goto handle_4k_page;
}
is_dirty = is_write && !(pte & PG_DIRTY_MASK);
if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
pte |= PG_ACCESSED_MASK;
if (is_dirty)
pte |= PG_DIRTY_MASK;
stl_raw(pte_ptr, pte);
} else {
/* page directory entry */
pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) &
env->a20_mask;
pde = ldl_phys_aligned(pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
page_size = 4096;
virt_addr = addr & ~0xfff;
}
/* if PSE bit is set, then we use a 4MB page */
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
page_size = 4096 * 1024;
handle_big_page:
if (is_user) {
if (!(pde & PG_USER_MASK))
goto do_fault_protect;
if (is_write && !(pde & PG_RW_MASK))
goto do_fault_protect;
} else {
if ((env->cr[0] & CR0_WP_MASK) &&
is_write && !(pde & PG_RW_MASK))
goto do_fault_protect;
}
is_dirty = is_write && !(pde & PG_DIRTY_MASK);
if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
pde |= PG_ACCESSED_MASK;
if (is_dirty)
pde |= PG_DIRTY_MASK;
stl_phys_aligned(pde_addr, pde);
}
pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
ptep = pte;
virt_addr = addr & ~(page_size - 1);
} else {
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
stl_phys_aligned(pde_addr, pde);
}
/* the page can be put in the TLB */
prot = PAGE_READ;
if (pte & PG_DIRTY_MASK) {
/* only set write access if already dirty... otherwise wait
for dirty access */
if (is_user) {
if (ptep & PG_RW_MASK)
prot |= PAGE_WRITE;
} else {
if (!(env->cr[0] & CR0_WP_MASK) ||
(ptep & PG_RW_MASK))
prot |= PAGE_WRITE;
/* page directory entry */
pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
env->a20_mask;
handle_4k_page:
pte = ldl_phys_aligned(pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
/* combine pde and pte user and rw protections */
ptep = pte & pde;
if (is_user) {
if (!(ptep & PG_USER_MASK))
goto do_fault_protect;
if (is_write && !(ptep & PG_RW_MASK))
goto do_fault_protect;
} else {
if ((env->cr[0] & CR0_WP_MASK) &&
is_write && !(ptep & PG_RW_MASK))
goto do_fault_protect;
}
is_dirty = is_write && !(pte & PG_DIRTY_MASK);
if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
pte |= PG_ACCESSED_MASK;
if (is_dirty)
pte |= PG_DIRTY_MASK;
stl_phys_aligned(pte_addr, pte);
}
page_size = 4096;
virt_addr = addr & ~0xfff;
}
/* the page can be put in the TLB */
prot = PAGE_READ;
if (pte & PG_DIRTY_MASK) {
/* only set write access if already dirty... otherwise wait
for dirty access */
if (is_user) {
if (ptep & PG_RW_MASK)
prot |= PAGE_WRITE;
} else {
if (!(env->cr[0] & CR0_WP_MASK) ||
(ptep & PG_RW_MASK))
prot |= PAGE_WRITE;
}
}
}
do_mapping:
pte = pte & env->a20_mask;
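The long mode walk above splits the linear address into four 9 bit table indexes plus a 12 bit page offset, after checking that the address is canonical. A standalone sketch of just that address arithmetic (table loads and permission checks omitted):

#include <stdint.h>
#include <stdio.h>

/* bits 63..47 must all equal bit 47, which is what the sext test above checks */
static int is_canonical(uint64_t addr)
{
    int64_t sext = (int64_t)addr >> 47;
    return sext == 0 || sext == -1;
}

static void split_la48(uint64_t addr)
{
    unsigned pml4 = (addr >> 39) & 0x1ff;
    unsigned pdpt = (addr >> 30) & 0x1ff;
    unsigned pd   = (addr >> 21) & 0x1ff;
    unsigned pt   = (addr >> 12) & 0x1ff;
    unsigned off  = addr & 0xfff;

    printf("pml4=%u pdpt=%u pd=%u pt=%u off=0x%03x\n", pml4, pdpt, pd, pt, off);
}

int main(void)
{
    uint64_t addr = 0x00007f1234567abcULL;   /* arbitrary canonical user address */
    if (is_canonical(addr))
        split_la48(addr);
    return 0;
}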

File diff suppressed because it is too large

@ -20,29 +20,56 @@
*/
void OPPROTO glue(op_movl_A0,REGNAME)(void)
{
A0 = REG;
A0 = (uint32_t)REG;
}
void OPPROTO glue(op_addl_A0,REGNAME)(void)
{
A0 += REG;
A0 = (uint32_t)(A0 + REG);
}
void OPPROTO glue(glue(op_addl_A0,REGNAME),_s1)(void)
{
A0 += REG << 1;
A0 = (uint32_t)(A0 + (REG << 1));
}
void OPPROTO glue(glue(op_addl_A0,REGNAME),_s2)(void)
{
A0 += REG << 2;
A0 = (uint32_t)(A0 + (REG << 2));
}
void OPPROTO glue(glue(op_addl_A0,REGNAME),_s3)(void)
{
A0 += REG << 3;
A0 = (uint32_t)(A0 + (REG << 3));
}
#ifdef TARGET_X86_64
void OPPROTO glue(op_movq_A0,REGNAME)(void)
{
A0 = REG;
}
void OPPROTO glue(op_addq_A0,REGNAME)(void)
{
A0 = (A0 + REG);
}
void OPPROTO glue(glue(op_addq_A0,REGNAME),_s1)(void)
{
A0 = (A0 + (REG << 1));
}
void OPPROTO glue(glue(op_addq_A0,REGNAME),_s2)(void)
{
A0 = (A0 + (REG << 2));
}
void OPPROTO glue(glue(op_addq_A0,REGNAME),_s3)(void)
{
A0 = (A0 + (REG << 3));
}
#endif
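The (uint32_t) casts in the 32 bit address ops matter once A0 is a 64 bit target_ulong: a 32 bit address-size add must wrap at 4 GiB instead of carrying into bit 32. A small demonstration of that difference (the typedef is a stand-in for the 64 bit target build):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t target_ulong;           /* 64 bit target word, as on x86_64 */

int main(void)
{
    target_ulong a0  = 0xfffffff0u;      /* just below the 4 GiB wrap point */
    target_ulong reg = 0x20;

    target_ulong carried = a0 + reg;              /* 0x100000010: carries into bit 32 */
    target_ulong wrapped = (uint32_t)(a0 + reg);  /* 0x00000010: wraps like a 32 bit add */

    printf("%llx %llx\n",
           (unsigned long long)carried, (unsigned long long)wrapped);
    return 0;
}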
void OPPROTO glue(op_movl_T0,REGNAME)(void)
{
T0 = REG;
@ -65,72 +92,99 @@ void OPPROTO glue(op_movh_T1,REGNAME)(void)
void OPPROTO glue(glue(op_movl,REGNAME),_T0)(void)
{
REG = T0;
REG = (uint32_t)T0;
}
void OPPROTO glue(glue(op_movl,REGNAME),_T1)(void)
{
REG = T1;
REG = (uint32_t)T1;
}
void OPPROTO glue(glue(op_movl,REGNAME),_A0)(void)
{
REG = (uint32_t)A0;
}
#ifdef TARGET_X86_64
void OPPROTO glue(glue(op_movq,REGNAME),_T0)(void)
{
REG = T0;
}
void OPPROTO glue(glue(op_movq,REGNAME),_T1)(void)
{
REG = T1;
}
void OPPROTO glue(glue(op_movq,REGNAME),_A0)(void)
{
REG = A0;
}
#endif
/* mov T1 to REG if T0 is true */
void OPPROTO glue(glue(op_cmovw,REGNAME),_T1_T0)(void)
{
if (T0)
REG = (REG & 0xffff0000) | (T1 & 0xffff);
REG = (REG & ~0xffff) | (T1 & 0xffff);
FORCE_RET();
}
void OPPROTO glue(glue(op_cmovl,REGNAME),_T1_T0)(void)
{
if (T0)
REG = (uint32_t)T1;
FORCE_RET();
}
#ifdef TARGET_X86_64
void OPPROTO glue(glue(op_cmovq,REGNAME),_T1_T0)(void)
{
if (T0)
REG = T1;
FORCE_RET();
}
#endif
/* NOTE: T0 high order bits are ignored */
void OPPROTO glue(glue(op_movw,REGNAME),_T0)(void)
{
REG = (REG & 0xffff0000) | (T0 & 0xffff);
REG = (REG & ~0xffff) | (T0 & 0xffff);
}
/* NOTE: T0 high order bits are ignored */
void OPPROTO glue(glue(op_movw,REGNAME),_T1)(void)
{
REG = (REG & 0xffff0000) | (T1 & 0xffff);
REG = (REG & ~0xffff) | (T1 & 0xffff);
}
/* NOTE: A0 high order bits are ignored */
void OPPROTO glue(glue(op_movw,REGNAME),_A0)(void)
{
REG = (REG & 0xffff0000) | (A0 & 0xffff);
REG = (REG & ~0xffff) | (A0 & 0xffff);
}
/* NOTE: T0 high order bits are ignored */
void OPPROTO glue(glue(op_movb,REGNAME),_T0)(void)
{
REG = (REG & 0xffffff00) | (T0 & 0xff);
REG = (REG & ~0xff) | (T0 & 0xff);
}
/* NOTE: T0 high order bits are ignored */
void OPPROTO glue(glue(op_movh,REGNAME),_T0)(void)
{
REG = (REG & 0xffff00ff) | ((T0 & 0xff) << 8);
REG = (REG & ~0xff00) | ((T0 & 0xff) << 8);
}
/* NOTE: T1 high order bits are ignored */
void OPPROTO glue(glue(op_movb,REGNAME),_T1)(void)
{
REG = (REG & 0xffffff00) | (T1 & 0xff);
REG = (REG & ~0xff) | (T1 & 0xff);
}
/* NOTE: T1 high order bits are ignored */
void OPPROTO glue(glue(op_movh,REGNAME),_T1)(void)
{
REG = (REG & 0xffff00ff) | ((T1 & 0xff) << 8);
REG = (REG & ~0xff00) | ((T1 & 0xff) << 8);
}

@ -1,83 +1,134 @@
void OPPROTO glue(glue(op_ldub, MEMSUFFIX), _T0_A0)(void)
{
T0 = glue(ldub, MEMSUFFIX)((uint8_t *)A0);
T0 = glue(ldub, MEMSUFFIX)(A0);
}
void OPPROTO glue(glue(op_ldsb, MEMSUFFIX), _T0_A0)(void)
{
T0 = glue(ldsb, MEMSUFFIX)((int8_t *)A0);
T0 = glue(ldsb, MEMSUFFIX)(A0);
}
void OPPROTO glue(glue(op_lduw, MEMSUFFIX), _T0_A0)(void)
{
T0 = glue(lduw, MEMSUFFIX)((uint8_t *)A0);
T0 = glue(lduw, MEMSUFFIX)(A0);
}
void OPPROTO glue(glue(op_ldsw, MEMSUFFIX), _T0_A0)(void)
{
T0 = glue(ldsw, MEMSUFFIX)((int8_t *)A0);
T0 = glue(ldsw, MEMSUFFIX)(A0);
}
void OPPROTO glue(glue(op_ldl, MEMSUFFIX), _T0_A0)(void)
{
T0 = glue(ldl, MEMSUFFIX)((uint8_t *)A0);
T0 = (uint32_t)glue(ldl, MEMSUFFIX)(A0);
}
void OPPROTO glue(glue(op_ldub, MEMSUFFIX), _T1_A0)(void)
{
T1 = glue(ldub, MEMSUFFIX)((uint8_t *)A0);
T1 = glue(ldub, MEMSUFFIX)(A0);
}
void OPPROTO glue(glue(op_ldsb, MEMSUFFIX), _T1_A0)(void)
{
T1 = glue(ldsb, MEMSUFFIX)((int8_t *)A0);
T1 = glue(ldsb, MEMSUFFIX)(A0);
}
void OPPROTO glue(glue(op_lduw, MEMSUFFIX), _T1_A0)(void)
{
T1 = glue(lduw, MEMSUFFIX)((uint8_t *)A0);
T1 = glue(lduw, MEMSUFFIX)(A0);
}
void OPPROTO glue(glue(op_ldsw, MEMSUFFIX), _T1_A0)(void)
{
T1 = glue(ldsw, MEMSUFFIX)((int8_t *)A0);
T1 = glue(ldsw, MEMSUFFIX)(A0);
}
void OPPROTO glue(glue(op_ldl, MEMSUFFIX), _T1_A0)(void)
{
T1 = glue(ldl, MEMSUFFIX)((uint8_t *)A0);
T1 = glue(ldl, MEMSUFFIX)(A0);
}
void OPPROTO glue(glue(op_stb, MEMSUFFIX), _T0_A0)(void)
{
glue(stb, MEMSUFFIX)((uint8_t *)A0, T0);
glue(stb, MEMSUFFIX)(A0, T0);
}
void OPPROTO glue(glue(op_stw, MEMSUFFIX), _T0_A0)(void)
{
glue(stw, MEMSUFFIX)((uint8_t *)A0, T0);
glue(stw, MEMSUFFIX)(A0, T0);
}
void OPPROTO glue(glue(op_stl, MEMSUFFIX), _T0_A0)(void)
{
glue(stl, MEMSUFFIX)((uint8_t *)A0, T0);
glue(stl, MEMSUFFIX)(A0, T0);
}
#if 0
void OPPROTO glue(glue(op_stb, MEMSUFFIX), _T1_A0)(void)
{
glue(stb, MEMSUFFIX)((uint8_t *)A0, T1);
glue(stb, MEMSUFFIX)(A0, T1);
}
#endif
void OPPROTO glue(glue(op_stw, MEMSUFFIX), _T1_A0)(void)
{
glue(stw, MEMSUFFIX)((uint8_t *)A0, T1);
glue(stw, MEMSUFFIX)(A0, T1);
}
void OPPROTO glue(glue(op_stl, MEMSUFFIX), _T1_A0)(void)
{
glue(stl, MEMSUFFIX)((uint8_t *)A0, T1);
glue(stl, MEMSUFFIX)(A0, T1);
}
/* SSE support */
void OPPROTO glue(glue(op_ldo, MEMSUFFIX), _env_A0)(void)
{
XMMReg *p;
p = (XMMReg *)((char *)env + PARAM1);
/* XXX: host endianness ? */
p->u.q[0] = glue(ldq, MEMSUFFIX)(A0);
p->u.q[1] = glue(ldq, MEMSUFFIX)(A0 + 8);
}
void OPPROTO glue(glue(op_sto, MEMSUFFIX), _env_A0)(void)
{
XMMReg *p;
p = (XMMReg *)((char *)env + PARAM1);
/* XXX: host endianness ? */
glue(stq, MEMSUFFIX)(A0, p->u.q[0]);
glue(stq, MEMSUFFIX)(A0 + 8, p->u.q[1]);
}
#ifdef TARGET_X86_64
void OPPROTO glue(glue(op_ldsl, MEMSUFFIX), _T0_A0)(void)
{
T0 = (int32_t)glue(ldl, MEMSUFFIX)(A0);
}
void OPPROTO glue(glue(op_ldsl, MEMSUFFIX), _T1_A0)(void)
{
T1 = (int32_t)glue(ldl, MEMSUFFIX)(A0);
}
void OPPROTO glue(glue(op_ldq, MEMSUFFIX), _T0_A0)(void)
{
T0 = glue(ldq, MEMSUFFIX)(A0);
}
void OPPROTO glue(glue(op_ldq, MEMSUFFIX), _T1_A0)(void)
{
T1 = glue(ldq, MEMSUFFIX)(A0);
}
void OPPROTO glue(glue(op_stq, MEMSUFFIX), _T0_A0)(void)
{
glue(stq, MEMSUFFIX)(A0, T0);
}
void OPPROTO glue(glue(op_stq, MEMSUFFIX), _T1_A0)(void)
{
glue(stq, MEMSUFFIX)(A0, T1);
}
#endif
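The new op_ldsl ops load 32 bits and sign extend into the 64 bit temporary, while the existing op_ldl zero extends; a standalone comparison of the two cast chains:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t mem = 0x80000001u;                     /* negative as a signed 32 bit value */

    uint64_t zero_extended = mem;                             /* ldl  */
    uint64_t sign_extended = (uint64_t)(int64_t)(int32_t)mem; /* ldsl */

    printf("%016llx %016llx\n",
           (unsigned long long)zero_extended,
           (unsigned long long)sign_extended);      /* ...80000001  ffffffff80000001 */
    return 0;
}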
#undef MEMSUFFIX

@ -20,7 +20,12 @@
*/
#define DATA_BITS (1 << (3 + SHIFT))
#define SHIFT_MASK (DATA_BITS - 1)
#define SIGN_MASK (1 << (DATA_BITS - 1))
#define SIGN_MASK (((target_ulong)1) << (DATA_BITS - 1))
#if DATA_BITS <= 32
#define SHIFT1_MASK 0x1f
#else
#define SHIFT1_MASK 0x3f
#endif
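SHIFT1_MASK follows the hardware rule that shift counts are taken modulo 32 for 8/16/32 bit operands and modulo 64 for 64 bit operands; a two-line demonstration of why the 64 bit case needs 0x3f:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int count = 33;
    uint64_t q = 1;

    printf("%llx\n", (unsigned long long)(q << (count & 0x3f)));  /* 200000000: shifts by 33 */
    printf("%x\n", 1u << (count & 0x1f));                         /* 2: 32 bit op shifts by 1 */
    return 0;
}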
#if DATA_BITS == 8
#define SUFFIX b
@ -37,6 +42,11 @@
#define DATA_TYPE uint32_t
#define DATA_STYPE int32_t
#define DATA_MASK 0xffffffff
#elif DATA_BITS == 64
#define SUFFIX q
#define DATA_TYPE uint64_t
#define DATA_STYPE int64_t
#define DATA_MASK 0xffffffffffffffff
#else
#error unhandled operand size
#endif
@ -46,7 +56,7 @@
static int glue(compute_all_add, SUFFIX)(void)
{
int cf, pf, af, zf, sf, of;
int src1, src2;
target_long src1, src2;
src1 = CC_SRC;
src2 = CC_DST - CC_SRC;
cf = (DATA_TYPE)CC_DST < (DATA_TYPE)src1;
@ -60,7 +70,8 @@ static int glue(compute_all_add, SUFFIX)(void)
static int glue(compute_c_add, SUFFIX)(void)
{
int src1, cf;
int cf;
target_long src1;
src1 = CC_SRC;
cf = (DATA_TYPE)CC_DST < (DATA_TYPE)src1;
return cf;
@ -69,7 +80,7 @@ static int glue(compute_c_add, SUFFIX)(void)
static int glue(compute_all_adc, SUFFIX)(void)
{
int cf, pf, af, zf, sf, of;
int src1, src2;
target_long src1, src2;
src1 = CC_SRC;
src2 = CC_DST - CC_SRC - 1;
cf = (DATA_TYPE)CC_DST <= (DATA_TYPE)src1;
@ -83,7 +94,8 @@ static int glue(compute_all_adc, SUFFIX)(void)
static int glue(compute_c_adc, SUFFIX)(void)
{
int src1, cf;
int cf;
target_long src1;
src1 = CC_SRC;
cf = (DATA_TYPE)CC_DST <= (DATA_TYPE)src1;
return cf;
@ -92,7 +104,7 @@ static int glue(compute_c_adc, SUFFIX)(void)
static int glue(compute_all_sub, SUFFIX)(void)
{
int cf, pf, af, zf, sf, of;
int src1, src2;
target_long src1, src2;
src1 = CC_DST + CC_SRC;
src2 = CC_SRC;
cf = (DATA_TYPE)src1 < (DATA_TYPE)src2;
@ -106,7 +118,8 @@ static int glue(compute_all_sub, SUFFIX)(void)
static int glue(compute_c_sub, SUFFIX)(void)
{
int src1, src2, cf;
int cf;
target_long src1, src2;
src1 = CC_DST + CC_SRC;
src2 = CC_SRC;
cf = (DATA_TYPE)src1 < (DATA_TYPE)src2;
@ -116,7 +129,7 @@ static int glue(compute_c_sub, SUFFIX)(void)
static int glue(compute_all_sbb, SUFFIX)(void)
{
int cf, pf, af, zf, sf, of;
int src1, src2;
target_long src1, src2;
src1 = CC_DST + CC_SRC + 1;
src2 = CC_SRC;
cf = (DATA_TYPE)src1 <= (DATA_TYPE)src2;
@ -130,7 +143,8 @@ static int glue(compute_all_sbb, SUFFIX)(void)
static int glue(compute_c_sbb, SUFFIX)(void)
{
int src1, src2, cf;
int cf;
target_long src1, src2;
src1 = CC_DST + CC_SRC + 1;
src2 = CC_SRC;
cf = (DATA_TYPE)src1 <= (DATA_TYPE)src2;
@ -157,7 +171,7 @@ static int glue(compute_c_logic, SUFFIX)(void)
static int glue(compute_all_inc, SUFFIX)(void)
{
int cf, pf, af, zf, sf, of;
int src1, src2;
target_long src1, src2;
src1 = CC_DST - 1;
src2 = 1;
cf = CC_SRC;
@ -179,7 +193,7 @@ static int glue(compute_c_inc, SUFFIX)(void)
static int glue(compute_all_dec, SUFFIX)(void)
{
int cf, pf, af, zf, sf, of;
int src1, src2;
target_long src1, src2;
src1 = CC_DST + 1;
src2 = 1;
cf = CC_SRC;
@ -187,7 +201,7 @@ static int glue(compute_all_dec, SUFFIX)(void)
af = (CC_DST ^ src1 ^ src2) & 0x10;
zf = ((DATA_TYPE)CC_DST == 0) << 6;
sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80;
of = ((CC_DST & DATA_MASK) == ((uint32_t)SIGN_MASK - 1)) << 11;
of = ((CC_DST & DATA_MASK) == ((target_ulong)SIGN_MASK - 1)) << 11;
return cf | pf | af | zf | sf | of;
}
@ -256,71 +270,66 @@ static int glue(compute_all_mul, SUFFIX)(void)
void OPPROTO glue(op_jb_sub, SUFFIX)(void)
{
int src1, src2;
target_long src1, src2;
src1 = CC_DST + CC_SRC;
src2 = CC_SRC;
if ((DATA_TYPE)src1 < (DATA_TYPE)src2)
JUMP_TB(glue(op_jb_sub, SUFFIX), PARAM1, 0, PARAM2);
else
JUMP_TB(glue(op_jb_sub, SUFFIX), PARAM1, 1, PARAM3);
GOTO_LABEL_PARAM(1);
FORCE_RET();
}
void OPPROTO glue(op_jz_sub, SUFFIX)(void)
{
if ((DATA_TYPE)CC_DST == 0)
JUMP_TB(glue(op_jz_sub, SUFFIX), PARAM1, 0, PARAM2);
else
JUMP_TB(glue(op_jz_sub, SUFFIX), PARAM1, 1, PARAM3);
GOTO_LABEL_PARAM(1);
FORCE_RET();
}
void OPPROTO glue(op_jnz_sub, SUFFIX)(void)
{
if ((DATA_TYPE)CC_DST != 0)
GOTO_LABEL_PARAM(1);
FORCE_RET();
}
void OPPROTO glue(op_jbe_sub, SUFFIX)(void)
{
int src1, src2;
target_long src1, src2;
src1 = CC_DST + CC_SRC;
src2 = CC_SRC;
if ((DATA_TYPE)src1 <= (DATA_TYPE)src2)
JUMP_TB(glue(op_jbe_sub, SUFFIX), PARAM1, 0, PARAM2);
else
JUMP_TB(glue(op_jbe_sub, SUFFIX), PARAM1, 1, PARAM3);
GOTO_LABEL_PARAM(1);
FORCE_RET();
}
void OPPROTO glue(op_js_sub, SUFFIX)(void)
{
if (CC_DST & SIGN_MASK)
JUMP_TB(glue(op_js_sub, SUFFIX), PARAM1, 0, PARAM2);
else
JUMP_TB(glue(op_js_sub, SUFFIX), PARAM1, 1, PARAM3);
GOTO_LABEL_PARAM(1);
FORCE_RET();
}
void OPPROTO glue(op_jl_sub, SUFFIX)(void)
{
int src1, src2;
target_long src1, src2;
src1 = CC_DST + CC_SRC;
src2 = CC_SRC;
if ((DATA_STYPE)src1 < (DATA_STYPE)src2)
JUMP_TB(glue(op_jl_sub, SUFFIX), PARAM1, 0, PARAM2);
else
JUMP_TB(glue(op_jl_sub, SUFFIX), PARAM1, 1, PARAM3);
GOTO_LABEL_PARAM(1);
FORCE_RET();
}
void OPPROTO glue(op_jle_sub, SUFFIX)(void)
{
int src1, src2;
target_long src1, src2;
src1 = CC_DST + CC_SRC;
src2 = CC_SRC;
if ((DATA_STYPE)src1 <= (DATA_STYPE)src2)
JUMP_TB(glue(op_jle_sub, SUFFIX), PARAM1, 0, PARAM2);
else
JUMP_TB(glue(op_jle_sub, SUFFIX), PARAM1, 1, PARAM3);
GOTO_LABEL_PARAM(1);
FORCE_RET();
}
@ -330,50 +339,33 @@ void OPPROTO glue(op_jle_sub, SUFFIX)(void)
void OPPROTO glue(op_loopnz, SUFFIX)(void)
{
unsigned int tmp;
int eflags;
eflags = cc_table[CC_OP].compute_all();
tmp = (ECX - 1) & DATA_MASK;
ECX = (ECX & ~DATA_MASK) | tmp;
if (tmp != 0 && !(eflags & CC_Z))
EIP = PARAM1;
else
EIP = PARAM2;
if ((DATA_TYPE)ECX != 0 && !(eflags & CC_Z))
GOTO_LABEL_PARAM(1);
FORCE_RET();
}
void OPPROTO glue(op_loopz, SUFFIX)(void)
{
unsigned int tmp;
int eflags;
eflags = cc_table[CC_OP].compute_all();
tmp = (ECX - 1) & DATA_MASK;
ECX = (ECX & ~DATA_MASK) | tmp;
if (tmp != 0 && (eflags & CC_Z))
EIP = PARAM1;
else
EIP = PARAM2;
if ((DATA_TYPE)ECX != 0 && (eflags & CC_Z))
GOTO_LABEL_PARAM(1);
FORCE_RET();
}
void OPPROTO glue(op_loop, SUFFIX)(void)
{
unsigned int tmp;
tmp = (ECX - 1) & DATA_MASK;
ECX = (ECX & ~DATA_MASK) | tmp;
if (tmp != 0)
EIP = PARAM1;
else
EIP = PARAM2;
FORCE_RET();
}
void OPPROTO glue(op_jecxz, SUFFIX)(void)
void OPPROTO glue(op_jz_ecx, SUFFIX)(void)
{
if ((DATA_TYPE)ECX == 0)
EIP = PARAM1;
else
EIP = PARAM2;
GOTO_LABEL_PARAM(1);
FORCE_RET();
}
void OPPROTO glue(op_jnz_ecx, SUFFIX)(void)
{
if ((DATA_TYPE)ECX != 0)
GOTO_LABEL_PARAM(1);
FORCE_RET();
}
@ -383,7 +375,7 @@ void OPPROTO glue(op_jecxz, SUFFIX)(void)
void OPPROTO glue(op_setb_T0_sub, SUFFIX)(void)
{
int src1, src2;
target_long src1, src2;
src1 = CC_DST + CC_SRC;
src2 = CC_SRC;
@ -397,7 +389,7 @@ void OPPROTO glue(op_setz_T0_sub, SUFFIX)(void)
void OPPROTO glue(op_setbe_T0_sub, SUFFIX)(void)
{
int src1, src2;
target_long src1, src2;
src1 = CC_DST + CC_SRC;
src2 = CC_SRC;
@ -411,7 +403,7 @@ void OPPROTO glue(op_sets_T0_sub, SUFFIX)(void)
void OPPROTO glue(op_setl_T0_sub, SUFFIX)(void)
{
int src1, src2;
target_long src1, src2;
src1 = CC_DST + CC_SRC;
src2 = CC_SRC;
@ -420,7 +412,7 @@ void OPPROTO glue(op_setl_T0_sub, SUFFIX)(void)
void OPPROTO glue(op_setle_T0_sub, SUFFIX)(void)
{
int src1, src2;
target_long src1, src2;
src1 = CC_DST + CC_SRC;
src2 = CC_SRC;
@ -432,7 +424,7 @@ void OPPROTO glue(op_setle_T0_sub, SUFFIX)(void)
void OPPROTO glue(glue(op_shl, SUFFIX), _T0_T1)(void)
{
int count;
count = T1 & 0x1f;
count = T1 & SHIFT1_MASK;
T0 = T0 << count;
FORCE_RET();
}
@ -440,7 +432,7 @@ void OPPROTO glue(glue(op_shl, SUFFIX), _T0_T1)(void)
void OPPROTO glue(glue(op_shr, SUFFIX), _T0_T1)(void)
{
int count;
count = T1 & 0x1f;
count = T1 & SHIFT1_MASK;
T0 &= DATA_MASK;
T0 = T0 >> count;
FORCE_RET();
@ -448,8 +440,10 @@ void OPPROTO glue(glue(op_shr, SUFFIX), _T0_T1)(void)
void OPPROTO glue(glue(op_sar, SUFFIX), _T0_T1)(void)
{
int count, src;
count = T1 & 0x1f;
int count;
target_long src;
count = T1 & SHIFT1_MASK;
src = (DATA_STYPE)T0;
T0 = src >> count;
FORCE_RET();
@ -484,7 +478,7 @@ void OPPROTO glue(glue(op_bts, SUFFIX), _T0_T1_cc)(void)
int count;
count = T1 & SHIFT_MASK;
T1 = T0 >> count;
T0 |= (1 << count);
T0 |= (((target_long)1) << count);
}
void OPPROTO glue(glue(op_btr, SUFFIX), _T0_T1_cc)(void)
@ -492,7 +486,7 @@ void OPPROTO glue(glue(op_btr, SUFFIX), _T0_T1_cc)(void)
int count;
count = T1 & SHIFT_MASK;
T1 = T0 >> count;
T0 &= ~(1 << count);
T0 &= ~(((target_long)1) << count);
}
void OPPROTO glue(glue(op_btc, SUFFIX), _T0_T1_cc)(void)
@ -500,12 +494,19 @@ void OPPROTO glue(glue(op_btc, SUFFIX), _T0_T1_cc)(void)
int count;
count = T1 & SHIFT_MASK;
T1 = T0 >> count;
T0 ^= (1 << count);
T0 ^= (((target_long)1) << count);
}
void OPPROTO glue(glue(op_add_bit, SUFFIX), _A0_T1)(void)
{
A0 += ((DATA_STYPE)T1 >> (3 + SHIFT)) << SHIFT;
}
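op_add_bit is presumably used for the memory forms of bt/bts/btr/btc: it adds to A0 the word displacement selected by the (signed) bit offset, while the low bits pick the bit inside that word (masked later by SHIFT_MASK). A worked example for a 32 bit operand (SHIFT == 2):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int shift = 2;                    /* 32 bit operand size */
    int32_t bit_offset = 100;         /* e.g. bt dword [base], 100 */

    long byte_disp   = ((long)bit_offset >> (3 + shift)) << shift;   /* 12 */
    int  bit_in_word = bit_offset & ((1 << (3 + shift)) - 1);        /* 4  */

    printf("disp=%ld bit=%d\n", byte_disp, bit_in_word);   /* 3*32 + 4 == 100 */
    return 0;
}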
void OPPROTO glue(glue(op_bsf, SUFFIX), _T0_cc)(void)
{
int res, count;
int count;
target_long res;
res = T0 & DATA_MASK;
if (res != 0) {
count = 0;
@ -523,7 +524,9 @@ void OPPROTO glue(glue(op_bsf, SUFFIX), _T0_cc)(void)
void OPPROTO glue(glue(op_bsr, SUFFIX), _T0_cc)(void)
{
int res, count;
int count;
target_long res;
res = T0 & DATA_MASK;
if (res != 0) {
count = DATA_BITS - 1;
@ -555,70 +558,8 @@ void OPPROTO glue(op_movl_T0_Dshift, SUFFIX)(void)
T0 = DF << SHIFT;
}
void OPPROTO glue(op_string_jz_sub, SUFFIX)(void)
{
if ((DATA_TYPE)CC_DST == 0)
JUMP_TB2(glue(op_string_jz_sub, SUFFIX), PARAM1, 3);
FORCE_RET();
}
void OPPROTO glue(op_string_jnz_sub, SUFFIX)(void)
{
if ((DATA_TYPE)CC_DST != 0)
JUMP_TB2(glue(op_string_jnz_sub, SUFFIX), PARAM1, 3);
FORCE_RET();
}
void OPPROTO glue(glue(op_string_jz_sub, SUFFIX), _im)(void)
{
if ((DATA_TYPE)CC_DST == 0) {
EIP = PARAM1;
if (env->eflags & TF_MASK) {
raise_exception(EXCP01_SSTP);
}
T0 = 0;
EXIT_TB();
}
FORCE_RET();
}
void OPPROTO glue(glue(op_string_jnz_sub, SUFFIX), _im)(void)
{
if ((DATA_TYPE)CC_DST != 0) {
EIP = PARAM1;
if (env->eflags & TF_MASK) {
raise_exception(EXCP01_SSTP);
}
T0 = 0;
EXIT_TB();
}
FORCE_RET();
}
#if DATA_BITS >= 16
void OPPROTO glue(op_jz_ecx, SUFFIX)(void)
{
if ((DATA_TYPE)ECX == 0)
JUMP_TB(glue(op_jz_ecx, SUFFIX), PARAM1, 1, PARAM2);
FORCE_RET();
}
void OPPROTO glue(glue(op_jz_ecx, SUFFIX), _im)(void)
{
if ((DATA_TYPE)ECX == 0) {
EIP = PARAM1;
if (env->eflags & TF_MASK) {
raise_exception(EXCP01_SSTP);
}
T0 = 0;
EXIT_TB();
}
FORCE_RET();
}
#endif
/* port I/O */
#if DATA_BITS <= 32
void OPPROTO glue(glue(op_out, SUFFIX), _T0_T1)(void)
{
glue(cpu_out, SUFFIX)(env, T0, T1 & DATA_MASK);
@ -648,9 +589,11 @@ void OPPROTO glue(glue(op_check_io, SUFFIX), _DX)(void)
{
glue(glue(check_io, SUFFIX), _DX)();
}
#endif
#undef DATA_BITS
#undef SHIFT_MASK
#undef SHIFT1_MASK
#undef SIGN_MASK
#undef DATA_TYPE
#undef DATA_STYPE

@ -28,6 +28,8 @@
#define MEM_SUFFIX w_raw
#elif DATA_BITS == 32
#define MEM_SUFFIX l_raw
#elif DATA_BITS == 64
#define MEM_SUFFIX q_raw
#endif
#elif MEM_WRITE == 1
@ -38,6 +40,8 @@
#define MEM_SUFFIX w_kernel
#elif DATA_BITS == 32
#define MEM_SUFFIX l_kernel
#elif DATA_BITS == 64
#define MEM_SUFFIX q_kernel
#endif
#elif MEM_WRITE == 2
@ -48,6 +52,8 @@
#define MEM_SUFFIX w_user
#elif DATA_BITS == 32
#define MEM_SUFFIX l_user
#elif DATA_BITS == 64
#define MEM_SUFFIX q_user
#endif
#else
@ -64,14 +70,16 @@
void OPPROTO glue(glue(op_rol, MEM_SUFFIX), _T0_T1_cc)(void)
{
int count, src;
int count;
target_long src;
count = T1 & SHIFT_MASK;
if (count) {
src = T0;
T0 &= DATA_MASK;
T0 = (T0 << count) | (T0 >> (DATA_BITS - count));
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#else
/* gcc 3.2 workaround. This is really a bug in gcc. */
asm volatile("" : : "r" (T0));
@ -86,14 +94,16 @@ void OPPROTO glue(glue(op_rol, MEM_SUFFIX), _T0_T1_cc)(void)
void OPPROTO glue(glue(op_ror, MEM_SUFFIX), _T0_T1_cc)(void)
{
int count, src;
int count;
target_long src;
count = T1 & SHIFT_MASK;
if (count) {
src = T0;
T0 &= DATA_MASK;
T0 = (T0 >> count) | (T0 << (DATA_BITS - count));
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#else
/* gcc 3.2 workaround. This is really a bug in gcc. */
asm volatile("" : : "r" (T0));
@ -114,7 +124,7 @@ void OPPROTO glue(glue(op_rol, MEM_SUFFIX), _T0_T1)(void)
T0 &= DATA_MASK;
T0 = (T0 << count) | (T0 >> (DATA_BITS - count));
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#endif
}
FORCE_RET();
@ -128,7 +138,7 @@ void OPPROTO glue(glue(op_ror, MEM_SUFFIX), _T0_T1)(void)
T0 &= DATA_MASK;
T0 = (T0 >> count) | (T0 << (DATA_BITS - count));
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#endif
}
FORCE_RET();
@ -136,10 +146,11 @@ void OPPROTO glue(glue(op_ror, MEM_SUFFIX), _T0_T1)(void)
void OPPROTO glue(glue(op_rcl, MEM_SUFFIX), _T0_T1_cc)(void)
{
int count, res, eflags;
unsigned int src;
int count, eflags;
target_ulong src;
target_long res;
count = T1 & 0x1f;
count = T1 & SHIFT1_MASK;
#if DATA_BITS == 16
count = rclw_table[count];
#elif DATA_BITS == 8
@ -154,7 +165,7 @@ void OPPROTO glue(glue(op_rcl, MEM_SUFFIX), _T0_T1_cc)(void)
res |= T0 >> (DATA_BITS + 1 - count);
T0 = res;
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#endif
CC_SRC = (eflags & ~(CC_C | CC_O)) |
(lshift(src ^ T0, 11 - (DATA_BITS - 1)) & CC_O) |
@ -166,10 +177,11 @@ void OPPROTO glue(glue(op_rcl, MEM_SUFFIX), _T0_T1_cc)(void)
void OPPROTO glue(glue(op_rcr, MEM_SUFFIX), _T0_T1_cc)(void)
{
int count, res, eflags;
unsigned int src;
int count, eflags;
target_ulong src;
target_long res;
count = T1 & 0x1f;
count = T1 & SHIFT1_MASK;
#if DATA_BITS == 16
count = rclw_table[count];
#elif DATA_BITS == 8
@ -184,7 +196,7 @@ void OPPROTO glue(glue(op_rcr, MEM_SUFFIX), _T0_T1_cc)(void)
res |= T0 << (DATA_BITS + 1 - count);
T0 = res;
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#endif
CC_SRC = (eflags & ~(CC_C | CC_O)) |
(lshift(src ^ T0, 11 - (DATA_BITS - 1)) & CC_O) |
@ -196,13 +208,15 @@ void OPPROTO glue(glue(op_rcr, MEM_SUFFIX), _T0_T1_cc)(void)
void OPPROTO glue(glue(op_shl, MEM_SUFFIX), _T0_T1_cc)(void)
{
int count, src;
count = T1 & 0x1f;
int count;
target_long src;
count = T1 & SHIFT1_MASK;
if (count) {
src = (DATA_TYPE)T0 << (count - 1);
T0 = T0 << count;
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#endif
CC_SRC = src;
CC_DST = T0;
@ -213,14 +227,16 @@ void OPPROTO glue(glue(op_shl, MEM_SUFFIX), _T0_T1_cc)(void)
void OPPROTO glue(glue(op_shr, MEM_SUFFIX), _T0_T1_cc)(void)
{
int count, src;
count = T1 & 0x1f;
int count;
target_long src;
count = T1 & SHIFT1_MASK;
if (count) {
T0 &= DATA_MASK;
src = T0 >> (count - 1);
T0 = T0 >> count;
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#endif
CC_SRC = src;
CC_DST = T0;
@ -231,14 +247,16 @@ void OPPROTO glue(glue(op_shr, MEM_SUFFIX), _T0_T1_cc)(void)
void OPPROTO glue(glue(op_sar, MEM_SUFFIX), _T0_T1_cc)(void)
{
int count, src;
count = T1 & 0x1f;
int count;
target_long src;
count = T1 & SHIFT1_MASK;
if (count) {
src = (DATA_STYPE)T0;
T0 = src >> count;
src = src >> (count - 1);
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#endif
CC_SRC = src;
CC_DST = T0;
@ -262,7 +280,7 @@ void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_im_cc)(void)
res |= T1 << (count - 16);
T0 = res >> 16;
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#endif
CC_SRC = tmp;
CC_DST = T0;
@ -282,7 +300,7 @@ void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_ECX_cc)(void)
res |= T1 << (count - 16);
T0 = res >> 16;
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#endif
CC_SRC = tmp;
CC_DST = T0;
@ -304,7 +322,7 @@ void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_im_cc)(void)
res |= T1 << (32 - count);
T0 = res;
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#endif
CC_SRC = tmp;
CC_DST = T0;
@ -325,7 +343,7 @@ void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_ECX_cc)(void)
res |= T1 << (32 - count);
T0 = res;
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#endif
CC_SRC = tmp;
CC_DST = T0;
@ -335,17 +353,19 @@ void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_ECX_cc)(void)
}
#endif
#if DATA_BITS == 32
#if DATA_BITS >= 32
void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_im_cc)(void)
{
int count, tmp;
int count;
target_long tmp;
count = PARAM1;
T0 &= DATA_MASK;
T1 &= DATA_MASK;
tmp = T0 << (count - 1);
T0 = (T0 << count) | (T1 >> (DATA_BITS - count));
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#endif
CC_SRC = tmp;
CC_DST = T0;
@ -353,15 +373,17 @@ void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_im_cc)(void)
void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_ECX_cc)(void)
{
int count, tmp;
count = ECX & 0x1f;
int count;
target_long tmp;
count = ECX & SHIFT1_MASK;
if (count) {
T0 &= DATA_MASK;
T1 &= DATA_MASK;
tmp = T0 << (count - 1);
T0 = (T0 << count) | (T1 >> (DATA_BITS - count));
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#endif
CC_SRC = tmp;
CC_DST = T0;
@ -372,14 +394,16 @@ void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_ECX_cc)(void)
void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_im_cc)(void)
{
int count, tmp;
int count;
target_long tmp;
count = PARAM1;
T0 &= DATA_MASK;
T1 &= DATA_MASK;
tmp = T0 >> (count - 1);
T0 = (T0 >> count) | (T1 << (DATA_BITS - count));
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#endif
CC_SRC = tmp;
CC_DST = T0;
@ -388,15 +412,17 @@ void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_im_cc)(void)
void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_ECX_cc)(void)
{
int count, tmp;
count = ECX & 0x1f;
int count;
target_long tmp;
count = ECX & SHIFT1_MASK;
if (count) {
T0 &= DATA_MASK;
T1 &= DATA_MASK;
tmp = T0 >> (count - 1);
T0 = (T0 >> count) | (T1 << (DATA_BITS - count));
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#endif
CC_SRC = tmp;
CC_DST = T0;
@ -414,11 +440,11 @@ void OPPROTO glue(glue(op_adc, MEM_SUFFIX), _T0_T1_cc)(void)
cf = cc_table[CC_OP].compute_c();
T0 = T0 + T1 + cf;
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#endif
CC_SRC = T1;
CC_DST = T0;
CC_OP = CC_OP_ADDB + SHIFT + cf * 3;
CC_OP = CC_OP_ADDB + SHIFT + cf * 4;
}
void OPPROTO glue(glue(op_sbb, MEM_SUFFIX), _T0_T1_cc)(void)
@ -427,23 +453,23 @@ void OPPROTO glue(glue(op_sbb, MEM_SUFFIX), _T0_T1_cc)(void)
cf = cc_table[CC_OP].compute_c();
T0 = T0 - T1 - cf;
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#endif
CC_SRC = T1;
CC_DST = T0;
CC_OP = CC_OP_SUBB + SHIFT + cf * 3;
CC_OP = CC_OP_SUBB + SHIFT + cf * 4;
}
void OPPROTO glue(glue(op_cmpxchg, MEM_SUFFIX), _T0_T1_EAX_cc)(void)
{
unsigned int src, dst;
target_ulong src, dst;
src = T0;
dst = EAX - T0;
if ((DATA_TYPE)dst == 0) {
T0 = T1;
#ifdef MEM_WRITE
glue(st, MEM_SUFFIX)((uint8_t *)A0, T0);
glue(st, MEM_SUFFIX)(A0, T0);
#endif
} else {
EAX = (EAX & ~DATA_MASK) | (T0 & DATA_MASK);

@ -57,7 +57,7 @@ typedef struct DisasContext {
int override; /* -1 if no override */
int prefix;
int aflag, dflag;
uint8_t *pc; /* pc = eip + cs_base */
target_ulong pc; /* pc = eip + cs_base */
int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
static state change (stop translation) */
/* code output */
@ -65,7 +65,7 @@ typedef struct DisasContext {
uint8_t *gen_code_start;
/* current block context */
uint8_t *cs_base; /* base of CS segment */
target_ulong cs_base; /* base of CS segment */
int pe; /* protected mode */
int code32; /* 32 bit code segment */
int f_st; /* currently unused */
@ -277,7 +277,7 @@ static inline uint32_t insn_get(DisasContext *s, int ot)
be stopped. */
static int disas_insn(DisasContext *s)
{
uint8_t *pc_start, *pc_tmp, *pc_start_insn;
target_ulong pc_start, pc_tmp, pc_start_insn;
int b, prefixes, aflag, dflag, next_eip, val;
int ot;
int modrm, mod, op, rm;
@ -789,6 +789,8 @@ static int disas_insn(DisasContext *s)
break;
case 0x1e: /* fcomi */
break;
case 0x28: /* ffree sti */
break;
case 0x2a: /* fst sti */
break;
case 0x2b: /* fstp sti */
@ -1176,9 +1178,9 @@ static inline int gen_intermediate_code_internal(CPUState *env,
uint8_t *tc_ptr)
{
DisasContext dc1, *dc = &dc1;
uint8_t *pc_insn, *pc_start, *gen_code_end;
target_ulong pc_insn, pc_start, cs_base;
uint8_t *gen_code_end;
int flags, ret;
uint8_t *cs_base;
if (env->nb_breakpoints > 0 ||
env->singlestep_enabled)
@ -1197,8 +1199,8 @@ static inline int gen_intermediate_code_internal(CPUState *env,
dc->gen_code_start = gen_code_ptr;
/* generate intermediate code */
pc_start = (uint8_t *)tb->pc;
cs_base = (uint8_t *)tb->cs_base;
pc_start = tb->pc;
cs_base = tb->cs_base;
dc->pc = pc_start;
dc->cs_base = cs_base;
dc->pe = (flags >> HF_PE_SHIFT) & 1;
@ -1249,7 +1251,7 @@ static inline int gen_intermediate_code_internal(CPUState *env,
fprintf(logfile, "IN: COPY: %s fpu=%d\n",
lookup_symbol(pc_start),
tb->cflags & CF_TB_FP_USED ? 1 : 0);
disas(logfile, pc_start, dc->pc - pc_start, 0, !dc->code32);
target_disas(logfile, pc_start, dc->pc - pc_start, !dc->code32);
fprintf(logfile, "\n");
}
#endif

File diff suppressed because it is too large