Merge remote branch 'rth/axp-next' into alpha-merge
* rth/axp-next: (26 commits)
  target-alpha: Implement TLB flush primitives.
  target-alpha: Use a fixed frequency for the RPCC in system mode.
  target-alpha: Trap for unassigned and unaligned addresses.
  target-alpha: Remap PIO space for 43-bit KSEG for EV6.
  target-alpha: Implement cpu_alpha_handle_mmu_fault for system mode.
  target-alpha: Implement more CALL_PAL values inline.
  target-alpha: Disable interrupts properly.
  target-alpha: All ISA checks to use TB->FLAGS.
  target-alpha: Swap shadow registers moving to/from PALmode.
  target-alpha: Implement do_interrupt for system mode.
  target-alpha: Add IPRs to be used by the emulation PALcode.
  target-alpha: Use kernel mmu_idx for pal_mode.
  target-alpha: Add various symbolic constants.
  target-alpha: Use do_restore_state for arithmetic exceptions.
  target-alpha: Tidy up arithmetic exceptions.
  target-alpha: Tidy exception constants.
  target-alpha: Enable the alpha-softmmu target.
  target-alpha: Rationalize internal processor registers.
  target-alpha: Merge HW_REI and HW_RET implementations.
  target-alpha: Cleanup MMU modes.
  ...
commit 448293961f
@@ -56,8 +56,8 @@ M: Paul Brook <paul@codesourcery.com>
Guest CPU cores (TCG):
----------------------
Alpha
M: qemu-devel@nongnu.org
S: Orphan
M: Richard Henderson <rth@twiddle.net>
S: Maintained
F: target-alpha/

ARM

@@ -374,7 +374,8 @@ obj-m68k-y += m68k-semi.o dummy_m68k.o

obj-s390x-y = s390-virtio-bus.o s390-virtio.o

obj-alpha-y = alpha_palcode.o
obj-alpha-y = i8259.o mc146818rtc.o
obj-alpha-y += vga.o cirrus_vga.o

main.o: QEMU_CFLAGS+=$(GPROF_CFLAGS)

@@ -238,10 +238,6 @@ extern const unsigned alpha_num_operands;
#define AXP_REG_SP 30
#define AXP_REG_ZERO 31

#define bfd_mach_alpha_ev4 0x10
#define bfd_mach_alpha_ev5 0x20
#define bfd_mach_alpha_ev6 0x30

enum bfd_reloc_code_real {
    BFD_RELOC_23_PCREL_S2,
    BFD_RELOC_ALPHA_HINT
configure: 1 change (vendored)
@@ -831,6 +831,7 @@ if [ "$softmmu" = "yes" ] ; then
default_target_list="\
i386-softmmu \
x86_64-softmmu \
alpha-softmmu \
arm-softmmu \
cris-softmmu \
lm32-softmmu \
cpu-exec.c: 27 changes
@@ -488,10 +488,37 @@ int cpu_exec(CPUState *env1)
            next_tb = 0;
        }
#elif defined(TARGET_ALPHA)
        {
            int idx = -1;
            /* ??? This hard-codes the OSF/1 interrupt levels.  */
            switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
            case 0 ... 3:
                if (interrupt_request & CPU_INTERRUPT_HARD) {
                    idx = EXCP_DEV_INTERRUPT;
                }
                /* FALLTHRU */
            case 4:
                if (interrupt_request & CPU_INTERRUPT_TIMER) {
                    idx = EXCP_CLK_INTERRUPT;
                }
                /* FALLTHRU */
            case 5:
                if (interrupt_request & CPU_INTERRUPT_SMP) {
                    idx = EXCP_SMP_INTERRUPT;
                }
                /* FALLTHRU */
            case 6:
                if (interrupt_request & CPU_INTERRUPT_MCHK) {
                    idx = EXCP_MCHK;
                }
            }
            if (idx >= 0) {
                env->exception_index = idx;
                env->error_code = 0;
                do_interrupt(env);
                next_tb = 0;
            }
        }
#elif defined(TARGET_CRIS)
        if (interrupt_request & CPU_INTERRUPT_HARD
            && (env->pregs[PR_CCS] & I_FLAG)
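The new TARGET_ALPHA block above selects at most one pending interrupt per pass; the fall-through cases mean a higher-priority source that is pending overwrites a lower-priority one, and IPL 7 or PALmode accepts nothing. A minimal standalone sketch of the same selection logic, using stand-in return values rather than the QEMU EXCP_* constants:

    /* Sketch only: mirrors the fall-through priority selection above. */
    static int pick_pending_interrupt(int ipl, int pal_mode,
                                      int dev, int clk, int smp, int mchk)
    {
        int idx = -1;

        switch (pal_mode ? 7 : ipl) {
        case 0 ... 3:
            if (dev) { idx = 1; }   /* device interrupt */
            /* FALLTHRU */
        case 4:
            if (clk) { idx = 2; }   /* clock interrupt */
            /* FALLTHRU */
        case 5:
            if (smp) { idx = 3; }   /* interprocessor interrupt */
            /* FALLTHRU */
        case 6:
            if (mchk) { idx = 4; }  /* machine check */
        }
        return idx;
    }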
default-configs/alpha-softmmu.mak: 9 changes (new file)
@@ -0,0 +1,9 @@
# Default configuration for alpha-softmmu

include pci.mak
CONFIG_SERIAL=y
CONFIG_I8254=y
CONFIG_VGA_PCI=y
CONFIG_IDE_CORE=y
CONFIG_IDE_QDEV=y
CONFIG_VMWARE_VGA=y
@@ -184,6 +184,9 @@ enum bfd_architecture
#define bfd_mach_sh5 0x50
  bfd_arch_alpha,      /* Dec Alpha */
#define bfd_mach_alpha 1
#define bfd_mach_alpha_ev4 0x10
#define bfd_mach_alpha_ev5 0x20
#define bfd_mach_alpha_ev6 0x30
  bfd_arch_arm,        /* Advanced Risc Machines ARM */
#define bfd_mach_arm_unknown 0
#define bfd_mach_arm_2 1

disas.c: 2 changes
@@ -205,7 +205,7 @@ void target_disas(FILE *out, target_ulong code, target_ulong size, int flags)
    disasm_info.mach = bfd_mach_sh4;
    print_insn = print_insn_sh;
#elif defined(TARGET_ALPHA)
    disasm_info.mach = bfd_mach_alpha;
    disasm_info.mach = bfd_mach_alpha_ev6;
    print_insn = print_insn_alpha;
#elif defined(TARGET_CRIS)
    if (flags != 32) {
@@ -325,7 +325,7 @@ static inline tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong add
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
#if defined(TARGET_SPARC) || defined(TARGET_MIPS)
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        do_unassigned_access(addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
exec.c: 12 changes
@@ -3193,7 +3193,7 @@ static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
@@ -3204,7 +3204,7 @@ static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
@@ -3215,7 +3215,7 @@ static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
@@ -3226,7 +3226,7 @@ static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
@@ -3236,7 +3236,7 @@ static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
@@ -3246,7 +3246,7 @@ static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

hw/alpha_palcode.c: 1048 changes (diff suppressed because it is too large)
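These exec.c hunks add TARGET_ALPHA to the targets whose unassigned_mem_* handlers report the access through do_unassigned_access() instead of silently returning. On Alpha, the handler added later in this series (see the op_helper.c hunk further down) records the faulting address and direction and raises a machine check. A hedged sketch of that shape; the wrapper itself is illustrative, while env, trap_arg0/1, EXCP_MCHK and dynamic_excp are the fields and helpers introduced by these patches:

    /* Sketch: what the Alpha do_unassigned_access() in this series
       boils down to.  Not self-contained; relies on the fields and
       helpers defined in the op_helper.c hunk below. */
    void do_unassigned_access(uint64_t addr, int is_write,
                              int is_exec, int unused, int size)
    {
        env->trap_arg0 = addr;       /* faulting physical address */
        env->trap_arg1 = is_write;   /* 0 = read, 1 = write */
        dynamic_excp(EXCP_MCHK, 0);  /* deliver as a machine check */
    }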
@@ -2508,49 +2508,27 @@ void cpu_loop (CPUState *env)
            fprintf(stderr, "Machine check exception. Exit\n");
            exit(1);
            break;
        case EXCP_ARITH:
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGFPE;
            info.si_errno = 0;
            info.si_code = TARGET_FPE_FLTINV;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP_HW_INTERRUPT:
        case EXCP_SMP_INTERRUPT:
        case EXCP_CLK_INTERRUPT:
        case EXCP_DEV_INTERRUPT:
            fprintf(stderr, "External interrupt. Exit\n");
            exit(1);
            break;
        case EXCP_DFAULT:
        case EXCP_MMFAULT:
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGSEGV;
            info.si_errno = 0;
            info.si_code = (page_get_flags(env->ipr[IPR_EXC_ADDR]) & PAGE_VALID
            info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
                            ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
            info._sifields._sigfault._addr = env->ipr[IPR_EXC_ADDR];
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP_DTB_MISS_PAL:
            fprintf(stderr, "MMU data TLB miss in PALcode\n");
            exit(1);
            break;
        case EXCP_ITB_MISS:
            fprintf(stderr, "MMU instruction TLB miss\n");
            exit(1);
            break;
        case EXCP_ITB_ACV:
            fprintf(stderr, "MMU instruction access violation\n");
            exit(1);
            break;
        case EXCP_DTB_MISS_NATIVE:
            fprintf(stderr, "MMU data TLB miss\n");
            exit(1);
            break;
        case EXCP_UNALIGN:
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGBUS;
            info.si_errno = 0;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->ipr[IPR_EXC_ADDR];
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP_OPCDEC:
@@ -2562,12 +2540,20 @@ void cpu_loop (CPUState *env)
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP_ARITH:
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGFPE;
            info.si_errno = 0;
            info.si_code = TARGET_FPE_FLTINV;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP_FEN:
            /* No-op.  Linux simply re-enables the FPU.  */
            break;
        case EXCP_CALL_PAL ... (EXCP_CALL_PALP - 1):
        case EXCP_CALL_PAL:
            env->lock_addr = -1;
            switch ((trapnr >> 6) | 0x80) {
            switch (env->error_code) {
            case 0x80:
                /* BPT */
                info.si_signo = TARGET_SIGTRAP;
@@ -2658,8 +2644,6 @@ void cpu_loop (CPUState *env)
                goto do_sigill;
            }
            break;
        case EXCP_CALL_PALP ... (EXCP_CALL_PALE - 1):
            goto do_sigill;
        case EXCP_DEBUG:
            info.si_signo = gdb_handlesig (env, TARGET_SIGTRAP);
            if (info.si_signo) {
@ -192,171 +192,39 @@ enum {
|
||||
|
||||
#define SWCR_MASK (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK | SWCR_STATUS_MASK)
|
||||
|
||||
/* Internal processor registers */
|
||||
/* XXX: TOFIX: most of those registers are implementation dependent */
|
||||
enum {
|
||||
#if defined(CONFIG_USER_ONLY)
|
||||
IPR_EXC_ADDR,
|
||||
IPR_EXC_SUM,
|
||||
IPR_EXC_MASK,
|
||||
#else
|
||||
/* Ebox IPRs */
|
||||
IPR_CC = 0xC0, /* 21264 */
|
||||
IPR_CC_CTL = 0xC1, /* 21264 */
|
||||
#define IPR_CC_CTL_ENA_SHIFT 32
|
||||
#define IPR_CC_CTL_COUNTER_MASK 0xfffffff0UL
|
||||
IPR_VA = 0xC2, /* 21264 */
|
||||
IPR_VA_CTL = 0xC4, /* 21264 */
|
||||
#define IPR_VA_CTL_VA_48_SHIFT 1
|
||||
#define IPR_VA_CTL_VPTB_SHIFT 30
|
||||
IPR_VA_FORM = 0xC3, /* 21264 */
|
||||
/* Ibox IPRs */
|
||||
IPR_ITB_TAG = 0x00, /* 21264 */
|
||||
IPR_ITB_PTE = 0x01, /* 21264 */
|
||||
IPR_ITB_IAP = 0x02,
|
||||
IPR_ITB_IA = 0x03, /* 21264 */
|
||||
IPR_ITB_IS = 0x04, /* 21264 */
|
||||
IPR_PMPC = 0x05,
|
||||
IPR_EXC_ADDR = 0x06, /* 21264 */
|
||||
IPR_IVA_FORM = 0x07, /* 21264 */
|
||||
IPR_CM = 0x09, /* 21264 */
|
||||
#define IPR_CM_SHIFT 3
|
||||
#define IPR_CM_MASK (3ULL << IPR_CM_SHIFT) /* 21264 */
|
||||
IPR_IER = 0x0A, /* 21264 */
|
||||
#define IPR_IER_MASK 0x0000007fffffe000ULL
|
||||
IPR_IER_CM = 0x0B, /* 21264: = CM | IER */
|
||||
IPR_SIRR = 0x0C, /* 21264 */
|
||||
#define IPR_SIRR_SHIFT 14
|
||||
#define IPR_SIRR_MASK 0x7fff
|
||||
IPR_ISUM = 0x0D, /* 21264 */
|
||||
IPR_HW_INT_CLR = 0x0E, /* 21264 */
|
||||
IPR_EXC_SUM = 0x0F,
|
||||
IPR_PAL_BASE = 0x10,
|
||||
IPR_I_CTL = 0x11,
|
||||
#define IPR_I_CTL_CHIP_ID_SHIFT 24 /* 21264 */
|
||||
#define IPR_I_CTL_BIST_FAIL (1 << 23) /* 21264 */
|
||||
#define IPR_I_CTL_IC_EN_SHIFT 2 /* 21264 */
|
||||
#define IPR_I_CTL_SDE1_SHIFT 7 /* 21264 */
|
||||
#define IPR_I_CTL_HWE_SHIFT 12 /* 21264 */
|
||||
#define IPR_I_CTL_VA_48_SHIFT 15 /* 21264 */
|
||||
#define IPR_I_CTL_SPE_SHIFT 3 /* 21264 */
|
||||
#define IPR_I_CTL_CALL_PAL_R23_SHIFT 20 /* 21264 */
|
||||
IPR_I_STAT = 0x16, /* 21264 */
|
||||
IPR_IC_FLUSH = 0x13, /* 21264 */
|
||||
IPR_IC_FLUSH_ASM = 0x12, /* 21264 */
|
||||
IPR_CLR_MAP = 0x15,
|
||||
IPR_SLEEP = 0x17,
|
||||
IPR_PCTX = 0x40,
|
||||
IPR_PCTX_ASN = 0x01, /* field */
|
||||
#define IPR_PCTX_ASN_SHIFT 39
|
||||
IPR_PCTX_ASTER = 0x02, /* field */
|
||||
#define IPR_PCTX_ASTER_SHIFT 5
|
||||
IPR_PCTX_ASTRR = 0x04, /* field */
|
||||
#define IPR_PCTX_ASTRR_SHIFT 9
|
||||
IPR_PCTX_PPCE = 0x08, /* field */
|
||||
#define IPR_PCTX_PPCE_SHIFT 1
|
||||
IPR_PCTX_FPE = 0x10, /* field */
|
||||
#define IPR_PCTX_FPE_SHIFT 2
|
||||
IPR_PCTX_ALL = 0x5f, /* all fields */
|
||||
IPR_PCTR_CTL = 0x14, /* 21264 */
|
||||
/* Mbox IPRs */
|
||||
IPR_DTB_TAG0 = 0x20, /* 21264 */
|
||||
IPR_DTB_TAG1 = 0xA0, /* 21264 */
|
||||
IPR_DTB_PTE0 = 0x21, /* 21264 */
|
||||
IPR_DTB_PTE1 = 0xA1, /* 21264 */
|
||||
IPR_DTB_ALTMODE = 0xA6,
|
||||
IPR_DTB_ALTMODE0 = 0x26, /* 21264 */
|
||||
#define IPR_DTB_ALTMODE_MASK 3
|
||||
IPR_DTB_IAP = 0xA2,
|
||||
IPR_DTB_IA = 0xA3, /* 21264 */
|
||||
IPR_DTB_IS0 = 0x24,
|
||||
IPR_DTB_IS1 = 0xA4,
|
||||
IPR_DTB_ASN0 = 0x25, /* 21264 */
|
||||
IPR_DTB_ASN1 = 0xA5, /* 21264 */
|
||||
#define IPR_DTB_ASN_SHIFT 56
|
||||
IPR_MM_STAT = 0x27, /* 21264 */
|
||||
IPR_M_CTL = 0x28, /* 21264 */
|
||||
#define IPR_M_CTL_SPE_SHIFT 1
|
||||
#define IPR_M_CTL_SPE_MASK 7
|
||||
IPR_DC_CTL = 0x29, /* 21264 */
|
||||
IPR_DC_STAT = 0x2A, /* 21264 */
|
||||
/* Cbox IPRs */
|
||||
IPR_C_DATA = 0x2B,
|
||||
IPR_C_SHIFT = 0x2C,
|
||||
/* MMU modes definitions */
|
||||
|
||||
IPR_ASN,
|
||||
IPR_ASTEN,
|
||||
IPR_ASTSR,
|
||||
IPR_DATFX,
|
||||
IPR_ESP,
|
||||
IPR_FEN,
|
||||
IPR_IPIR,
|
||||
IPR_IPL,
|
||||
IPR_KSP,
|
||||
IPR_MCES,
|
||||
IPR_PERFMON,
|
||||
IPR_PCBB,
|
||||
IPR_PRBR,
|
||||
IPR_PTBR,
|
||||
IPR_SCBB,
|
||||
IPR_SISR,
|
||||
IPR_SSP,
|
||||
IPR_SYSPTBR,
|
||||
IPR_TBCHK,
|
||||
IPR_TBIA,
|
||||
IPR_TBIAP,
|
||||
IPR_TBIS,
|
||||
IPR_TBISD,
|
||||
IPR_TBISI,
|
||||
IPR_USP,
|
||||
IPR_VIRBND,
|
||||
IPR_VPTB,
|
||||
IPR_WHAMI,
|
||||
IPR_ALT_MODE,
|
||||
#endif
|
||||
IPR_LAST,
|
||||
};
|
||||
/* Alpha has 5 MMU modes: PALcode, kernel, executive, supervisor, and user.
   The Unix PALcode only exposes the kernel and user modes; presumably
   executive and supervisor are used by VMS.

   PALcode itself uses physical mode for code and kernel mode for data;
   there are PALmode instructions that can access data via physical mode
   or via an os-installed "alternate mode", which is one of the 4 above.

   QEMU does not currently properly distinguish between code/data when
   looking up addresses.  To avoid having to address this issue, our
   emulated PALcode will cheat and use the KSEG mapping for its code+data
   rather than physical addresses.

   Moreover, we're only emulating Unix PALcode, and not attempting VMS.

   All of which allows us to drop all but kernel and user modes.
   Elide the unused MMU modes to save space.  */

#define NB_MMU_MODES 2

#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_KERNEL_IDX   0
#define MMU_USER_IDX     1

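The "cheat" described above relies on the 43-bit KSEG superpage: in kernel mode an address with the sign bit set and bits 42:41 equal to 2 maps straight to a physical address, with bit 40 relocated to bit 43 for the Typhoon chipset. A small self-contained sketch of that check and remap, mirroring the logic get_physical_address() uses later in this series (the function name here is illustrative, not a QEMU API):

    #include <stdint.h>
    #include <stdbool.h>

    /* Sketch: KSEG superpage detection and remap as done by the
       emulated PALcode's address translation. */
    static bool kseg_translate(int64_t va, uint64_t *phys)
    {
        if (va < 0 && ((va >> 41) & 3) == 2) {
            uint64_t p = va & ((1ull << 40) - 1);  /* low 40 bits unchanged */
            p |= (va & (1ull << 40)) << 3;         /* move bit 40 up to bit 43 */
            *phys = p;
            return true;
        }
        return false;
    }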
typedef struct CPUAlphaState CPUAlphaState;
|
||||
|
||||
typedef struct pal_handler_t pal_handler_t;
|
||||
struct pal_handler_t {
|
||||
/* Reset */
|
||||
void (*reset)(CPUAlphaState *env);
|
||||
/* Uncorrectable hardware error */
|
||||
void (*machine_check)(CPUAlphaState *env);
|
||||
/* Arithmetic exception */
|
||||
void (*arithmetic)(CPUAlphaState *env);
|
||||
/* Interrupt / correctable hardware error */
|
||||
void (*interrupt)(CPUAlphaState *env);
|
||||
/* Data fault */
|
||||
void (*dfault)(CPUAlphaState *env);
|
||||
/* DTB miss pal */
|
||||
void (*dtb_miss_pal)(CPUAlphaState *env);
|
||||
/* DTB miss native */
|
||||
void (*dtb_miss_native)(CPUAlphaState *env);
|
||||
/* Unaligned access */
|
||||
void (*unalign)(CPUAlphaState *env);
|
||||
/* ITB miss */
|
||||
void (*itb_miss)(CPUAlphaState *env);
|
||||
/* Instruction stream access violation */
|
||||
void (*itb_acv)(CPUAlphaState *env);
|
||||
/* Reserved or privileged opcode */
|
||||
void (*opcdec)(CPUAlphaState *env);
|
||||
/* Floating point exception */
|
||||
void (*fen)(CPUAlphaState *env);
|
||||
/* Call pal instruction */
|
||||
void (*call_pal)(CPUAlphaState *env, uint32_t palcode);
|
||||
};
|
||||
|
||||
#define NB_MMU_MODES 4
|
||||
|
||||
struct CPUAlphaState {
|
||||
uint64_t ir[31];
|
||||
float64 fir[31];
|
||||
uint64_t pc;
|
||||
uint64_t ipr[IPR_LAST];
|
||||
uint64_t ps;
|
||||
uint64_t unique;
|
||||
uint64_t lock_addr;
|
||||
uint64_t lock_st_addr;
|
||||
@ -371,10 +239,33 @@ struct CPUAlphaState {
|
||||
uint8_t fpcr_dnod;
|
||||
uint8_t fpcr_undz;
|
||||
|
||||
/* Used for HW_LD / HW_ST */
|
||||
uint8_t saved_mode;
|
||||
/* For RC and RS */
|
||||
/* The Internal Processor Registers. Some of these we assume always
|
||||
exist for use in user-mode. */
|
||||
uint8_t ps;
|
||||
uint8_t intr_flag;
|
||||
uint8_t pal_mode;
|
||||
uint8_t fen;
|
||||
|
||||
uint32_t pcc_ofs;
|
||||
|
||||
/* These pass data from the exception logic in the translator and
|
||||
helpers to the OS entry point. This is used for both system
|
||||
emulation and user-mode. */
|
||||
uint64_t trap_arg0;
|
||||
uint64_t trap_arg1;
|
||||
uint64_t trap_arg2;
|
||||
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
/* The internal data required by our emulation of the Unix PALcode. */
|
||||
uint64_t exc_addr;
|
||||
uint64_t palbr;
|
||||
uint64_t ptbr;
|
||||
uint64_t vptptr;
|
||||
uint64_t sysval;
|
||||
uint64_t usp;
|
||||
uint64_t shadow[8];
|
||||
uint64_t scratch[24];
|
||||
#endif
|
||||
|
||||
#if TARGET_LONG_BITS > HOST_LONG_BITS
|
||||
/* temporary fixed-point registers
|
||||
@ -386,14 +277,11 @@ struct CPUAlphaState {
|
||||
/* Those resources are used only in Qemu core */
|
||||
CPU_COMMON
|
||||
|
||||
uint32_t hflags;
|
||||
|
||||
int error_code;
|
||||
|
||||
uint32_t features;
|
||||
uint32_t amask;
|
||||
int implver;
|
||||
pal_handler_t *pal_handler;
|
||||
};
|
||||
|
||||
#define cpu_init cpu_alpha_init
|
||||
@ -401,17 +289,6 @@ struct CPUAlphaState {
|
||||
#define cpu_gen_code cpu_alpha_gen_code
|
||||
#define cpu_signal_handler cpu_alpha_signal_handler
|
||||
|
||||
/* MMU modes definitions */
|
||||
#define MMU_MODE0_SUFFIX _kernel
|
||||
#define MMU_MODE1_SUFFIX _executive
|
||||
#define MMU_MODE2_SUFFIX _supervisor
|
||||
#define MMU_MODE3_SUFFIX _user
|
||||
#define MMU_USER_IDX 3
|
||||
static inline int cpu_mmu_index (CPUState *env)
|
||||
{
|
||||
return (env->ps >> 3) & 3;
|
||||
}
|
||||
|
||||
#include "cpu-all.h"
|
||||
|
||||
enum {
|
||||
@ -422,36 +299,89 @@ enum {
|
||||
};
|
||||
|
||||
enum {
|
||||
EXCP_RESET = 0x0000,
|
||||
EXCP_MCHK = 0x0020,
|
||||
EXCP_ARITH = 0x0060,
|
||||
EXCP_HW_INTERRUPT = 0x00E0,
|
||||
EXCP_DFAULT = 0x01E0,
|
||||
EXCP_DTB_MISS_PAL = 0x09E0,
|
||||
EXCP_ITB_MISS = 0x03E0,
|
||||
EXCP_ITB_ACV = 0x07E0,
|
||||
EXCP_DTB_MISS_NATIVE = 0x08E0,
|
||||
EXCP_UNALIGN = 0x11E0,
|
||||
EXCP_OPCDEC = 0x13E0,
|
||||
EXCP_FEN = 0x17E0,
|
||||
EXCP_CALL_PAL = 0x2000,
|
||||
EXCP_CALL_PALP = 0x3000,
|
||||
EXCP_CALL_PALE = 0x4000,
|
||||
/* Pseudo exception for console */
|
||||
EXCP_CONSOLE_DISPATCH = 0x4001,
|
||||
EXCP_CONSOLE_FIXUP = 0x4002,
|
||||
EXCP_STL_C = 0x4003,
|
||||
EXCP_STQ_C = 0x4004,
|
||||
EXCP_RESET,
|
||||
EXCP_MCHK,
|
||||
EXCP_SMP_INTERRUPT,
|
||||
EXCP_CLK_INTERRUPT,
|
||||
EXCP_DEV_INTERRUPT,
|
||||
EXCP_MMFAULT,
|
||||
EXCP_UNALIGN,
|
||||
EXCP_OPCDEC,
|
||||
EXCP_ARITH,
|
||||
EXCP_FEN,
|
||||
EXCP_CALL_PAL,
|
||||
/* For Usermode emulation. */
|
||||
EXCP_STL_C,
|
||||
EXCP_STQ_C,
|
||||
};
|
||||
|
||||
/* Arithmetic exception */
|
||||
#define EXC_M_IOV (1<<16) /* Integer Overflow */
|
||||
#define EXC_M_INE (1<<15) /* Inexact result */
|
||||
#define EXC_M_UNF (1<<14) /* Underflow */
|
||||
#define EXC_M_FOV (1<<13) /* Overflow */
|
||||
#define EXC_M_DZE (1<<12) /* Division by zero */
|
||||
#define EXC_M_INV (1<<11) /* Invalid operation */
|
||||
#define EXC_M_SWC (1<<10) /* Software completion */
|
||||
/* Alpha-specific interrupt pending bits. */
|
||||
#define CPU_INTERRUPT_TIMER CPU_INTERRUPT_TGT_EXT_0
|
||||
#define CPU_INTERRUPT_SMP CPU_INTERRUPT_TGT_EXT_1
|
||||
#define CPU_INTERRUPT_MCHK CPU_INTERRUPT_TGT_EXT_2
|
||||
|
||||
/* OSF/1 Page table bits. */
|
||||
enum {
|
||||
PTE_VALID = 0x0001,
|
||||
PTE_FOR = 0x0002, /* used for page protection (fault on read) */
|
||||
PTE_FOW = 0x0004, /* used for page protection (fault on write) */
|
||||
PTE_FOE = 0x0008, /* used for page protection (fault on exec) */
|
||||
PTE_ASM = 0x0010,
|
||||
PTE_KRE = 0x0100,
|
||||
PTE_URE = 0x0200,
|
||||
PTE_KWE = 0x1000,
|
||||
PTE_UWE = 0x2000
|
||||
};
|
||||
|
||||
/* Hardware interrupt (entInt) constants. */
|
||||
enum {
|
||||
INT_K_IP,
|
||||
INT_K_CLK,
|
||||
INT_K_MCHK,
|
||||
INT_K_DEV,
|
||||
INT_K_PERF,
|
||||
};
|
||||
|
||||
/* Memory management (entMM) constants. */
|
||||
enum {
|
||||
MM_K_TNV,
|
||||
MM_K_ACV,
|
||||
MM_K_FOR,
|
||||
MM_K_FOE,
|
||||
MM_K_FOW
|
||||
};
|
||||
|
||||
/* Arithmetic exception (entArith) constants. */
|
||||
enum {
|
||||
EXC_M_SWC = 1, /* Software completion */
|
||||
EXC_M_INV = 2, /* Invalid operation */
|
||||
EXC_M_DZE = 4, /* Division by zero */
|
||||
EXC_M_FOV = 8, /* Overflow */
|
||||
EXC_M_UNF = 16, /* Underflow */
|
||||
EXC_M_INE = 32, /* Inexact result */
|
||||
EXC_M_IOV = 64 /* Integer Overflow */
|
||||
};
|
||||
|
||||
/* Processor status constants.  */
enum {
    /* Low 3 bits are interrupt mask level.  */
    PS_INT_MASK = 7,

    /* Bits 4 and 5 are the mmu mode.  The VMS PALcode uses all 4 modes;
       The Unix PALcode only uses bit 4.  */
    PS_USER_MODE = 8
};

static inline int cpu_mmu_index(CPUState *env)
{
    if (env->pal_mode) {
        return MMU_KERNEL_IDX;
    } else if (env->ps & PS_USER_MODE) {
        return MMU_USER_IDX;
    } else {
        return MMU_KERNEL_IDX;
    }
}

enum {
|
||||
IR_V0 = 0,
|
||||
@ -504,19 +434,46 @@ void do_interrupt (CPUState *env);
|
||||
|
||||
uint64_t cpu_alpha_load_fpcr (CPUState *env);
|
||||
void cpu_alpha_store_fpcr (CPUState *env, uint64_t val);
|
||||
int cpu_alpha_mfpr (CPUState *env, int iprn, uint64_t *valp);
|
||||
int cpu_alpha_mtpr (CPUState *env, int iprn, uint64_t val, uint64_t *oldvalp);
|
||||
#if !defined (CONFIG_USER_ONLY)
|
||||
void pal_init (CPUState *env);
|
||||
void call_pal (CPUState *env);
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
void swap_shadow_regs(CPUState *env);
|
||||
extern QEMU_NORETURN void do_unassigned_access(target_phys_addr_t addr,
|
||||
int, int, int, int);
|
||||
#endif
|
||||
|
||||
/* Bits in TB->FLAGS that control how translation is processed.  */
enum {
    TB_FLAGS_PAL_MODE = 1,
    TB_FLAGS_FEN = 2,
    TB_FLAGS_USER_MODE = 8,

    TB_FLAGS_AMASK_SHIFT = 4,
    TB_FLAGS_AMASK_BWX = AMASK_BWX << TB_FLAGS_AMASK_SHIFT,
    TB_FLAGS_AMASK_FIX = AMASK_FIX << TB_FLAGS_AMASK_SHIFT,
    TB_FLAGS_AMASK_CIX = AMASK_CIX << TB_FLAGS_AMASK_SHIFT,
    TB_FLAGS_AMASK_MVI = AMASK_MVI << TB_FLAGS_AMASK_SHIFT,
    TB_FLAGS_AMASK_TRAP = AMASK_TRAP << TB_FLAGS_AMASK_SHIFT,
    TB_FLAGS_AMASK_PREFETCH = AMASK_PREFETCH << TB_FLAGS_AMASK_SHIFT,
};

static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
                                        target_ulong *cs_base, int *flags)
                                        target_ulong *cs_base, int *pflags)
{
    int flags = 0;

    *pc = env->pc;
    *cs_base = 0;
    *flags = env->ps;

    if (env->pal_mode) {
        flags = TB_FLAGS_PAL_MODE;
    } else {
        flags = env->ps & PS_USER_MODE;
    }
    if (env->fen) {
        flags |= TB_FLAGS_FEN;
    }
    flags |= env->amask << TB_FLAGS_AMASK_SHIFT;

    *pflags = flags;
}

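cpu_get_tb_cpu_state() packs PALmode, the user-mode bit, FEN and the AMASK bits into the TB flags word, which is what lets all ISA checks key off TB->FLAGS at translation time. A hedged sketch of the corresponding decode on the translator side; the helper names are illustrative, while the TB_FLAGS_* constants are the ones defined above:

    /* Sketch: recovering individual facts from the packed flags word. */
    static inline int tb_flags_in_palmode(int tb_flags)
    {
        return (tb_flags & TB_FLAGS_PAL_MODE) != 0;
    }

    static inline int tb_flags_have_bwx(int tb_flags)
    {
        /* AMASK_BWX was shifted by TB_FLAGS_AMASK_SHIFT when packed above. */
        return (tb_flags & TB_FLAGS_AMASK_BWX) != 0;
    }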
#if defined(CONFIG_USER_ONLY)

@@ -39,7 +39,17 @@ register struct CPUAlphaState *env asm(AREG0);

static inline int cpu_has_work(CPUState *env)
{
    return (env->interrupt_request & CPU_INTERRUPT_HARD);
    /* Here we are checking to see if the CPU should wake up from HALT.
       We will have gotten into this state only for WTINT from PALmode.  */
    /* ??? I'm not sure how the IPL state works with WTINT to keep a CPU
       asleep even if (some) interrupts have been asserted.  For now,
       assume that if a CPU really wants to stay asleep, it will mask
       interrupts at the chipset level, which will prevent these bits
       from being set in the first place.  */
    return env->interrupt_request & (CPU_INTERRUPT_HARD
                                     | CPU_INTERRUPT_TIMER
                                     | CPU_INTERRUPT_SMP
                                     | CPU_INTERRUPT_MCHK);
}

static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb)
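cpu_has_work() above now wakes a halted CPU for any of the Alpha-specific pending bits as well as CPU_INTERRUPT_HARD. Those bits are raised by board or device models through the generic cpu_interrupt() call; a hedged usage sketch follows, where the timer-device framing is hypothetical and only the two cpu_*interrupt calls are assumed to be the standard API of this QEMU generation:

    /* Sketch: a periodic timer model asserting and acking the Alpha
       clock interrupt.  The surrounding device is made up. */
    static void clock_tick_cb(void *opaque)
    {
        CPUState *env = opaque;
        cpu_interrupt(env, CPU_INTERRUPT_TIMER);
    }

    static void clock_ack(CPUState *env)
    {
        cpu_reset_interrupt(env, CPU_INTERRUPT_TIMER);
    }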
@ -160,382 +160,299 @@ void cpu_alpha_store_fpcr (CPUState *env, uint64_t val)
|
||||
}
|
||||
|
||||
#if defined(CONFIG_USER_ONLY)
|
||||
|
||||
int cpu_alpha_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
|
||||
int mmu_idx, int is_softmmu)
|
||||
{
|
||||
if (rw == 2)
|
||||
env->exception_index = EXCP_ITB_MISS;
|
||||
else
|
||||
env->exception_index = EXCP_DFAULT;
|
||||
env->ipr[IPR_EXC_ADDR] = address;
|
||||
|
||||
env->exception_index = EXCP_MMFAULT;
|
||||
env->trap_arg0 = address;
|
||||
return 1;
|
||||
}
|
||||
|
||||
void do_interrupt (CPUState *env)
|
||||
#else
|
||||
void swap_shadow_regs(CPUState *env)
|
||||
{
|
||||
env->exception_index = -1;
|
||||
uint64_t i0, i1, i2, i3, i4, i5, i6, i7;
|
||||
|
||||
i0 = env->ir[8];
|
||||
i1 = env->ir[9];
|
||||
i2 = env->ir[10];
|
||||
i3 = env->ir[11];
|
||||
i4 = env->ir[12];
|
||||
i5 = env->ir[13];
|
||||
i6 = env->ir[14];
|
||||
i7 = env->ir[25];
|
||||
|
||||
env->ir[8] = env->shadow[0];
|
||||
env->ir[9] = env->shadow[1];
|
||||
env->ir[10] = env->shadow[2];
|
||||
env->ir[11] = env->shadow[3];
|
||||
env->ir[12] = env->shadow[4];
|
||||
env->ir[13] = env->shadow[5];
|
||||
env->ir[14] = env->shadow[6];
|
||||
env->ir[25] = env->shadow[7];
|
||||
|
||||
env->shadow[0] = i0;
|
||||
env->shadow[1] = i1;
|
||||
env->shadow[2] = i2;
|
||||
env->shadow[3] = i3;
|
||||
env->shadow[4] = i4;
|
||||
env->shadow[5] = i5;
|
||||
env->shadow[6] = i6;
|
||||
env->shadow[7] = i7;
|
||||
}
|
||||
|
||||
#else
|
||||
/* Returns the OSF/1 entMM failure indication, or -1 on success.  */
static int get_physical_address(CPUState *env, target_ulong addr,
                                int prot_need, int mmu_idx,
                                target_ulong *pphys, int *pprot)
{
    target_long saddr = addr;
    target_ulong phys = 0;
    target_ulong L1pte, L2pte, L3pte;
    target_ulong pt, index;
    int prot = 0;
    int ret = MM_K_ACV;

    /* Ensure that the virtual address is properly sign-extended from
       the last implemented virtual address bit.  */
    if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
        goto exit;
    }

    /* Translate the superpage.  */
    /* ??? When we do more than emulate Unix PALcode, we'll need to
       determine which KSEG is actually active.  */
    if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
        /* User-space cannot access KSEG addresses.  */
        if (mmu_idx != MMU_KERNEL_IDX) {
            goto exit;
        }

        /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
           We would not do this if the 48-bit KSEG is enabled.  */
        phys = saddr & ((1ull << 40) - 1);
        phys |= (saddr & (1ull << 40)) << 3;

        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Interpret the page table exactly like PALcode does.  */

    pt = env->ptbr;

    /* L1 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
    L1pte = ldq_phys(pt + index*8);

    if (unlikely((L1pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L1pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L1pte >> 32 << TARGET_PAGE_BITS;

    /* L2 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
    L2pte = ldq_phys(pt + index*8);

    if (unlikely((L2pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L2pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L2pte >> 32 << TARGET_PAGE_BITS;

    /* L3 page table read.  */
    index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
    L3pte = ldq_phys(pt + index*8);

    phys = L3pte >> 32 << TARGET_PAGE_BITS;
    if (unlikely((L3pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }

#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
# error page bits out of date
#endif

    /* Check access violations.  */
    if (L3pte & (PTE_KRE << mmu_idx)) {
        prot |= PAGE_READ | PAGE_EXEC;
    }
    if (L3pte & (PTE_KWE << mmu_idx)) {
        prot |= PAGE_WRITE;
    }
    if (unlikely((prot & prot_need) == 0 && prot_need)) {
        goto exit;
    }

    /* Check fault-on-operation violations.  */
    prot &= ~(L3pte >> 1);
    ret = -1;
    if (unlikely((prot & prot_need) == 0)) {
        ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
               prot_need & PAGE_WRITE ? MM_K_FOW :
               prot_need & PAGE_READ ? MM_K_FOR : -1);
    }

 exit:
    *pphys = phys;
    *pprot = prot;
    return ret;
}
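With Alpha's 8 KB pages (TARGET_PAGE_BITS of 13, an assumption stated here rather than taken from the patch) each level indexes 10 bits of the virtual address, so the three reads above consume VA bits 42:33, 32:23 and 22:13. A small runnable sketch of just that index arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_BITS 13   /* assumed 8 KB page size */

    int main(void)
    {
        uint64_t va = 0x0000012345678000ull;   /* arbitrary example VA */

        /* Same shifts and masks as the L1/L2/L3 reads above. */
        uint64_t l1 = (va >> (PAGE_BITS + 20)) & 0x3ff;
        uint64_t l2 = (va >> (PAGE_BITS + 10)) & 0x3ff;
        uint64_t l3 = (va >> PAGE_BITS) & 0x3ff;

        printf("L1 index %llu, L2 index %llu, L3 index %llu\n",
               (unsigned long long)l1, (unsigned long long)l2,
               (unsigned long long)l3);
        return 0;
    }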
|
||||
|
||||
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
|
||||
{
|
||||
return -1;
|
||||
target_ulong phys;
|
||||
int prot, fail;
|
||||
|
||||
fail = get_physical_address(env, addr, 0, 0, &phys, &prot);
|
||||
return (fail >= 0 ? -1 : phys);
|
||||
}
|
||||
|
||||
int cpu_alpha_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
|
||||
int cpu_alpha_handle_mmu_fault(CPUState *env, target_ulong addr, int rw,
|
||||
int mmu_idx, int is_softmmu)
|
||||
{
|
||||
uint32_t opc;
|
||||
|
||||
if (rw == 2) {
|
||||
/* Instruction translation buffer miss */
|
||||
env->exception_index = EXCP_ITB_MISS;
|
||||
} else {
|
||||
if (env->ipr[IPR_EXC_ADDR] & 1)
|
||||
env->exception_index = EXCP_DTB_MISS_PAL;
|
||||
else
|
||||
env->exception_index = EXCP_DTB_MISS_NATIVE;
|
||||
opc = (ldl_code(env->pc) >> 21) << 4;
|
||||
if (rw) {
|
||||
opc |= 0x9;
|
||||
} else {
|
||||
opc |= 0x4;
|
||||
}
|
||||
env->ipr[IPR_MM_STAT] = opc;
|
||||
}
|
||||
target_ulong phys;
|
||||
int prot, fail;
|
||||
|
||||
fail = get_physical_address(env, addr, 1 << rw, mmu_idx, &phys, &prot);
|
||||
if (unlikely(fail >= 0)) {
|
||||
env->exception_index = EXCP_MMFAULT;
|
||||
env->trap_arg0 = addr;
|
||||
env->trap_arg1 = fail;
|
||||
env->trap_arg2 = (rw == 2 ? -1 : rw);
|
||||
return 1;
|
||||
}
|
||||
|
||||
int cpu_alpha_mfpr (CPUState *env, int iprn, uint64_t *valp)
|
||||
{
|
||||
uint64_t hwpcb;
|
||||
int ret = 0;
|
||||
|
||||
hwpcb = env->ipr[IPR_PCBB];
|
||||
switch (iprn) {
|
||||
case IPR_ASN:
|
||||
if (env->features & FEATURE_ASN)
|
||||
*valp = env->ipr[IPR_ASN];
|
||||
else
|
||||
*valp = 0;
|
||||
break;
|
||||
case IPR_ASTEN:
|
||||
*valp = ((int64_t)(env->ipr[IPR_ASTEN] << 60)) >> 60;
|
||||
break;
|
||||
case IPR_ASTSR:
|
||||
*valp = ((int64_t)(env->ipr[IPR_ASTSR] << 60)) >> 60;
|
||||
break;
|
||||
case IPR_DATFX:
|
||||
/* Write only */
|
||||
ret = -1;
|
||||
break;
|
||||
case IPR_ESP:
|
||||
if (env->features & FEATURE_SPS)
|
||||
*valp = env->ipr[IPR_ESP];
|
||||
else
|
||||
*valp = ldq_raw(hwpcb + 8);
|
||||
break;
|
||||
case IPR_FEN:
|
||||
*valp = ((int64_t)(env->ipr[IPR_FEN] << 63)) >> 63;
|
||||
break;
|
||||
case IPR_IPIR:
|
||||
/* Write-only */
|
||||
ret = -1;
|
||||
break;
|
||||
case IPR_IPL:
|
||||
*valp = ((int64_t)(env->ipr[IPR_IPL] << 59)) >> 59;
|
||||
break;
|
||||
case IPR_KSP:
|
||||
if (!(env->ipr[IPR_EXC_ADDR] & 1)) {
|
||||
ret = -1;
|
||||
} else {
|
||||
if (env->features & FEATURE_SPS)
|
||||
*valp = env->ipr[IPR_KSP];
|
||||
else
|
||||
*valp = ldq_raw(hwpcb + 0);
|
||||
}
|
||||
break;
|
||||
case IPR_MCES:
|
||||
*valp = ((int64_t)(env->ipr[IPR_MCES] << 59)) >> 59;
|
||||
break;
|
||||
case IPR_PERFMON:
|
||||
/* Implementation specific */
|
||||
*valp = 0;
|
||||
break;
|
||||
case IPR_PCBB:
|
||||
*valp = ((int64_t)env->ipr[IPR_PCBB] << 16) >> 16;
|
||||
break;
|
||||
case IPR_PRBR:
|
||||
*valp = env->ipr[IPR_PRBR];
|
||||
break;
|
||||
case IPR_PTBR:
|
||||
*valp = env->ipr[IPR_PTBR];
|
||||
break;
|
||||
case IPR_SCBB:
|
||||
*valp = (int64_t)((int32_t)env->ipr[IPR_SCBB]);
|
||||
break;
|
||||
case IPR_SIRR:
|
||||
/* Write-only */
|
||||
ret = -1;
|
||||
break;
|
||||
case IPR_SISR:
|
||||
*valp = (int64_t)((int16_t)env->ipr[IPR_SISR]);
|
||||
case IPR_SSP:
|
||||
if (env->features & FEATURE_SPS)
|
||||
*valp = env->ipr[IPR_SSP];
|
||||
else
|
||||
*valp = ldq_raw(hwpcb + 16);
|
||||
break;
|
||||
case IPR_SYSPTBR:
|
||||
if (env->features & FEATURE_VIRBND)
|
||||
*valp = env->ipr[IPR_SYSPTBR];
|
||||
else
|
||||
ret = -1;
|
||||
break;
|
||||
case IPR_TBCHK:
|
||||
if ((env->features & FEATURE_TBCHK)) {
|
||||
/* XXX: TODO */
|
||||
*valp = 0;
|
||||
ret = -1;
|
||||
} else {
|
||||
ret = -1;
|
||||
}
|
||||
break;
|
||||
case IPR_TBIA:
|
||||
/* Write-only */
|
||||
ret = -1;
|
||||
break;
|
||||
case IPR_TBIAP:
|
||||
/* Write-only */
|
||||
ret = -1;
|
||||
break;
|
||||
case IPR_TBIS:
|
||||
/* Write-only */
|
||||
ret = -1;
|
||||
break;
|
||||
case IPR_TBISD:
|
||||
/* Write-only */
|
||||
ret = -1;
|
||||
break;
|
||||
case IPR_TBISI:
|
||||
/* Write-only */
|
||||
ret = -1;
|
||||
break;
|
||||
case IPR_USP:
|
||||
if (env->features & FEATURE_SPS)
|
||||
*valp = env->ipr[IPR_USP];
|
||||
else
|
||||
*valp = ldq_raw(hwpcb + 24);
|
||||
break;
|
||||
case IPR_VIRBND:
|
||||
if (env->features & FEATURE_VIRBND)
|
||||
*valp = env->ipr[IPR_VIRBND];
|
||||
else
|
||||
ret = -1;
|
||||
break;
|
||||
case IPR_VPTB:
|
||||
*valp = env->ipr[IPR_VPTB];
|
||||
break;
|
||||
case IPR_WHAMI:
|
||||
*valp = env->ipr[IPR_WHAMI];
|
||||
break;
|
||||
default:
|
||||
/* Invalid */
|
||||
ret = -1;
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int cpu_alpha_mtpr (CPUState *env, int iprn, uint64_t val, uint64_t *oldvalp)
|
||||
{
|
||||
uint64_t hwpcb, tmp64;
|
||||
uint8_t tmp8;
|
||||
int ret = 0;
|
||||
|
||||
hwpcb = env->ipr[IPR_PCBB];
|
||||
switch (iprn) {
|
||||
case IPR_ASN:
|
||||
/* Read-only */
|
||||
ret = -1;
|
||||
break;
|
||||
case IPR_ASTEN:
|
||||
tmp8 = ((int8_t)(env->ipr[IPR_ASTEN] << 4)) >> 4;
|
||||
*oldvalp = tmp8;
|
||||
tmp8 &= val & 0xF;
|
||||
tmp8 |= (val >> 4) & 0xF;
|
||||
env->ipr[IPR_ASTEN] &= ~0xF;
|
||||
env->ipr[IPR_ASTEN] |= tmp8;
|
||||
ret = 1;
|
||||
break;
|
||||
case IPR_ASTSR:
|
||||
tmp8 = ((int8_t)(env->ipr[IPR_ASTSR] << 4)) >> 4;
|
||||
*oldvalp = tmp8;
|
||||
tmp8 &= val & 0xF;
|
||||
tmp8 |= (val >> 4) & 0xF;
|
||||
env->ipr[IPR_ASTSR] &= ~0xF;
|
||||
env->ipr[IPR_ASTSR] |= tmp8;
|
||||
ret = 1;
|
||||
case IPR_DATFX:
|
||||
env->ipr[IPR_DATFX] &= ~0x1;
|
||||
env->ipr[IPR_DATFX] |= val & 1;
|
||||
tmp64 = ldq_raw(hwpcb + 56);
|
||||
tmp64 &= ~0x8000000000000000ULL;
|
||||
tmp64 |= (val & 1) << 63;
|
||||
stq_raw(hwpcb + 56, tmp64);
|
||||
break;
|
||||
case IPR_ESP:
|
||||
if (env->features & FEATURE_SPS)
|
||||
env->ipr[IPR_ESP] = val;
|
||||
else
|
||||
stq_raw(hwpcb + 8, val);
|
||||
break;
|
||||
case IPR_FEN:
|
||||
env->ipr[IPR_FEN] = val & 1;
|
||||
tmp64 = ldq_raw(hwpcb + 56);
|
||||
tmp64 &= ~1;
|
||||
tmp64 |= val & 1;
|
||||
stq_raw(hwpcb + 56, tmp64);
|
||||
break;
|
||||
case IPR_IPIR:
|
||||
/* XXX: TODO: Send IRQ to CPU #ir[16] */
|
||||
break;
|
||||
case IPR_IPL:
|
||||
*oldvalp = ((int64_t)(env->ipr[IPR_IPL] << 59)) >> 59;
|
||||
env->ipr[IPR_IPL] &= ~0x1F;
|
||||
env->ipr[IPR_IPL] |= val & 0x1F;
|
||||
/* XXX: may issue an interrupt or ASR _now_ */
|
||||
ret = 1;
|
||||
break;
|
||||
case IPR_KSP:
|
||||
if (!(env->ipr[IPR_EXC_ADDR] & 1)) {
|
||||
ret = -1;
|
||||
} else {
|
||||
if (env->features & FEATURE_SPS)
|
||||
env->ipr[IPR_KSP] = val;
|
||||
else
|
||||
stq_raw(hwpcb + 0, val);
|
||||
}
|
||||
break;
|
||||
case IPR_MCES:
|
||||
env->ipr[IPR_MCES] &= ~((val & 0x7) | 0x18);
|
||||
env->ipr[IPR_MCES] |= val & 0x18;
|
||||
break;
|
||||
case IPR_PERFMON:
|
||||
/* Implementation specific */
|
||||
*oldvalp = 0;
|
||||
ret = 1;
|
||||
break;
|
||||
case IPR_PCBB:
|
||||
/* Read-only */
|
||||
ret = -1;
|
||||
break;
|
||||
case IPR_PRBR:
|
||||
env->ipr[IPR_PRBR] = val;
|
||||
break;
|
||||
case IPR_PTBR:
|
||||
/* Read-only */
|
||||
ret = -1;
|
||||
break;
|
||||
case IPR_SCBB:
|
||||
env->ipr[IPR_SCBB] = (uint32_t)val;
|
||||
break;
|
||||
case IPR_SIRR:
|
||||
if (val & 0xF) {
|
||||
env->ipr[IPR_SISR] |= 1 << (val & 0xF);
|
||||
/* XXX: request a software interrupt _now_ */
|
||||
}
|
||||
break;
|
||||
case IPR_SISR:
|
||||
/* Read-only */
|
||||
ret = -1;
|
||||
break;
|
||||
case IPR_SSP:
|
||||
if (env->features & FEATURE_SPS)
|
||||
env->ipr[IPR_SSP] = val;
|
||||
else
|
||||
stq_raw(hwpcb + 16, val);
|
||||
break;
|
||||
case IPR_SYSPTBR:
|
||||
if (env->features & FEATURE_VIRBND)
|
||||
env->ipr[IPR_SYSPTBR] = val;
|
||||
else
|
||||
ret = -1;
|
||||
break;
|
||||
case IPR_TBCHK:
|
||||
/* Read-only */
|
||||
ret = -1;
|
||||
break;
|
||||
case IPR_TBIA:
|
||||
tlb_flush(env, 1);
|
||||
break;
|
||||
case IPR_TBIAP:
|
||||
tlb_flush(env, 1);
|
||||
break;
|
||||
case IPR_TBIS:
|
||||
tlb_flush_page(env, val);
|
||||
break;
|
||||
case IPR_TBISD:
|
||||
tlb_flush_page(env, val);
|
||||
break;
|
||||
case IPR_TBISI:
|
||||
tlb_flush_page(env, val);
|
||||
break;
|
||||
case IPR_USP:
|
||||
if (env->features & FEATURE_SPS)
|
||||
env->ipr[IPR_USP] = val;
|
||||
else
|
||||
stq_raw(hwpcb + 24, val);
|
||||
break;
|
||||
case IPR_VIRBND:
|
||||
if (env->features & FEATURE_VIRBND)
|
||||
env->ipr[IPR_VIRBND] = val;
|
||||
else
|
||||
ret = -1;
|
||||
break;
|
||||
case IPR_VPTB:
|
||||
env->ipr[IPR_VPTB] = val;
|
||||
break;
|
||||
case IPR_WHAMI:
|
||||
/* Read-only */
|
||||
ret = -1;
|
||||
break;
|
||||
default:
|
||||
/* Invalid */
|
||||
ret = -1;
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
    tlb_set_page(env, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return 0;
}
#endif /* USER_ONLY */

void do_interrupt (CPUState *env)
{
    int excp;
    int i = env->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name = "<unknown>";

        switch (i) {
        case EXCP_RESET:
            name = "reset";
            break;
        case EXCP_MCHK:
            name = "mchk";
            break;
        case EXCP_SMP_INTERRUPT:
            name = "smp_interrupt";
            break;
        case EXCP_CLK_INTERRUPT:
            name = "clk_interrupt";
            break;
        case EXCP_DEV_INTERRUPT:
            name = "dev_interrupt";
            break;
        case EXCP_MMFAULT:
            name = "mmfault";
            break;
        case EXCP_UNALIGN:
            name = "unalign";
            break;
        case EXCP_OPCDEC:
            name = "opcdec";
            break;
        case EXCP_ARITH:
            name = "arith";
            break;
        case EXCP_FEN:
            name = "fen";
            break;
        case EXCP_CALL_PAL:
            name = "call_pal";
            break;
        case EXCP_STL_C:
            name = "stl_c";
            break;
        case EXCP_STQ_C:
            name = "stq_c";
            break;
        }
        qemu_log("INT %6d: %s(%#x) pc=%016" PRIx64 " sp=%016" PRIx64 "\n",
                 ++count, name, env->error_code, env->pc, env->ir[IR_SP]);
    }

    env->ipr[IPR_EXC_ADDR] = env->pc | 1;
    excp = env->exception_index;
    env->exception_index = -1;
    env->error_code = 0;
    /* XXX: disable interrupts and memory mapping */
    if (env->ipr[IPR_PAL_BASE] != -1ULL) {
        /* We use native PALcode */
        env->pc = env->ipr[IPR_PAL_BASE] + excp;

#if !defined(CONFIG_USER_ONLY)
    switch (i) {
    case EXCP_RESET:
        i = 0x0000;
        break;
    case EXCP_MCHK:
        i = 0x0080;
        break;
    case EXCP_SMP_INTERRUPT:
        i = 0x0100;
        break;
    case EXCP_CLK_INTERRUPT:
        i = 0x0180;
        break;
    case EXCP_DEV_INTERRUPT:
        i = 0x0200;
        break;
    case EXCP_MMFAULT:
        i = 0x0280;
        break;
    case EXCP_UNALIGN:
        i = 0x0300;
        break;
    case EXCP_OPCDEC:
        i = 0x0380;
        break;
    case EXCP_ARITH:
        i = 0x0400;
        break;
    case EXCP_FEN:
        i = 0x0480;
        break;
    case EXCP_CALL_PAL:
        i = env->error_code;
        /* There are 64 entry points for both privileged and unprivileged,
           with bit 0x80 indicating unprivileged.  Each entry point gets
           64 bytes to do its job.  */
        if (i & 0x80) {
            i = 0x2000 + (i - 0x80) * 64;
        } else {
            /* We use emulated PALcode */
            call_pal(env);
            /* Emulate REI */
            env->pc = env->ipr[IPR_EXC_ADDR] & ~7;
            env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
            /* XXX: re-enable interrupts and memory mapping */
            i = 0x1000 + i * 64;
        }
        break;
    default:
        cpu_abort(env, "Unhandled CPU exception");
    }

    /* Remember where the exception happened.  Emulate real hardware in
       that the low bit of the PC indicates PALmode.  */
    env->exc_addr = env->pc | env->pal_mode;

    /* Continue execution at the PALcode entry point.  */
    env->pc = env->palbr + i;

    /* Switch to PALmode.  */
    if (!env->pal_mode) {
        env->pal_mode = 1;
        swap_shadow_regs(env);
    }
#endif /* !USER_ONLY */
}
#endif

void cpu_dump_state (CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                     int flags)
@@ -548,7 +465,7 @@ void cpu_dump_state (CPUState *env, FILE *f, fprintf_function cpu_fprintf,
    };
    int i;

    cpu_fprintf(f, " PC " TARGET_FMT_lx " PS " TARGET_FMT_lx "\n",
    cpu_fprintf(f, " PC " TARGET_FMT_lx " PS %02x\n",
                env->pc, env->ps);
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "IR%02d %s " TARGET_FMT_lx " ", i,
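The EXCP_CALL_PAL case computes the entry point from the PAL function number as described in the comment: unprivileged calls (bit 0x80 set) start at offset 0x2000 and privileged calls at 0x1000, 64 bytes apart, and the offset is added to palbr. A hedged standalone sketch of just that arithmetic:

    #include <stdint.h>

    /* Sketch: offset of a CALL_PAL entry inside the PALcode image, per
       the layout in the comment above.  For example, an unprivileged
       function 0x83 lands at 0x2000 + 3 * 64 = 0x20c0 from the PAL base. */
    static uint64_t call_pal_entry_offset(uint32_t func)
    {
        if (func & 0x80) {
            return 0x2000 + (func - 0x80) * 64;   /* unprivileged entries */
        }
        return 0x1000 + func * 64;                /* privileged entries */
    }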
|
@ -100,27 +100,19 @@ DEF_HELPER_1(ieee_input_cmp, i64, i64)
|
||||
DEF_HELPER_1(ieee_input_s, i64, i64)
|
||||
|
||||
#if !defined (CONFIG_USER_ONLY)
|
||||
DEF_HELPER_0(hw_rei, void)
|
||||
DEF_HELPER_1(hw_ret, void, i64)
|
||||
DEF_HELPER_2(mfpr, i64, int, i64)
|
||||
DEF_HELPER_2(mtpr, void, int, i64)
|
||||
DEF_HELPER_0(set_alt_mode, void)
|
||||
DEF_HELPER_0(restore_mode, void)
|
||||
|
||||
DEF_HELPER_1(ld_virt_to_phys, i64, i64)
|
||||
DEF_HELPER_1(st_virt_to_phys, i64, i64)
|
||||
DEF_HELPER_2(ldl_raw, void, i64, i64)
|
||||
DEF_HELPER_2(ldq_raw, void, i64, i64)
|
||||
DEF_HELPER_2(ldl_l_raw, void, i64, i64)
|
||||
DEF_HELPER_2(ldq_l_raw, void, i64, i64)
|
||||
DEF_HELPER_2(ldl_kernel, void, i64, i64)
|
||||
DEF_HELPER_2(ldq_kernel, void, i64, i64)
|
||||
DEF_HELPER_2(ldl_data, void, i64, i64)
|
||||
DEF_HELPER_2(ldq_data, void, i64, i64)
|
||||
DEF_HELPER_2(stl_raw, void, i64, i64)
|
||||
DEF_HELPER_2(stq_raw, void, i64, i64)
|
||||
DEF_HELPER_2(stl_c_raw, i64, i64, i64)
|
||||
DEF_HELPER_2(stq_c_raw, i64, i64, i64)
|
||||
DEF_HELPER_1(ldl_phys, i64, i64)
|
||||
DEF_HELPER_1(ldq_phys, i64, i64)
|
||||
DEF_HELPER_1(ldl_l_phys, i64, i64)
|
||||
DEF_HELPER_1(ldq_l_phys, i64, i64)
|
||||
DEF_HELPER_2(stl_phys, void, i64, i64)
|
||||
DEF_HELPER_2(stq_phys, void, i64, i64)
|
||||
DEF_HELPER_2(stl_c_phys, i64, i64, i64)
|
||||
DEF_HELPER_2(stq_c_phys, i64, i64, i64)
|
||||
|
||||
DEF_HELPER_FLAGS_0(tbia, TCG_CALL_CONST, void)
|
||||
DEF_HELPER_FLAGS_1(tbis, TCG_CALL_CONST, void, i64)
|
||||
#endif
|
||||
|
||||
#include "def-helper.h"
|
||||
|
target-alpha/machine.c: 87 changes (new file)
@ -0,0 +1,87 @@
|
||||
#include "hw/hw.h"
|
||||
#include "hw/boards.h"
|
||||
|
||||
static int get_fpcr(QEMUFile *f, void *opaque, size_t size)
|
||||
{
|
||||
CPUAlphaState *env = opaque;
|
||||
cpu_alpha_store_fpcr(env, qemu_get_be64(f));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void put_fpcr(QEMUFile *f, void *opaque, size_t size)
|
||||
{
|
||||
CPUAlphaState *env = opaque;
|
||||
qemu_put_be64(f, cpu_alpha_load_fpcr(env));
|
||||
}
|
||||
|
||||
static const VMStateInfo vmstate_fpcr = {
|
||||
.name = "fpcr",
|
||||
.get = get_fpcr,
|
||||
.put = put_fpcr,
|
||||
};
|
||||
|
||||
static VMStateField vmstate_cpu_fields[] = {
|
||||
VMSTATE_UINTTL_ARRAY(ir, CPUState, 31),
|
||||
VMSTATE_UINTTL_ARRAY(fir, CPUState, 31),
|
||||
/* Save the architecture value of the fpcr, not the internally
|
||||
expanded version. Since this architecture value does not
|
||||
exist in memory to be stored, this requires a bit of hoop
|
||||
jumping. We want OFFSET=0 so that we effectively pass ENV
|
||||
to the helper functions, and we need to fill in the name by
|
||||
hand since there's no field of that name. */
|
||||
{
|
||||
.name = "fpcr",
|
||||
.version_id = 0,
|
||||
.size = sizeof(uint64_t),
|
||||
.info = &vmstate_fpcr,
|
||||
.flags = VMS_SINGLE,
|
||||
.offset = 0
|
||||
},
|
||||
VMSTATE_UINTTL(pc, CPUState),
|
||||
VMSTATE_UINTTL(unique, CPUState),
|
||||
VMSTATE_UINTTL(lock_addr, CPUState),
|
||||
VMSTATE_UINTTL(lock_value, CPUState),
|
||||
/* Note that lock_st_addr is not saved; it is a temporary
|
||||
used during the execution of the st[lq]_c insns. */
|
||||
|
||||
VMSTATE_UINT8(ps, CPUState),
|
||||
VMSTATE_UINT8(intr_flag, CPUState),
|
||||
VMSTATE_UINT8(pal_mode, CPUState),
|
||||
VMSTATE_UINT8(fen, CPUState),
|
||||
|
||||
VMSTATE_UINT32(pcc_ofs, CPUState),
|
||||
|
||||
VMSTATE_UINTTL(trap_arg0, CPUState),
|
||||
VMSTATE_UINTTL(trap_arg1, CPUState),
|
||||
VMSTATE_UINTTL(trap_arg2, CPUState),
|
||||
|
||||
VMSTATE_UINTTL(exc_addr, CPUState),
|
||||
VMSTATE_UINTTL(palbr, CPUState),
|
||||
VMSTATE_UINTTL(ptbr, CPUState),
|
||||
VMSTATE_UINTTL(vptptr, CPUState),
|
||||
VMSTATE_UINTTL(sysval, CPUState),
|
||||
VMSTATE_UINTTL(usp, CPUState),
|
||||
|
||||
VMSTATE_UINTTL_ARRAY(shadow, CPUState, 8),
|
||||
VMSTATE_UINTTL_ARRAY(scratch, CPUState, 24),
|
||||
|
||||
VMSTATE_END_OF_LIST()
|
||||
};
|
||||
|
||||
static const VMStateDescription vmstate_cpu = {
|
||||
.name = "cpu",
|
||||
.version_id = 1,
|
||||
.minimum_version_id = 1,
|
||||
.minimum_version_id_old = 1,
|
||||
.fields = vmstate_cpu_fields,
|
||||
};
|
||||
|
||||
void cpu_save(QEMUFile *f, void *opaque)
|
||||
{
|
||||
vmstate_save_state(f, &vmstate_cpu, opaque);
|
||||
}
|
||||
|
||||
int cpu_load(QEMUFile *f, void *opaque, int version_id)
|
||||
{
|
||||
return vmstate_load_state(f, &vmstate_cpu, opaque, version_id);
|
||||
}
|
@@ -25,6 +25,9 @@

/*****************************************************************************/
/* Exceptions processing helpers */

/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated.  */
void QEMU_NORETURN helper_excp(int excp, int error)
{
    env->exception_index = excp;
@@ -32,10 +35,47 @@ void QEMU_NORETURN helper_excp (int excp, int error)
    cpu_loop_exit();
}

static void do_restore_state(void *retaddr)
{
    unsigned long pc = (unsigned long)retaddr;

    if (pc) {
        TranslationBlock *tb = tb_find_pc(pc);
        if (tb) {
            cpu_restore_state(tb, env, pc);
        }
    }
}

/* This may be called from any of the helpers to set up EXCEPTION_INDEX.  */
static void QEMU_NORETURN dynamic_excp(int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    do_restore_state(GETPC());
    cpu_loop_exit();
}

static void QEMU_NORETURN arith_excp(int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(EXCP_ARITH, 0);
}

uint64_t helper_load_pcc (void)
{
    /* ??? This isn't a timer for which we have any rate info.  */
#ifndef CONFIG_USER_ONLY
    /* In system mode we have access to a decent high-resolution clock.
       In order to make OS-level time accounting work with the RPCC,
       present it with a well-timed clock fixed at 250MHz.  */
    return (((uint64_t)env->pcc_ofs << 32)
            | (uint32_t)(qemu_get_clock_ns(vm_clock) >> 2));
#else
    /* In user-mode, vm_clock doesn't exist.  Just pass through the host cpu
       clock ticks.  Also, don't bother taking PCC_OFS into account.  */
    return (uint32_t)cpu_get_real_ticks();
#endif
}

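The system-mode branch of helper_load_pcc() shifts the vm_clock nanosecond count right by two, i.e. one tick per 4 ns, which is exactly the fixed 250 MHz rate promised in the comment, with pcc_ofs supplying the high half of the register. A hedged arithmetic sketch in plain C, without the QEMU clock API:

    #include <stdint.h>

    /* Sketch: ns >> 2 gives one count every 4 ns, so after 1e9 ns (one
       second) the low half has advanced by 250,000,000 counts = 250 MHz. */
    static uint64_t rpcc_value(uint32_t pcc_ofs, int64_t now_ns)
    {
        return ((uint64_t)pcc_ofs << 32) | (uint32_t)(now_ns >> 2);
    }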
uint64_t helper_load_fpcr (void)
|
||||
@ -53,7 +93,7 @@ uint64_t helper_addqv (uint64_t op1, uint64_t op2)
|
||||
uint64_t tmp = op1;
|
||||
op1 += op2;
|
||||
if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
|
||||
helper_excp(EXCP_ARITH, EXC_M_IOV);
|
||||
arith_excp(EXC_M_IOV, 0);
|
||||
}
|
||||
return op1;
|
||||
}
|
||||
@ -63,7 +103,7 @@ uint64_t helper_addlv (uint64_t op1, uint64_t op2)
|
||||
uint64_t tmp = op1;
|
||||
op1 = (uint32_t)(op1 + op2);
|
||||
if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
|
||||
helper_excp(EXCP_ARITH, EXC_M_IOV);
|
||||
arith_excp(EXC_M_IOV, 0);
|
||||
}
|
||||
return op1;
|
||||
}
|
||||
@ -73,7 +113,7 @@ uint64_t helper_subqv (uint64_t op1, uint64_t op2)
|
||||
uint64_t res;
|
||||
res = op1 - op2;
|
||||
if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
|
||||
helper_excp(EXCP_ARITH, EXC_M_IOV);
|
||||
arith_excp(EXC_M_IOV, 0);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
@ -83,7 +123,7 @@ uint64_t helper_sublv (uint64_t op1, uint64_t op2)
|
||||
uint32_t res;
|
||||
res = op1 - op2;
|
||||
if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
|
||||
helper_excp(EXCP_ARITH, EXC_M_IOV);
|
||||
arith_excp(EXC_M_IOV, 0);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
@ -93,7 +133,7 @@ uint64_t helper_mullv (uint64_t op1, uint64_t op2)
|
||||
int64_t res = (int64_t)op1 * (int64_t)op2;
|
||||
|
||||
if (unlikely((int32_t)res != res)) {
|
||||
helper_excp(EXCP_ARITH, EXC_M_IOV);
|
||||
arith_excp(EXC_M_IOV, 0);
|
||||
}
|
||||
return (int64_t)((int32_t)res);
|
||||
}
|
||||
@ -105,7 +145,7 @@ uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
|
||||
muls64(&tl, &th, op1, op2);
|
||||
/* If th != 0 && th != -1, then we had an overflow */
|
||||
if (unlikely((th + 1) > 1)) {
|
||||
helper_excp(EXCP_ARITH, EXC_M_IOV);
|
||||
arith_excp(EXC_M_IOV, 0);
|
||||
}
|
||||
return tl;
|
||||
}
|
||||
@ -373,8 +413,6 @@ void helper_fp_exc_raise(uint32_t exc, uint32_t regno)
|
||||
if (exc) {
|
||||
uint32_t hw_exc = 0;
|
||||
|
||||
env->ipr[IPR_EXC_MASK] |= 1ull << regno;
|
||||
|
||||
if (exc & float_flag_invalid) {
|
||||
hw_exc |= EXC_M_INV;
|
||||
}
|
||||
@ -390,7 +428,8 @@ void helper_fp_exc_raise(uint32_t exc, uint32_t regno)
|
||||
if (exc & float_flag_inexact) {
|
||||
hw_exc |= EXC_M_INE;
|
||||
}
|
||||
helper_excp(EXCP_ARITH, hw_exc);
|
||||
|
||||
arith_excp(hw_exc, 1ull << regno);
|
||||
}
|
||||
}
|
||||
|
||||
@ -420,7 +459,7 @@ uint64_t helper_ieee_input(uint64_t val)
|
||||
if (env->fpcr_dnz) {
|
||||
val &= 1ull << 63;
|
||||
} else {
|
||||
helper_excp(EXCP_ARITH, EXC_M_UNF);
|
||||
arith_excp(EXC_M_UNF, 0);
|
||||
}
|
||||
}
|
||||
} else if (exp == 0x7ff) {
|
||||
@ -428,7 +467,7 @@ uint64_t helper_ieee_input(uint64_t val)
|
||||
/* ??? I'm not sure these exception bit flags are correct. I do
|
||||
know that the Linux kernel, at least, doesn't rely on them and
|
||||
just emulates the insn to figure out what exception to use. */
|
||||
helper_excp(EXCP_ARITH, frac ? EXC_M_INV : EXC_M_FOV);
|
||||
arith_excp(frac ? EXC_M_INV : EXC_M_FOV, 0);
|
||||
}
|
||||
return val;
|
||||
}
|
||||
@ -445,12 +484,12 @@ uint64_t helper_ieee_input_cmp(uint64_t val)
|
||||
if (env->fpcr_dnz) {
|
||||
val &= 1ull << 63;
|
||||
} else {
|
||||
helper_excp(EXCP_ARITH, EXC_M_UNF);
|
||||
arith_excp(EXC_M_UNF, 0);
|
||||
}
|
||||
}
|
||||
} else if (exp == 0x7ff && frac) {
|
||||
/* NaN. */
|
||||
helper_excp(EXCP_ARITH, EXC_M_INV);
|
||||
arith_excp(EXC_M_INV, 0);
|
||||
}
|
||||
return val;
|
||||
}
|
||||
@ -513,7 +552,7 @@ static inline float32 f_to_float32(uint64_t a)
|
||||
|
||||
if (unlikely(!exp && mant_sig)) {
|
||||
/* Reserved operands / Dirty zero */
|
||||
helper_excp(EXCP_OPCDEC, 0);
|
||||
dynamic_excp(EXCP_OPCDEC, 0);
|
||||
}
|
||||
|
||||
if (exp < 3) {
|
||||
@ -643,7 +682,7 @@ static inline float64 g_to_float64(uint64_t a)
|
||||
|
||||
if (!exp && mant_sig) {
|
||||
/* Reserved operands / Dirty zero */
|
||||
helper_excp(EXCP_OPCDEC, 0);
|
||||
dynamic_excp(EXCP_OPCDEC, 0);
|
||||
}
|
||||
|
||||
if (exp < 3) {
|
||||
@ -1156,187 +1195,122 @@ uint64_t helper_cvtqg (uint64_t a)
|
||||
|
||||
/* PALcode support special instructions */
|
||||
#if !defined (CONFIG_USER_ONLY)
|
||||
void helper_hw_rei (void)
|
||||
{
|
||||
env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
|
||||
env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
|
||||
env->intr_flag = 0;
|
||||
env->lock_addr = -1;
|
||||
/* XXX: re-enable interrupts and memory mapping */
|
||||
}
|
||||
|
||||
void helper_hw_ret (uint64_t a)
|
||||
{
|
||||
env->pc = a & ~3;
|
||||
env->ipr[IPR_EXC_ADDR] = a & 1;
|
||||
env->intr_flag = 0;
|
||||
env->lock_addr = -1;
|
||||
/* XXX: re-enable interrupts and memory mapping */
|
||||
if ((a & 1) == 0) {
|
||||
env->pal_mode = 0;
|
||||
swap_shadow_regs(env);
|
||||
}
|
||||
}
|
||||
|
||||
uint64_t helper_mfpr (int iprn, uint64_t val)
|
||||
void helper_tbia(void)
|
||||
{
|
||||
uint64_t tmp;
|
||||
|
||||
if (cpu_alpha_mfpr(env, iprn, &tmp) == 0)
|
||||
val = tmp;
|
||||
|
||||
return val;
|
||||
tlb_flush(env, 1);
|
||||
}
|
||||
|
||||
void helper_mtpr (int iprn, uint64_t val)
|
||||
void helper_tbis(uint64_t p)
|
||||
{
|
||||
cpu_alpha_mtpr(env, iprn, val, NULL);
|
||||
tlb_flush_page(env, p);
|
||||
}
|
||||
|
||||
void helper_set_alt_mode (void)
|
||||
{
|
||||
env->saved_mode = env->ps & 0xC;
|
||||
env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
|
||||
}
|
||||
|
||||
void helper_restore_mode (void)
|
||||
{
|
||||
env->ps = (env->ps & ~0xC) | env->saved_mode;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/*****************************************************************************/
|
||||
/* Softmmu support */
|
||||
#if !defined (CONFIG_USER_ONLY)
|
||||
|
||||
/* XXX: the two following helpers are pure hacks.
|
||||
* Hopefully, we emulate the PALcode, then we should never see
|
||||
* HW_LD / HW_ST instructions.
|
||||
*/
|
||||
uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
|
||||
uint64_t helper_ldl_phys(uint64_t p)
|
||||
{
|
||||
uint64_t tlb_addr, physaddr;
|
||||
int index, mmu_idx;
|
||||
void *retaddr;
|
||||
|
||||
mmu_idx = cpu_mmu_index(env);
|
||||
index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
|
||||
redo:
|
||||
tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
|
||||
if ((virtaddr & TARGET_PAGE_MASK) ==
|
||||
(tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
|
||||
physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
|
||||
} else {
|
||||
/* the page is not in the TLB : fill it */
|
||||
retaddr = GETPC();
|
||||
tlb_fill(virtaddr, 0, mmu_idx, retaddr);
|
||||
goto redo;
|
||||
}
|
||||
return physaddr;
|
||||
return (int32_t)ldl_phys(p);
|
||||
}
|
||||
|
||||
uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
|
||||
uint64_t helper_ldq_phys(uint64_t p)
|
||||
{
|
||||
uint64_t tlb_addr, physaddr;
|
||||
int index, mmu_idx;
|
||||
void *retaddr;
|
||||
|
||||
mmu_idx = cpu_mmu_index(env);
|
||||
index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
|
||||
redo:
|
||||
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
|
||||
if ((virtaddr & TARGET_PAGE_MASK) ==
|
||||
(tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
|
||||
physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
|
||||
} else {
|
||||
/* the page is not in the TLB : fill it */
|
||||
retaddr = GETPC();
|
||||
tlb_fill(virtaddr, 1, mmu_idx, retaddr);
|
||||
goto redo;
|
||||
}
|
||||
return physaddr;
|
||||
return ldq_phys(p);
|
||||
}
|
||||
|
||||
void helper_ldl_raw(uint64_t t0, uint64_t t1)
|
||||
uint64_t helper_ldl_l_phys(uint64_t p)
|
||||
{
|
||||
ldl_raw(t1, t0);
|
||||
env->lock_addr = p;
|
||||
return env->lock_value = (int32_t)ldl_phys(p);
|
||||
}
|
||||
|
||||
void helper_ldq_raw(uint64_t t0, uint64_t t1)
|
||||
uint64_t helper_ldq_l_phys(uint64_t p)
|
||||
{
|
||||
ldq_raw(t1, t0);
|
||||
env->lock_addr = p;
|
||||
return env->lock_value = ldl_phys(p);
|
||||
}
|
||||
|
||||
void helper_ldl_l_raw(uint64_t t0, uint64_t t1)
|
||||
void helper_stl_phys(uint64_t p, uint64_t v)
|
||||
{
|
||||
env->lock = t1;
|
||||
ldl_raw(t1, t0);
|
||||
stl_phys(p, v);
|
||||
}
|
||||
|
||||
void helper_ldq_l_raw(uint64_t t0, uint64_t t1)
|
||||
void helper_stq_phys(uint64_t p, uint64_t v)
|
||||
{
|
||||
env->lock = t1;
|
||||
ldl_raw(t1, t0);
|
||||
stq_phys(p, v);
|
||||
}
|
||||
|
||||
void helper_ldl_kernel(uint64_t t0, uint64_t t1)
|
||||
uint64_t helper_stl_c_phys(uint64_t p, uint64_t v)
|
||||
{
|
||||
ldl_kernel(t1, t0);
|
||||
}
|
||||
uint64_t ret = 0;
|
||||
|
||||
void helper_ldq_kernel(uint64_t t0, uint64_t t1)
|
||||
{
|
||||
ldq_kernel(t1, t0);
|
||||
}
|
||||
|
||||
void helper_ldl_data(uint64_t t0, uint64_t t1)
|
||||
{
|
||||
ldl_data(t1, t0);
|
||||
}
|
||||
|
||||
void helper_ldq_data(uint64_t t0, uint64_t t1)
|
||||
{
|
||||
ldq_data(t1, t0);
|
||||
}
|
||||
|
||||
void helper_stl_raw(uint64_t t0, uint64_t t1)
|
||||
{
|
||||
stl_raw(t1, t0);
|
||||
}
|
||||
|
||||
void helper_stq_raw(uint64_t t0, uint64_t t1)
|
||||
{
|
||||
stq_raw(t1, t0);
|
||||
}
|
||||
|
||||
uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
|
||||
{
|
||||
uint64_t ret;
|
||||
|
||||
if (t1 == env->lock) {
|
||||
stl_raw(t1, t0);
|
||||
ret = 0;
|
||||
} else
|
||||
if (p == env->lock_addr) {
|
||||
int32_t old = ldl_phys(p);
|
||||
if (old == (int32_t)env->lock_value) {
|
||||
stl_phys(p, v);
|
||||
ret = 1;
|
||||
|
||||
env->lock = 1;
|
||||
}
|
||||
}
|
||||
env->lock_addr = -1;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
|
||||
uint64_t helper_stq_c_phys(uint64_t p, uint64_t v)
|
||||
{
|
||||
uint64_t ret;
|
||||
uint64_t ret = 0;
|
||||
|
||||
if (t1 == env->lock) {
|
||||
stq_raw(t1, t0);
|
||||
ret = 0;
|
||||
} else
|
||||
if (p == env->lock_addr) {
|
||||
uint64_t old = ldq_phys(p);
|
||||
if (old == env->lock_value) {
|
||||
stq_phys(p, v);
|
||||
ret = 1;
|
||||
|
||||
env->lock = 1;
|
||||
}
|
||||
}
|
||||
env->lock_addr = -1;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void QEMU_NORETURN do_unaligned_access(target_ulong addr, int is_write,
|
||||
int is_user, void *retaddr)
|
||||
{
|
||||
uint64_t pc;
|
||||
uint32_t insn;
|
||||
|
||||
do_restore_state(retaddr);
|
||||
|
||||
pc = env->pc;
|
||||
insn = ldl_code(pc);
|
||||
|
||||
env->trap_arg0 = addr;
|
||||
env->trap_arg1 = insn >> 26; /* opcode */
|
||||
env->trap_arg2 = (insn >> 21) & 31; /* dest regno */
|
||||
helper_excp(EXCP_UNALIGN, 0);
|
||||
}
|
||||
|
||||
void QEMU_NORETURN do_unassigned_access(target_phys_addr_t addr, int is_write,
|
||||
int is_exec, int unused, int size)
|
||||
{
|
||||
env->trap_arg0 = addr;
|
||||
env->trap_arg1 = is_write;
|
||||
dynamic_excp(EXCP_MCHK, 0);
|
||||
}
|
||||
|
||||
#define MMUSUFFIX _mmu
|
||||
#define ALIGNED_ONLY
|
||||
|
||||
#define SHIFT 0
|
||||
#include "softmmu_template.h"
|
||||
@ -1356,9 +1330,7 @@ uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
|
||||
/* XXX: fix it to restore all registers */
|
||||
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
|
||||
{
|
||||
TranslationBlock *tb;
|
||||
CPUState *saved_env;
|
||||
unsigned long pc;
|
||||
int ret;
|
||||
|
||||
/* XXX: hack to restore env in all cases, even if not called from
|
||||
@ -1366,21 +1338,11 @@ void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
|
||||
saved_env = env;
|
||||
env = cpu_single_env;
|
||||
ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
|
||||
if (!likely(ret == 0)) {
|
||||
if (likely(retaddr)) {
|
||||
/* now we have a real cpu fault */
|
||||
pc = (unsigned long)retaddr;
|
||||
tb = tb_find_pc(pc);
|
||||
if (likely(tb)) {
|
||||
/* the PC is inside the translated code. It means that we have
|
||||
a virtual CPU fault */
|
||||
cpu_restore_state(tb, env, pc);
|
||||
}
|
||||
}
|
||||
if (unlikely(ret != 0)) {
|
||||
do_restore_state(retaddr);
|
||||
/* Exception index and error code are already set */
|
||||
cpu_loop_exit();
|
||||
}
|
||||
env = saved_env;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
File diff suppressed because it is too large