OpenRISC cleanups and fixes for QEMU 3.0

Mostly patches from Richard Henderson fixing multiple things:
  * Fix singlestepping in GDB.
  * Use more TB linking.
  * Fixes to exit the TB after updating SPRs so that state changes take
    effect.
  * Significant optimizations and refactoring of the TLB.
  * Split out disassembly from translation.
  * Add qemu-or1k to qemu-binfmt-conf.sh.
  * Implement signal handling for linux-user.
 
 Then there are a few fixups from me:
  * Fix delay slot detection to match hardware; this was masking a bug
    in the Linux kernel.
  * Fix stores to the PIC mask register.
 -----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJbO32qAAoJEMOzHC1eZifkdHwP/2zV/dE3A0XvEynghJU4XeVe
 KlNRupCjp2civk9d9E+BJwIOVDMPfBQbBKfGC2fjzBGOuop8ZjUvvUuazNTEQoov
 9RfeXPMkP8xJUzGp02Gl87ZcMY9ZXJrqlPb2BaJ//8f/E0CF+91ODnkeLK62UXnb
 EbBCf5IlJy/B6Fp9icfdE09/nYx6SmQHPJZo9nC8xiWNZ8LewXn+DWGH81EHd8w1
 j99FV5ijImwB/LNOP6aVelyyKV9ZpInI6ZqC1LztWWaZftJ42TuvUq4vboP1P2s9
 vC9RV5oWl3/DL9HQxEphrynqKNvrxcceoQhxXirEzbLeYG83Tx1ed2a7J3x83gtY
 reChNmnwRuuchCot3cK4xDn2e0dY87dT24wtBM9HNmLsGgEzudZuGPwdlHrBoRFP
 o3exRItbfFr6SFdgUZaMjeC0vRSVU/FPqPRswJESWelEEMCi1R/CeQFKaw8BmaOG
 rw9Ed1rtX23R1Ce/ggEQgkxh2cWGyV1Tc0q5M09UDDmHKq0pd27d7CLlVMYsqZqk
 8guPjPzsYP12vNFTjx6tC8inOwJalK7kDVJv1a+c/bpyqaulcT22o7ck8rqlCZbU
 wpQbhAAGbbVrwnjQevO11MZsXk+6FANw6KIXxEDdDVbfqQ+WLtHOfXsUkG/xVUuR
 K1hiEeYKl77HCrDFkhqB
 =mbu2
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/shorne/tags/pull-or-20180703' into staging

OpenRISC cleanups and fixes for QEMU 3.0

Mostly patches from Richard Henderson fixing multiple things:
 * Fix singlestepping in GDB.
 * Use more TB linking.
 * Fixes to exit the TB after updating SPRs so that state changes take
   effect.
 * Significant optimizations and refactoring of the TLB.
 * Split out disassembly from translation.
 * Add qemu-or1k to qemu-binfmt-conf.sh.
 * Implement signal handling for linux-user.

Then there are a few fixups from me:
 * Fix delay slot detection to match hardware; this was masking a bug
   in the Linux kernel.
 * Fix stores to the PIC mask register.

# gpg: Signature made Tue 03 Jul 2018 14:44:10 BST
# gpg:                using RSA key C3B31C2D5E6627E4
# gpg: Good signature from "Stafford Horne <shorne@gmail.com>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: D9C4 7354 AEF8 6C10 3A25  EFF1 C3B3 1C2D 5E66 27E4

* remotes/shorne/tags/pull-or-20180703: (25 commits)
  target/openrisc: Fix writes to interrupt mask register
  target/openrisc: Fix delay slot exception flag to match spec
  linux-user: Fix struct sigaltstack for openrisc
  linux-user: Implement signals for openrisc
  target/openrisc: Add support in scripts/qemu-binfmt-conf.sh
  target/openrisc: Reorg tlb lookup
  target/openrisc: Increase the TLB size
  target/openrisc: Stub out handle_mmu_fault for softmmu
  target/openrisc: Use identical sizes for ITLB and DTLB
  target/openrisc: Fix cpu_mmu_index
  target/openrisc: Fix tlb flushing in mtspr
  target/openrisc: Reduce tlb to a single dimension
  target/openrisc: Merge mmu_helper.c into mmu.c
  target/openrisc: Remove indirect function calls for mmu
  target/openrisc: Merge tlb allocation into CPUOpenRISCState
  target/openrisc: Form the spr index from tcg
  target/openrisc: Exit the TB after l.mtspr
  target/openrisc: Split out is_user
  target/openrisc: Link more translation blocks
  target/openrisc: Fix singlestep_enabled
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit f988c7e191 (Peter Maydell, 2018-07-03 16:04:41 +01:00)
17 changed files with 604 additions and 754 deletions

@ -21,124 +21,69 @@
#include "signal-common.h"
#include "linux-user/trace.h"
struct target_sigcontext {
typedef struct target_sigcontext {
struct target_pt_regs regs;
abi_ulong oldmask;
abi_ulong usp;
};
} target_sigcontext;
struct target_ucontext {
typedef struct target_ucontext {
abi_ulong tuc_flags;
abi_ulong tuc_link;
target_stack_t tuc_stack;
struct target_sigcontext tuc_mcontext;
target_sigcontext tuc_mcontext;
target_sigset_t tuc_sigmask; /* mask last for extensibility */
};
} target_ucontext;
struct target_rt_sigframe {
abi_ulong pinfo;
uint64_t puc;
typedef struct target_rt_sigframe {
struct target_siginfo info;
struct target_sigcontext sc;
struct target_ucontext uc;
unsigned char retcode[16]; /* trampoline code */
};
target_ucontext uc;
uint32_t retcode[4]; /* trampoline code */
} target_rt_sigframe;
/* This is the asm-generic/ucontext.h version */
#if 0
static int restore_sigcontext(CPUOpenRISCState *regs,
struct target_sigcontext *sc)
static void restore_sigcontext(CPUOpenRISCState *env, target_sigcontext *sc)
{
unsigned int err = 0;
unsigned long old_usp;
int i;
abi_ulong v;
/* Alwys make any pending restarted system call return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
/* restore the regs from &sc->regs (same as sc, since regs is first)
* (sc is already checked for VERIFY_READ since the sigframe was
* checked in sys_sigreturn previously)
*/
if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
goto badframe;
for (i = 0; i < 32; ++i) {
__get_user(v, &sc->regs.gpr[i]);
cpu_set_gpr(env, i, v);
}
__get_user(env->pc, &sc->regs.pc);
/* make sure the U-flag is set so user-mode cannot fool us */
regs->sr &= ~SR_SM;
/* restore the old USP as it was before we stacked the sc etc.
* (we cannot just pop the sigcontext since we aligned the sp and
* stuff after pushing it)
*/
__get_user(old_usp, &sc->usp);
phx_signal("old_usp 0x%lx", old_usp);
__PHX__ REALLY /* ??? */
wrusp(old_usp);
regs->gpr[1] = old_usp;
/* TODO: the other ports use regs->orig_XX to disable syscall checks
* after this completes, but we don't use that mechanism. maybe we can
* use it now ?
*/
return err;
badframe:
return 1;
/* Make sure the supervisor flag is clear. */
__get_user(v, &sc->regs.sr);
cpu_set_sr(env, v & ~SR_SM);
}
#endif
/* Set up a signal frame. */
static void setup_sigcontext(struct target_sigcontext *sc,
CPUOpenRISCState *regs,
unsigned long mask)
static void setup_sigcontext(target_sigcontext *sc, CPUOpenRISCState *env)
{
unsigned long usp = cpu_get_gpr(regs, 1);
int i;
/* copy the regs. they are first in sc so we can use sc directly */
for (i = 0; i < 32; ++i) {
__put_user(cpu_get_gpr(env, i), &sc->regs.gpr[i]);
}
/*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
/* Set the frametype to CRIS_FRAME_NORMAL for the execution of
the signal handler. The frametype will be restored to its previous
value in restore_sigcontext. */
/*regs->frametype = CRIS_FRAME_NORMAL;*/
/* then some other stuff */
__put_user(mask, &sc->oldmask);
__put_user(usp, &sc->usp);
}
static inline unsigned long align_sigframe(unsigned long sp)
{
return sp & ~3UL;
__put_user(env->pc, &sc->regs.pc);
__put_user(cpu_get_sr(env), &sc->regs.sr);
}
static inline abi_ulong get_sigframe(struct target_sigaction *ka,
CPUOpenRISCState *regs,
CPUOpenRISCState *env,
size_t frame_size)
{
unsigned long sp = get_sp_from_cpustate(regs);
int onsigstack = on_sig_stack(sp);
target_ulong sp = get_sp_from_cpustate(env);
/* redzone */
sp = target_sigsp(sp, ka);
sp = align_sigframe(sp - frame_size);
/*
* If we are on the alternate signal stack and would overflow it, don't.
* Return an always-bogus address instead so we will die with SIGSEGV.
/* Honor redzone now. If we swap to signal stack, no need to waste
* the 128 bytes by subtracting afterward.
*/
sp -= 128;
if (onsigstack && !likely(on_sig_stack(sp))) {
return -1L;
}
sp = target_sigsp(sp, ka);
sp -= frame_size;
sp = QEMU_ALIGN_DOWN(sp, 4);
return sp;
}
@ -147,11 +92,9 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
target_siginfo_t *info,
target_sigset_t *set, CPUOpenRISCState *env)
{
int err = 0;
abi_ulong frame_addr;
unsigned long return_ip;
struct target_rt_sigframe *frame;
abi_ulong info_addr, uc_addr;
target_rt_sigframe *frame;
int i;
frame_addr = get_sigframe(ka, env, sizeof(*frame));
trace_user_setup_rt_frame(env, frame_addr);
@ -159,47 +102,37 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
goto give_sigsegv;
}
info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
__put_user(info_addr, &frame->pinfo);
uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
__put_user(uc_addr, &frame->puc);
if (ka->sa_flags & SA_SIGINFO) {
tswap_siginfo(&frame->info, info);
}
/*err |= __clear_user(&frame->uc, offsetof(ucontext_t, uc_mcontext));*/
__put_user(0, &frame->uc.tuc_flags);
__put_user(0, &frame->uc.tuc_link);
target_save_altstack(&frame->uc.tuc_stack, env);
setup_sigcontext(&frame->sc, env, set->sig[0]);
/*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
/* trampoline - the desired return ip is the retcode itself */
return_ip = (unsigned long)&frame->retcode;
/* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */
__put_user(0xa960, (short *)(frame->retcode + 0));
__put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
__put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
__put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
if (err) {
goto give_sigsegv;
setup_sigcontext(&frame->uc.tuc_mcontext, env);
for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
}
/* TODO what is the current->exec_domain stuff and invmap ? */
/* This is l.ori r11,r0,__NR_sigreturn; l.sys 1; l.nop; l.nop */
__put_user(0xa9600000 | TARGET_NR_rt_sigreturn, frame->retcode + 0);
__put_user(0x20000001, frame->retcode + 1);
__put_user(0x15000000, frame->retcode + 2);
__put_user(0x15000000, frame->retcode + 3);
/* Set up registers for signal handler */
env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
cpu_set_gpr(env, 9, (unsigned long)return_ip); /* what we enter LATER */
cpu_set_gpr(env, 3, (unsigned long)sig); /* arg 1: signo */
cpu_set_gpr(env, 4, (unsigned long)&frame->info); /* arg 2: (siginfo_t*) */
cpu_set_gpr(env, 5, (unsigned long)&frame->uc); /* arg 3: ucontext */
/* actually move the usp to reflect the stacked frame */
cpu_set_gpr(env, 1, (unsigned long)frame);
cpu_set_gpr(env, 9, frame_addr + offsetof(target_rt_sigframe, retcode));
cpu_set_gpr(env, 3, sig);
cpu_set_gpr(env, 4, frame_addr + offsetof(target_rt_sigframe, info));
cpu_set_gpr(env, 5, frame_addr + offsetof(target_rt_sigframe, uc));
cpu_set_gpr(env, 1, frame_addr);
/* For debugging convenience, set ppc to the insn that faulted. */
env->ppc = env->pc;
/* When setting the PC for the signal handler, exit delay slot. */
env->pc = ka->_sa_handler;
env->dflag = 0;
return;
give_sigsegv:
@ -207,16 +140,34 @@ give_sigsegv:
force_sigsegv(sig);
}
long do_sigreturn(CPUOpenRISCState *env)
{
trace_user_do_sigreturn(env, 0);
fprintf(stderr, "do_sigreturn: not implemented\n");
return -TARGET_ENOSYS;
}
long do_rt_sigreturn(CPUOpenRISCState *env)
{
abi_ulong frame_addr = get_sp_from_cpustate(env);
target_rt_sigframe *frame;
sigset_t set;
trace_user_do_rt_sigreturn(env, 0);
fprintf(stderr, "do_rt_sigreturn: not implemented\n");
return -TARGET_ENOSYS;
if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
goto badframe;
}
if (frame_addr & 3) {
goto badframe;
}
target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
set_sigmask(&set);
restore_sigcontext(env, &frame->uc.tuc_mcontext);
if (do_sigaltstack(frame_addr + offsetof(target_rt_sigframe, uc.tuc_stack),
0, frame_addr) == -EFAULT) {
goto badframe;
}
unlock_user_struct(frame, frame_addr, 0);
return cpu_get_gpr(env, 11);
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
return 0;
}


@ -5,8 +5,8 @@
typedef struct target_sigaltstack {
abi_long ss_sp;
abi_int ss_flags;
abi_ulong ss_size;
abi_long ss_flags;
} target_stack_t;
/* sigaltstack controls */


@ -1,27 +1,15 @@
#ifndef OPENRISC_TARGET_SYSCALL_H
#define OPENRISC_TARGET_SYSCALL_H
/* Note that in linux/arch/openrisc/include/uapi/asm/ptrace.h,
* this is called user_regs_struct. Given that this is what
* is used within struct sigcontext we need this definition.
* However, elfload.c wants this name.
*/
struct target_pt_regs {
union {
struct {
/* Named registers */
uint32_t sr; /* Stored in place of r0 */
target_ulong sp; /* r1 */
};
struct {
/* Old style */
target_ulong offset[2];
target_ulong gprs[30];
};
struct {
/* New style */
target_ulong gpr[32];
};
};
target_ulong pc;
target_ulong orig_gpr11; /* For restarting system calls */
uint32_t syscallno; /* Syscall number (used by strace) */
target_ulong dummy; /* Cheap alignment fix */
abi_ulong gpr[32];
abi_ulong pc;
abi_ulong sr;
};
#define UNAME_MACHINE "openrisc"


@ -236,7 +236,7 @@ int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
return 0;
}
#if !defined(TARGET_OPENRISC) && !defined(TARGET_NIOS2)
#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
* caller is assumed to have called block_signals() already.
*/


@ -1,10 +1,10 @@
#!/bin/sh
# enable automatic i386/ARM/M68K/MIPS/SPARC/PPC/s390/HPPA/Xtensa/microblaze
# program execution by the kernel
# Enable automatic program execution by the kernel.
qemu_target_list="i386 i486 alpha arm armeb sparc32plus ppc ppc64 ppc64le m68k \
mips mipsel mipsn32 mipsn32el mips64 mips64el \
sh4 sh4eb s390x aarch64 aarch64_be hppa riscv32 riscv64 xtensa xtensaeb microblaze microblazeel"
sh4 sh4eb s390x aarch64 aarch64_be hppa riscv32 riscv64 xtensa xtensaeb \
microblaze microblazeel or1k"
i386_magic='\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x03\x00'
i386_mask='\xff\xff\xff\xff\xff\xfe\xfe\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'
@ -124,6 +124,10 @@ microblazeel_magic='\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\
microblazeel_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'
microblazeel_family=microblazeel
or1k_magic='\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x5c'
or1k_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'
or1k_family=or1k
qemu_get_family() {
cpu=${HOST_ARCH:-$(uname -m)}
case "$cpu" in

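For context on the or1k_magic/or1k_mask pair added above: binfmt_misc ANDs the leading bytes of an executable with the mask and compares the result against the magic, so this entry matches a 32-bit, big-endian ELF file with e_machine 0x5c (EM_OPENRISC); the 0xfe mask byte over e_type lets both ET_EXEC and ET_DYN through. A minimal C sketch of that comparison, for illustration only (the helper name and raw-header-bytes interface are assumptions, not part of the script or of QEMU):

#include <stdbool.h>
#include <stddef.h>

/* Illustrative sketch: roughly the check binfmt_misc performs for the or1k
 * entry registered by qemu-binfmt-conf.sh.  Mask bytes of 0x00 (and the
 * cleared low bit in the e_type byte) are "don't care" positions. */
static bool or1k_binfmt_matches(const unsigned char *hdr, size_t len)
{
    static const unsigned char magic[20] = {
        0x7f, 'E', 'L', 'F', 0x01, 0x02, 0x01, 0x00,    /* ELFCLASS32, ELFDATA2MSB */
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x02,                                      /* e_type: ET_EXEC (ET_DYN also passes) */
        0x00, 0x5c,                                      /* e_machine: EM_OPENRISC (92) */
    };
    static const unsigned char mask[20] = {
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xfe, 0xff, 0xff,
    };
    if (len < sizeof(magic)) {
        return false;
    }
    for (size_t i = 0; i < sizeof(magic); i++) {
        if ((hdr[i] & mask[i]) != magic[i]) {
            return false;
        }
    }
    return true;
}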

@ -1,7 +1,7 @@
obj-$(CONFIG_SOFTMMU) += machine.o
obj-y += cpu.o exception.o interrupt.o mmu.o translate.o
obj-y += cpu.o exception.o interrupt.o mmu.o translate.o disas.o
obj-y += exception_helper.o fpu_helper.o \
interrupt_helper.o mmu_helper.o sys_helper.o
interrupt_helper.o sys_helper.o
obj-y += gdbstub.o
DECODETREE = $(SRC_PATH)/scripts/decodetree.py
@ -12,3 +12,4 @@ target/openrisc/decode.inc.c: \
$(PYTHON) $(DECODETREE) -o $@ $<, "GEN", $(TARGET_DIR)$@)
target/openrisc/translate.o: target/openrisc/decode.inc.c
target/openrisc/disas.o: target/openrisc/decode.inc.c


@ -27,6 +27,7 @@ static void openrisc_cpu_set_pc(CPUState *cs, vaddr value)
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
cpu->env.pc = value;
cpu->env.dflag = 0;
}
static bool openrisc_cpu_has_work(CPUState *cs)
@ -35,6 +36,11 @@ static bool openrisc_cpu_has_work(CPUState *cs)
CPU_INTERRUPT_TIMER);
}
static void openrisc_disas_set_info(CPUState *cpu, disassemble_info *info)
{
info->print_insn = print_insn_or1k;
}
/* CPUClass::reset() */
static void openrisc_cpu_reset(CPUState *s)
{
@ -52,8 +58,10 @@ static void openrisc_cpu_reset(CPUState *s)
cpu->env.upr = UPR_UP | UPR_DMP | UPR_IMP | UPR_PICP | UPR_TTP |
UPR_PMP;
cpu->env.dmmucfgr = (DMMUCFGR_NTW & (0 << 2)) | (DMMUCFGR_NTS & (6 << 2));
cpu->env.immucfgr = (IMMUCFGR_NTW & (0 << 2)) | (IMMUCFGR_NTS & (6 << 2));
cpu->env.dmmucfgr = (DMMUCFGR_NTW & (0 << 2))
| (DMMUCFGR_NTS & (ctz32(TLB_SIZE) << 2));
cpu->env.immucfgr = (IMMUCFGR_NTW & (0 << 2))
| (IMMUCFGR_NTS & (ctz32(TLB_SIZE) << 2));
#ifndef CONFIG_USER_ONLY
cpu->env.picmr = 0x00000000;
@ -87,10 +95,6 @@ static void openrisc_cpu_initfn(Object *obj)
OpenRISCCPU *cpu = OPENRISC_CPU(obj);
cs->env_ptr = &cpu->env;
#ifndef CONFIG_USER_ONLY
cpu_openrisc_mmu_init(cpu);
#endif
}
/* CPU models */
@ -152,6 +156,7 @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
#endif
cc->gdb_num_core_regs = 32 + 3;
cc->tcg_initialize = openrisc_translate_init;
cc->disas_set_info = openrisc_disas_set_info;
}
/* Sort alphabetically by type name, except for "any". */


@ -222,12 +222,8 @@ enum {
/* TLB size */
enum {
DTLB_WAYS = 1,
DTLB_SIZE = 64,
DTLB_MASK = (DTLB_SIZE-1),
ITLB_WAYS = 1,
ITLB_SIZE = 64,
ITLB_MASK = (ITLB_SIZE-1),
TLB_SIZE = 128,
TLB_MASK = TLB_SIZE - 1,
};
/* TLB prot */
@ -241,14 +237,6 @@ enum {
UXE = (1 << 7),
};
/* check if tlb available */
enum {
TLBRET_INVALID = -3,
TLBRET_NOMATCH = -2,
TLBRET_BADADDR = -1,
TLBRET_MATCH = 0
};
typedef struct OpenRISCTLBEntry {
uint32_t mr;
uint32_t tr;
@ -256,8 +244,8 @@ typedef struct OpenRISCTLBEntry {
#ifndef CONFIG_USER_ONLY
typedef struct CPUOpenRISCTLBContext {
OpenRISCTLBEntry itlb[ITLB_WAYS][ITLB_SIZE];
OpenRISCTLBEntry dtlb[DTLB_WAYS][DTLB_SIZE];
OpenRISCTLBEntry itlb[TLB_SIZE];
OpenRISCTLBEntry dtlb[TLB_SIZE];
int (*cpu_openrisc_map_address_code)(struct OpenRISCCPU *cpu,
hwaddr *physical,
@ -301,6 +289,10 @@ typedef struct CPUOpenRISCState {
uint32_t dflag; /* In delay slot (boolean) */
#ifndef CONFIG_USER_ONLY
CPUOpenRISCTLBContext tlb;
#endif
/* Fields up to this point are cleared by a CPU reset */
struct {} end_reset_fields;
@ -310,8 +302,6 @@ typedef struct CPUOpenRISCState {
uint32_t cpucfgr; /* CPU configure register */
#ifndef CONFIG_USER_ONLY
CPUOpenRISCTLBContext * tlb;
QEMUTimer *timer;
uint32_t ttmr; /* Timer tick mode register */
int is_counting;
@ -358,6 +348,7 @@ void openrisc_translate_init(void);
int openrisc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
int rw, int mmu_idx);
int cpu_openrisc_signal_handler(int host_signum, void *pinfo, void *puc);
int print_insn_or1k(bfd_vma addr, disassemble_info *info);
#define cpu_list cpu_openrisc_list
#define cpu_signal_handler cpu_openrisc_signal_handler
@ -376,17 +367,6 @@ void cpu_openrisc_count_update(OpenRISCCPU *cpu);
void cpu_openrisc_timer_update(OpenRISCCPU *cpu);
void cpu_openrisc_count_start(OpenRISCCPU *cpu);
void cpu_openrisc_count_stop(OpenRISCCPU *cpu);
void cpu_openrisc_mmu_init(OpenRISCCPU *cpu);
int cpu_openrisc_get_phys_nommu(OpenRISCCPU *cpu,
hwaddr *physical,
int *prot, target_ulong address, int rw);
int cpu_openrisc_get_phys_code(OpenRISCCPU *cpu,
hwaddr *physical,
int *prot, target_ulong address, int rw);
int cpu_openrisc_get_phys_data(OpenRISCCPU *cpu,
hwaddr *physical,
int *prot, target_ulong address, int rw);
#endif
#define OPENRISC_CPU_TYPE_SUFFIX "-" TYPE_OPENRISC_CPU
@ -395,9 +375,12 @@ int cpu_openrisc_get_phys_data(OpenRISCCPU *cpu,
#include "exec/cpu-all.h"
#define TB_FLAGS_DFLAG 1
#define TB_FLAGS_R0_0 2
#define TB_FLAGS_SM SR_SM
#define TB_FLAGS_DME SR_DME
#define TB_FLAGS_IME SR_IME
#define TB_FLAGS_OVE SR_OVE
#define TB_FLAGS_DFLAG 2 /* reuse SR_TEE */
#define TB_FLAGS_R0_0 4 /* reuse SR_IEE */
static inline uint32_t cpu_get_gpr(const CPUOpenRISCState *env, int i)
{
@ -415,17 +398,21 @@ static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env,
{
*pc = env->pc;
*cs_base = 0;
*flags = (env->dflag
| (cpu_get_gpr(env, 0) == 0 ? TB_FLAGS_R0_0 : 0)
| (env->sr & SR_OVE));
*flags = (env->dflag ? TB_FLAGS_DFLAG : 0)
| (cpu_get_gpr(env, 0) ? 0 : TB_FLAGS_R0_0)
| (env->sr & (SR_SM | SR_DME | SR_IME | SR_OVE));
}
static inline int cpu_mmu_index(CPUOpenRISCState *env, bool ifetch)
{
if (!(env->sr & SR_IME)) {
return MMU_NOMMU_IDX;
int ret = MMU_NOMMU_IDX; /* mmu is disabled */
if (env->sr & (ifetch ? SR_IME : SR_DME)) {
/* The mmu is enabled; test supervisor state. */
ret = env->sr & SR_SM ? MMU_SUPERVISOR_IDX : MMU_USER_IDX;
}
return (env->sr & SR_SM) == 0 ? MMU_USER_IDX : MMU_SUPERVISOR_IDX;
return ret;
}
static inline uint32_t cpu_get_sr(const CPUOpenRISCState *env)

target/openrisc/disas.c (new file, 170 lines)

@ -0,0 +1,170 @@
/*
* OpenRISC disassembler
*
* Copyright (c) 2018 Richard Henderson <rth@twiddle.net>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "disas/bfd.h"
#include "qemu/bitops.h"
#include "cpu.h"
typedef disassemble_info DisasContext;
/* Include the auto-generated decoder. */
#include "decode.inc.c"
#define output(mnemonic, format, ...) \
(info->fprintf_func(info->stream, "%-9s " format, \
mnemonic, ##__VA_ARGS__))
int print_insn_or1k(bfd_vma addr, disassemble_info *info)
{
bfd_byte buffer[4];
uint32_t insn;
int status;
status = info->read_memory_func(addr, buffer, 4, info);
if (status != 0) {
info->memory_error_func(status, addr, info);
return -1;
}
insn = bfd_getb32(buffer);
if (!decode(info, insn)) {
output(".long", "%#08x", insn);
}
return 4;
}
#define INSN(opcode, format, ...) \
static bool trans_l_##opcode(disassemble_info *info, \
arg_l_##opcode *a, uint32_t insn) \
{ \
output("l." #opcode, format, ##__VA_ARGS__); \
return true; \
}
INSN(add, "r%d, r%d, r%d", a->d, a->a, a->b)
INSN(addc, "r%d, r%d, r%d", a->d, a->a, a->b)
INSN(sub, "r%d, r%d, r%d", a->d, a->a, a->b)
INSN(and, "r%d, r%d, r%d", a->d, a->a, a->b)
INSN(or, "r%d, r%d, r%d", a->d, a->a, a->b)
INSN(xor, "r%d, r%d, r%d", a->d, a->a, a->b)
INSN(sll, "r%d, r%d, r%d", a->d, a->a, a->b)
INSN(srl, "r%d, r%d, r%d", a->d, a->a, a->b)
INSN(sra, "r%d, r%d, r%d", a->d, a->a, a->b)
INSN(ror, "r%d, r%d, r%d", a->d, a->a, a->b)
INSN(exths, "r%d, r%d", a->d, a->a)
INSN(extbs, "r%d, r%d", a->d, a->a)
INSN(exthz, "r%d, r%d", a->d, a->a)
INSN(extbz, "r%d, r%d", a->d, a->a)
INSN(cmov, "r%d, r%d, r%d", a->d, a->a, a->b)
INSN(ff1, "r%d, r%d", a->d, a->a)
INSN(fl1, "r%d, r%d", a->d, a->a)
INSN(mul, "r%d, r%d, r%d", a->d, a->a, a->b)
INSN(mulu, "r%d, r%d, r%d", a->d, a->a, a->b)
INSN(div, "r%d, r%d, r%d", a->d, a->a, a->b)
INSN(divu, "r%d, r%d, r%d", a->d, a->a, a->b)
INSN(muld, "r%d, r%d", a->a, a->b)
INSN(muldu, "r%d, r%d", a->a, a->b)
INSN(j, "%d", a->n)
INSN(jal, "%d", a->n)
INSN(bf, "%d", a->n)
INSN(bnf, "%d", a->n)
INSN(jr, "r%d", a->b)
INSN(jalr, "r%d", a->b)
INSN(lwa, "r%d, %d(r%d)", a->d, a->i, a->a)
INSN(lwz, "r%d, %d(r%d)", a->d, a->i, a->a)
INSN(lws, "r%d, %d(r%d)", a->d, a->i, a->a)
INSN(lbz, "r%d, %d(r%d)", a->d, a->i, a->a)
INSN(lbs, "r%d, %d(r%d)", a->d, a->i, a->a)
INSN(lhz, "r%d, %d(r%d)", a->d, a->i, a->a)
INSN(lhs, "r%d, %d(r%d)", a->d, a->i, a->a)
INSN(swa, "%d(r%d), r%d", a->i, a->a, a->b)
INSN(sw, "%d(r%d), r%d", a->i, a->a, a->b)
INSN(sb, "%d(r%d), r%d", a->i, a->a, a->b)
INSN(sh, "%d(r%d), r%d", a->i, a->a, a->b)
INSN(nop, "")
INSN(addi, "r%d, r%d, %d", a->d, a->a, a->i)
INSN(addic, "r%d, r%d, %d", a->d, a->a, a->i)
INSN(muli, "r%d, r%d, %d", a->d, a->a, a->i)
INSN(maci, "r%d, %d", a->a, a->i)
INSN(andi, "r%d, r%d, %d", a->d, a->a, a->k)
INSN(ori, "r%d, r%d, %d", a->d, a->a, a->k)
INSN(xori, "r%d, r%d, %d", a->d, a->a, a->i)
INSN(mfspr, "r%d, r%d, %d", a->d, a->a, a->k)
INSN(mtspr, "r%d, r%d, %d", a->a, a->b, a->k)
INSN(mac, "r%d, r%d", a->a, a->b)
INSN(msb, "r%d, r%d", a->a, a->b)
INSN(macu, "r%d, r%d", a->a, a->b)
INSN(msbu, "r%d, r%d", a->a, a->b)
INSN(slli, "r%d, r%d, %d", a->d, a->a, a->l)
INSN(srli, "r%d, r%d, %d", a->d, a->a, a->l)
INSN(srai, "r%d, r%d, %d", a->d, a->a, a->l)
INSN(rori, "r%d, r%d, %d", a->d, a->a, a->l)
INSN(movhi, "r%d, %d", a->d, a->k)
INSN(macrc, "r%d", a->d)
INSN(sfeq, "r%d, r%d", a->a, a->b)
INSN(sfne, "r%d, r%d", a->a, a->b)
INSN(sfgtu, "r%d, r%d", a->a, a->b)
INSN(sfgeu, "r%d, r%d", a->a, a->b)
INSN(sfltu, "r%d, r%d", a->a, a->b)
INSN(sfleu, "r%d, r%d", a->a, a->b)
INSN(sfgts, "r%d, r%d", a->a, a->b)
INSN(sfges, "r%d, r%d", a->a, a->b)
INSN(sflts, "r%d, r%d", a->a, a->b)
INSN(sfles, "r%d, r%d", a->a, a->b)
INSN(sfeqi, "r%d, %d", a->a, a->i)
INSN(sfnei, "r%d, %d", a->a, a->i)
INSN(sfgtui, "r%d, %d", a->a, a->i)
INSN(sfgeui, "r%d, %d", a->a, a->i)
INSN(sfltui, "r%d, %d", a->a, a->i)
INSN(sfleui, "r%d, %d", a->a, a->i)
INSN(sfgtsi, "r%d, %d", a->a, a->i)
INSN(sfgesi, "r%d, %d", a->a, a->i)
INSN(sfltsi, "r%d, %d", a->a, a->i)
INSN(sflesi, "r%d, %d", a->a, a->i)
INSN(sys, "%d", a->k)
INSN(trap, "%d", a->k)
INSN(msync, "")
INSN(psync, "")
INSN(csync, "")
INSN(rfe, "")
#define FP_INSN(opcode, suffix, format, ...) \
static bool trans_lf_##opcode##_##suffix(disassemble_info *info, \
arg_lf_##opcode##_##suffix *a, uint32_t insn) \
{ \
output("lf." #opcode "." #suffix, format, ##__VA_ARGS__); \
return true; \
}
FP_INSN(add, s, "r%d, r%d, r%d", a->d, a->a, a->b)
FP_INSN(sub, s, "r%d, r%d, r%d", a->d, a->a, a->b)
FP_INSN(mul, s, "r%d, r%d, r%d", a->d, a->a, a->b)
FP_INSN(div, s, "r%d, r%d, r%d", a->d, a->a, a->b)
FP_INSN(rem, s, "r%d, r%d, r%d", a->d, a->a, a->b)
FP_INSN(itof, s, "r%d, r%d", a->d, a->a)
FP_INSN(ftoi, s, "r%d, r%d", a->d, a->a)
FP_INSN(madd, s, "r%d, r%d, r%d", a->d, a->a, a->b)
FP_INSN(sfeq, s, "r%d, r%d", a->a, a->b)
FP_INSN(sfne, s, "r%d, r%d", a->a, a->b)
FP_INSN(sfgt, s, "r%d, r%d", a->a, a->b)
FP_INSN(sfge, s, "r%d, r%d", a->a, a->b)
FP_INSN(sflt, s, "r%d, r%d", a->a, a->b)
FP_INSN(sfle, s, "r%d, r%d", a->a, a->b)

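As a reading aid for the new disassembler above: each INSN(...) / FP_INSN(...) line is a macro invocation that expands into one trans_* callback; the decodetree-generated decode() picks the callback matching the instruction encoding, and the callback only formats the operands. A rough expansion of the first entry, INSN(add, ...), under the INSN and output macros defined near the top of the file:

static bool trans_l_add(disassemble_info *info, arg_l_add *a, uint32_t insn)
{
    /* output() prints the mnemonic left-justified in a 9-column field,
       then the formatted operands. */
    output("l.add", "r%d, r%d, r%d", a->d, a->a, a->b);
    return true;
}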

@ -56,5 +56,5 @@ FOP_CMP(le)
DEF_HELPER_FLAGS_1(rfe, 0, void, env)
/* sys */
DEF_HELPER_FLAGS_4(mtspr, 0, void, env, tl, tl, tl)
DEF_HELPER_FLAGS_4(mfspr, TCG_CALL_NO_WG, tl, env, tl, tl, tl)
DEF_HELPER_FLAGS_3(mtspr, 0, void, env, tl, tl)
DEF_HELPER_FLAGS_3(mfspr, TCG_CALL_NO_WG, tl, env, tl, tl)


@ -32,29 +32,22 @@ void openrisc_cpu_do_interrupt(CPUState *cs)
#ifndef CONFIG_USER_ONLY
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
CPUOpenRISCState *env = &cpu->env;
int exception = cs->exception_index;
env->epcr = env->pc;
if (env->dflag) {
env->dflag = 0;
env->sr |= SR_DSX;
env->epcr -= 4;
} else {
env->sr &= ~SR_DSX;
}
if (cs->exception_index == EXCP_SYSCALL) {
if (exception == EXCP_SYSCALL) {
env->epcr += 4;
}
/* When we have an illegal instruction the error effective address
shall be set to the illegal instruction address. */
if (cs->exception_index == EXCP_ILLEGAL) {
if (exception == EXCP_ILLEGAL) {
env->eear = env->pc;
}
/* For machine-state changed between user-mode and supervisor mode,
we need flush TLB when we enter&exit EXCP. */
tlb_flush(cs);
/* During exceptions esr is populared with the pre-exception sr. */
env->esr = cpu_get_sr(env);
/* In parallel sr is updated to disable mmu, interrupts, timers and
set the delay slot exception flag. */
env->sr &= ~SR_DME;
env->sr &= ~SR_IME;
env->sr |= SR_SM;
@ -62,12 +55,38 @@ void openrisc_cpu_do_interrupt(CPUState *cs)
env->sr &= ~SR_TEE;
env->pmr &= ~PMR_DME;
env->pmr &= ~PMR_SME;
env->tlb->cpu_openrisc_map_address_data = &cpu_openrisc_get_phys_nommu;
env->tlb->cpu_openrisc_map_address_code = &cpu_openrisc_get_phys_nommu;
env->lock_addr = -1;
if (cs->exception_index > 0 && cs->exception_index < EXCP_NR) {
hwaddr vect_pc = cs->exception_index << 8;
/* Set/clear dsx to indicate if we are in a delay slot exception. */
if (env->dflag) {
env->dflag = 0;
env->sr |= SR_DSX;
env->epcr -= 4;
} else {
env->sr &= ~SR_DSX;
}
if (exception > 0 && exception < EXCP_NR) {
static const char * const int_name[EXCP_NR] = {
[EXCP_RESET] = "RESET",
[EXCP_BUSERR] = "BUSERR (bus error)",
[EXCP_DPF] = "DFP (data protection fault)",
[EXCP_IPF] = "IPF (code protection fault)",
[EXCP_TICK] = "TICK (timer interrupt)",
[EXCP_ALIGN] = "ALIGN",
[EXCP_ILLEGAL] = "ILLEGAL",
[EXCP_INT] = "INT (device interrupt)",
[EXCP_DTLBMISS] = "DTLBMISS (data tlb miss)",
[EXCP_ITLBMISS] = "ITLBMISS (code tlb miss)",
[EXCP_RANGE] = "RANGE",
[EXCP_SYSCALL] = "SYSCALL",
[EXCP_FPE] = "FPE",
[EXCP_TRAP] = "TRAP",
};
qemu_log_mask(CPU_LOG_INT, "INT: %s\n", int_name[exception]);
hwaddr vect_pc = exception << 8;
if (env->cpucfgr & CPUCFGR_EVBARP) {
vect_pc |= env->evbar;
}
@ -76,7 +95,7 @@ void openrisc_cpu_do_interrupt(CPUState *cs)
}
env->pc = vect_pc;
} else {
cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
cpu_abort(cs, "Unhandled exception 0x%x\n", exception);
}
#endif


@ -25,36 +25,7 @@
void HELPER(rfe)(CPUOpenRISCState *env)
{
OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
CPUState *cs = CPU(cpu);
#ifndef CONFIG_USER_ONLY
int need_flush_tlb = (cpu->env.sr & (SR_SM | SR_IME | SR_DME)) ^
(cpu->env.esr & (SR_SM | SR_IME | SR_DME));
#endif
cpu->env.pc = cpu->env.epcr;
cpu_set_sr(&cpu->env, cpu->env.esr);
cpu->env.lock_addr = -1;
#ifndef CONFIG_USER_ONLY
if (cpu->env.sr & SR_DME) {
cpu->env.tlb->cpu_openrisc_map_address_data =
&cpu_openrisc_get_phys_data;
} else {
cpu->env.tlb->cpu_openrisc_map_address_data =
&cpu_openrisc_get_phys_nommu;
}
if (cpu->env.sr & SR_IME) {
cpu->env.tlb->cpu_openrisc_map_address_code =
&cpu_openrisc_get_phys_code;
} else {
cpu->env.tlb->cpu_openrisc_map_address_code =
&cpu_openrisc_get_phys_nommu;
}
if (need_flush_tlb) {
tlb_flush(cs);
}
#endif
cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
env->pc = env->epcr;
env->lock_addr = -1;
cpu_set_sr(env, env->esr);
}


@ -24,31 +24,6 @@
#include "hw/boards.h"
#include "migration/cpu.h"
static int env_post_load(void *opaque, int version_id)
{
CPUOpenRISCState *env = opaque;
/* Restore MMU handlers */
if (env->sr & SR_DME) {
env->tlb->cpu_openrisc_map_address_data =
&cpu_openrisc_get_phys_data;
} else {
env->tlb->cpu_openrisc_map_address_data =
&cpu_openrisc_get_phys_nommu;
}
if (env->sr & SR_IME) {
env->tlb->cpu_openrisc_map_address_code =
&cpu_openrisc_get_phys_code;
} else {
env->tlb->cpu_openrisc_map_address_code =
&cpu_openrisc_get_phys_nommu;
}
return 0;
}
static const VMStateDescription vmstate_tlb_entry = {
.name = "tlb_entry",
.version_id = 1,
@ -63,24 +38,17 @@ static const VMStateDescription vmstate_tlb_entry = {
static const VMStateDescription vmstate_cpu_tlb = {
.name = "cpu_tlb",
.version_id = 1,
.minimum_version_id = 1,
.minimum_version_id_old = 1,
.version_id = 2,
.minimum_version_id = 2,
.fields = (VMStateField[]) {
VMSTATE_STRUCT_2DARRAY(itlb, CPUOpenRISCTLBContext,
ITLB_WAYS, ITLB_SIZE, 0,
VMSTATE_STRUCT_ARRAY(itlb, CPUOpenRISCTLBContext, TLB_SIZE, 0,
vmstate_tlb_entry, OpenRISCTLBEntry),
VMSTATE_STRUCT_2DARRAY(dtlb, CPUOpenRISCTLBContext,
DTLB_WAYS, DTLB_SIZE, 0,
VMSTATE_STRUCT_ARRAY(dtlb, CPUOpenRISCTLBContext, TLB_SIZE, 0,
vmstate_tlb_entry, OpenRISCTLBEntry),
VMSTATE_END_OF_LIST()
}
};
#define VMSTATE_CPU_TLB(_f, _s) \
VMSTATE_STRUCT_POINTER(_f, _s, vmstate_cpu_tlb, CPUOpenRISCTLBContext)
static int get_sr(QEMUFile *f, void *opaque, size_t size, VMStateField *field)
{
CPUOpenRISCState *env = opaque;
@ -106,7 +74,6 @@ static const VMStateDescription vmstate_env = {
.name = "env",
.version_id = 6,
.minimum_version_id = 6,
.post_load = env_post_load,
.fields = (VMStateField[]) {
VMSTATE_UINTTL_2DARRAY(shadow_gpr, CPUOpenRISCState, 16, 32),
VMSTATE_UINTTL(pc, CPUOpenRISCState),
@ -143,7 +110,8 @@ static const VMStateDescription vmstate_env = {
VMSTATE_UINT32(fpcsr, CPUOpenRISCState),
VMSTATE_UINT64(mac, CPUOpenRISCState),
VMSTATE_CPU_TLB(tlb, CPUOpenRISCState),
VMSTATE_STRUCT(tlb, CPUOpenRISCState, 1,
vmstate_cpu_tlb, CPUOpenRISCTLBContext),
VMSTATE_TIMER_PTR(timer, CPUOpenRISCState),
VMSTATE_UINT32(ttmr, CPUOpenRISCState),


@ -29,227 +29,156 @@
#endif
#ifndef CONFIG_USER_ONLY
int cpu_openrisc_get_phys_nommu(OpenRISCCPU *cpu,
hwaddr *physical,
int *prot, target_ulong address, int rw)
static inline void get_phys_nommu(hwaddr *phys_addr, int *prot,
target_ulong address)
{
*physical = address;
*phys_addr = address;
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
return TLBRET_MATCH;
}
int cpu_openrisc_get_phys_code(OpenRISCCPU *cpu,
hwaddr *physical,
int *prot, target_ulong address, int rw)
static int get_phys_mmu(OpenRISCCPU *cpu, hwaddr *phys_addr, int *prot,
target_ulong addr, int need, bool super)
{
int vpn = address >> TARGET_PAGE_BITS;
int idx = vpn & ITLB_MASK;
int right = 0;
int idx = (addr >> TARGET_PAGE_BITS) & TLB_MASK;
uint32_t imr = cpu->env.tlb.itlb[idx].mr;
uint32_t itr = cpu->env.tlb.itlb[idx].tr;
uint32_t dmr = cpu->env.tlb.dtlb[idx].mr;
uint32_t dtr = cpu->env.tlb.dtlb[idx].tr;
int right, match, valid;
if ((cpu->env.tlb->itlb[0][idx].mr >> TARGET_PAGE_BITS) != vpn) {
return TLBRET_NOMATCH;
}
if (!(cpu->env.tlb->itlb[0][idx].mr & 1)) {
return TLBRET_INVALID;
}
if (cpu->env.sr & SR_SM) { /* supervisor mode */
if (cpu->env.tlb->itlb[0][idx].tr & SXE) {
right |= PAGE_EXEC;
}
} else {
if (cpu->env.tlb->itlb[0][idx].tr & UXE) {
right |= PAGE_EXEC;
/* If the ITLB and DTLB indexes map to the same page, we want to
load all permissions all at once. If the destination pages do
not match, zap the one we don't need. */
if (unlikely((itr ^ dtr) & TARGET_PAGE_MASK)) {
if (need & PAGE_EXEC) {
dmr = dtr = 0;
} else {
imr = itr = 0;
}
}
if ((rw & 2) && ((right & PAGE_EXEC) == 0)) {
return TLBRET_BADADDR;
}
/* Check if either of the entries matches the source address. */
match = (imr ^ addr) & TARGET_PAGE_MASK ? 0 : PAGE_EXEC;
match |= (dmr ^ addr) & TARGET_PAGE_MASK ? 0 : PAGE_READ | PAGE_WRITE;
*physical = (cpu->env.tlb->itlb[0][idx].tr & TARGET_PAGE_MASK) |
(address & (TARGET_PAGE_SIZE-1));
/* Check if either of the entries is valid. */
valid = imr & 1 ? PAGE_EXEC : 0;
valid |= dmr & 1 ? PAGE_READ | PAGE_WRITE : 0;
valid &= match;
/* Collect the permissions from the entries. */
right = itr & (super ? SXE : UXE) ? PAGE_EXEC : 0;
right |= dtr & (super ? SRE : URE) ? PAGE_READ : 0;
right |= dtr & (super ? SWE : UWE) ? PAGE_WRITE : 0;
right &= valid;
/* Note that above we validated that itr and dtr match on page.
So oring them together changes nothing without having to
check which one we needed. We also want to store to these
variables even on failure, as it avoids compiler warnings. */
*phys_addr = ((itr | dtr) & TARGET_PAGE_MASK) | (addr & ~TARGET_PAGE_MASK);
*prot = right;
return TLBRET_MATCH;
}
int cpu_openrisc_get_phys_data(OpenRISCCPU *cpu,
hwaddr *physical,
int *prot, target_ulong address, int rw)
{
int vpn = address >> TARGET_PAGE_BITS;
int idx = vpn & DTLB_MASK;
int right = 0;
qemu_log_mask(CPU_LOG_MMU,
"MMU lookup: need %d match %d valid %d right %d -> %s\n",
need, match, valid, right, (need & right) ? "OK" : "FAIL");
if ((cpu->env.tlb->dtlb[0][idx].mr >> TARGET_PAGE_BITS) != vpn) {
return TLBRET_NOMATCH;
}
if (!(cpu->env.tlb->dtlb[0][idx].mr & 1)) {
return TLBRET_INVALID;
/* Check the collective permissions are present. */
if (likely(need & right)) {
return 0; /* success! */
}
if (cpu->env.sr & SR_SM) { /* supervisor mode */
if (cpu->env.tlb->dtlb[0][idx].tr & SRE) {
right |= PAGE_READ;
}
if (cpu->env.tlb->dtlb[0][idx].tr & SWE) {
right |= PAGE_WRITE;
}
/* Determine what kind of failure we have. */
if (need & valid) {
return need & PAGE_EXEC ? EXCP_IPF : EXCP_DPF;
} else {
if (cpu->env.tlb->dtlb[0][idx].tr & URE) {
right |= PAGE_READ;
}
if (cpu->env.tlb->dtlb[0][idx].tr & UWE) {
right |= PAGE_WRITE;
}
return need & PAGE_EXEC ? EXCP_ITLBMISS : EXCP_DTLBMISS;
}
if (!(rw & 1) && ((right & PAGE_READ) == 0)) {
return TLBRET_BADADDR;
}
if ((rw & 1) && ((right & PAGE_WRITE) == 0)) {
return TLBRET_BADADDR;
}
*physical = (cpu->env.tlb->dtlb[0][idx].tr & TARGET_PAGE_MASK) |
(address & (TARGET_PAGE_SIZE-1));
*prot = right;
return TLBRET_MATCH;
}
static int cpu_openrisc_get_phys_addr(OpenRISCCPU *cpu,
hwaddr *physical,
int *prot, target_ulong address,
int rw)
{
int ret = TLBRET_MATCH;
if (rw == MMU_INST_FETCH) { /* ITLB */
*physical = 0;
ret = cpu->env.tlb->cpu_openrisc_map_address_code(cpu, physical,
prot, address, rw);
} else { /* DTLB */
ret = cpu->env.tlb->cpu_openrisc_map_address_data(cpu, physical,
prot, address, rw);
}
return ret;
}
#endif
static void cpu_openrisc_raise_mmu_exception(OpenRISCCPU *cpu,
target_ulong address,
int rw, int tlb_error)
static void raise_mmu_exception(OpenRISCCPU *cpu, target_ulong address,
int exception)
{
CPUState *cs = CPU(cpu);
int exception = 0;
switch (tlb_error) {
default:
if (rw == 2) {
exception = EXCP_IPF;
} else {
exception = EXCP_DPF;
}
break;
#ifndef CONFIG_USER_ONLY
case TLBRET_BADADDR:
if (rw == 2) {
exception = EXCP_IPF;
} else {
exception = EXCP_DPF;
}
break;
case TLBRET_INVALID:
case TLBRET_NOMATCH:
/* No TLB match for a mapped address */
if (rw == 2) {
exception = EXCP_ITLBMISS;
} else {
exception = EXCP_DTLBMISS;
}
break;
#endif
}
cs->exception_index = exception;
cpu->env.eear = address;
cpu->env.lock_addr = -1;
}
#ifndef CONFIG_USER_ONLY
int openrisc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
{
#ifdef CONFIG_USER_ONLY
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
int ret = 0;
hwaddr physical = 0;
int prot = 0;
ret = cpu_openrisc_get_phys_addr(cpu, &physical, &prot,
address, rw);
if (ret == TLBRET_MATCH) {
tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot,
mmu_idx, TARGET_PAGE_SIZE);
ret = 0;
} else if (ret < 0) {
cpu_openrisc_raise_mmu_exception(cpu, address, rw, ret);
ret = 1;
}
return ret;
}
raise_mmu_exception(cpu, address, EXCP_DPF);
return 1;
#else
int openrisc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
{
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
int ret = 0;
cpu_openrisc_raise_mmu_exception(cpu, address, rw, ret);
ret = 1;
return ret;
}
g_assert_not_reached();
#endif
}
#ifndef CONFIG_USER_ONLY
hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
int prot, excp, sr = cpu->env.sr;
hwaddr phys_addr;
int prot;
int miss;
/* Check memory for any kind of address, since during debug the
gdb can ask for anything, check data tlb for address */
miss = cpu_openrisc_get_phys_addr(cpu, &phys_addr, &prot, addr, 0);
switch (sr & (SR_DME | SR_IME)) {
case SR_DME | SR_IME:
/* The mmu is definitely enabled. */
excp = get_phys_mmu(cpu, &phys_addr, &prot, addr,
PAGE_EXEC | PAGE_READ | PAGE_WRITE,
(sr & SR_SM) != 0);
return excp ? -1 : phys_addr;
/* Check instruction tlb */
if (miss) {
miss = cpu_openrisc_get_phys_addr(cpu, &phys_addr, &prot, addr,
MMU_INST_FETCH);
}
default:
/* The mmu is partially enabled, and we don't really have
a "real" access type. Begin by trying the mmu, but if
that fails try again without. */
excp = get_phys_mmu(cpu, &phys_addr, &prot, addr,
PAGE_EXEC | PAGE_READ | PAGE_WRITE,
(sr & SR_SM) != 0);
if (!excp) {
return phys_addr;
}
/* fallthru */
/* Last, fall back to a plain address */
if (miss) {
miss = cpu_openrisc_get_phys_nommu(cpu, &phys_addr, &prot, addr, 0);
}
if (miss) {
return -1;
} else {
case 0:
/* The mmu is definitely disabled; lookups never fail. */
get_phys_nommu(&phys_addr, &prot, addr);
return phys_addr;
}
}
void cpu_openrisc_mmu_init(OpenRISCCPU *cpu)
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
cpu->env.tlb = g_malloc0(sizeof(CPUOpenRISCTLBContext));
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
int prot, excp;
hwaddr phys_addr;
cpu->env.tlb->cpu_openrisc_map_address_code = &cpu_openrisc_get_phys_nommu;
cpu->env.tlb->cpu_openrisc_map_address_data = &cpu_openrisc_get_phys_nommu;
if (mmu_idx == MMU_NOMMU_IDX) {
/* The mmu is disabled; lookups never fail. */
get_phys_nommu(&phys_addr, &prot, addr);
excp = 0;
} else {
bool super = mmu_idx == MMU_SUPERVISOR_IDX;
int need = (access_type == MMU_INST_FETCH ? PAGE_EXEC
: access_type == MMU_DATA_STORE ? PAGE_WRITE
: PAGE_READ);
excp = get_phys_mmu(cpu, &phys_addr, &prot, addr, need, super);
}
if (unlikely(excp)) {
raise_mmu_exception(cpu, addr, excp);
cpu_loop_exit_restore(cs, retaddr);
}
tlb_set_page(cs, addr & TARGET_PAGE_MASK,
phys_addr & TARGET_PAGE_MASK, prot,
mmu_idx, TARGET_PAGE_SIZE);
}
#endif


@ -1,40 +0,0 @@
/*
* OpenRISC MMU helper routines
*
* Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
* Zhizhou Zhang <etouzh@gmail.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#ifndef CONFIG_USER_ONLY
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = openrisc_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (ret) {
/* Raise Exception. */
cpu_loop_exit_restore(cs, retaddr);
}
}
#endif


@ -27,13 +27,12 @@
#define TO_SPR(group, number) (((group) << 11) + (number))
void HELPER(mtspr)(CPUOpenRISCState *env,
target_ulong ra, target_ulong rb, target_ulong offset)
void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb)
{
#ifndef CONFIG_USER_ONLY
OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
CPUState *cs = CPU(cpu);
int spr = (ra | offset);
target_ulong mr;
int idx;
switch (spr) {
@ -57,26 +56,7 @@ void HELPER(mtspr)(CPUOpenRISCState *env,
break;
case TO_SPR(0, 17): /* SR */
if ((env->sr & (SR_IME | SR_DME | SR_SM)) ^
(rb & (SR_IME | SR_DME | SR_SM))) {
tlb_flush(cs);
}
cpu_set_sr(env, rb);
if (env->sr & SR_DME) {
env->tlb->cpu_openrisc_map_address_data =
&cpu_openrisc_get_phys_data;
} else {
env->tlb->cpu_openrisc_map_address_data =
&cpu_openrisc_get_phys_nommu;
}
if (env->sr & SR_IME) {
env->tlb->cpu_openrisc_map_address_code =
&cpu_openrisc_get_phys_code;
} else {
env->tlb->cpu_openrisc_map_address_code =
&cpu_openrisc_get_phys_nommu;
}
break;
case TO_SPR(0, 18): /* PPC */
@ -98,18 +78,22 @@ void HELPER(mtspr)(CPUOpenRISCState *env,
case TO_SPR(0, 1024) ... TO_SPR(0, 1024 + (16 * 32)): /* Shadow GPRs */
idx = (spr - 1024);
env->shadow_gpr[idx / 32][idx % 32] = rb;
case TO_SPR(1, 512) ... TO_SPR(1, 512+DTLB_SIZE-1): /* DTLBW0MR 0-127 */
idx = spr - TO_SPR(1, 512);
if (!(rb & 1)) {
tlb_flush_page(cs, env->tlb->dtlb[0][idx].mr & TARGET_PAGE_MASK);
}
env->tlb->dtlb[0][idx].mr = rb;
break;
case TO_SPR(1, 640) ... TO_SPR(1, 640+DTLB_SIZE-1): /* DTLBW0TR 0-127 */
case TO_SPR(1, 512) ... TO_SPR(1, 512 + TLB_SIZE - 1): /* DTLBW0MR 0-127 */
idx = spr - TO_SPR(1, 512);
mr = env->tlb.dtlb[idx].mr;
if (mr & 1) {
tlb_flush_page(cs, mr & TARGET_PAGE_MASK);
}
if (rb & 1) {
tlb_flush_page(cs, rb & TARGET_PAGE_MASK);
}
env->tlb.dtlb[idx].mr = rb;
break;
case TO_SPR(1, 640) ... TO_SPR(1, 640 + TLB_SIZE - 1): /* DTLBW0TR 0-127 */
idx = spr - TO_SPR(1, 640);
env->tlb->dtlb[0][idx].tr = rb;
env->tlb.dtlb[idx].tr = rb;
break;
case TO_SPR(1, 768) ... TO_SPR(1, 895): /* DTLBW1MR 0-127 */
case TO_SPR(1, 896) ... TO_SPR(1, 1023): /* DTLBW1TR 0-127 */
@ -118,17 +102,21 @@ void HELPER(mtspr)(CPUOpenRISCState *env,
case TO_SPR(1, 1280) ... TO_SPR(1, 1407): /* DTLBW3MR 0-127 */
case TO_SPR(1, 1408) ... TO_SPR(1, 1535): /* DTLBW3TR 0-127 */
break;
case TO_SPR(2, 512) ... TO_SPR(2, 512+ITLB_SIZE-1): /* ITLBW0MR 0-127 */
idx = spr - TO_SPR(2, 512);
if (!(rb & 1)) {
tlb_flush_page(cs, env->tlb->itlb[0][idx].mr & TARGET_PAGE_MASK);
}
env->tlb->itlb[0][idx].mr = rb;
break;
case TO_SPR(2, 640) ... TO_SPR(2, 640+ITLB_SIZE-1): /* ITLBW0TR 0-127 */
case TO_SPR(2, 512) ... TO_SPR(2, 512 + TLB_SIZE - 1): /* ITLBW0MR 0-127 */
idx = spr - TO_SPR(2, 512);
mr = env->tlb.itlb[idx].mr;
if (mr & 1) {
tlb_flush_page(cs, mr & TARGET_PAGE_MASK);
}
if (rb & 1) {
tlb_flush_page(cs, rb & TARGET_PAGE_MASK);
}
env->tlb.itlb[idx].mr = rb;
break;
case TO_SPR(2, 640) ... TO_SPR(2, 640 + TLB_SIZE - 1): /* ITLBW0TR 0-127 */
idx = spr - TO_SPR(2, 640);
env->tlb->itlb[0][idx].tr = rb;
env->tlb.itlb[idx].tr = rb;
break;
case TO_SPR(2, 768) ... TO_SPR(2, 895): /* ITLBW1MR 0-127 */
case TO_SPR(2, 896) ... TO_SPR(2, 1023): /* ITLBW1TR 0-127 */
@ -137,6 +125,7 @@ void HELPER(mtspr)(CPUOpenRISCState *env,
case TO_SPR(2, 1280) ... TO_SPR(2, 1407): /* ITLBW3MR 0-127 */
case TO_SPR(2, 1408) ... TO_SPR(2, 1535): /* ITLBW3TR 0-127 */
break;
case TO_SPR(5, 1): /* MACLO */
env->mac = deposit64(env->mac, 0, 32, rb);
break;
@ -153,7 +142,7 @@ void HELPER(mtspr)(CPUOpenRISCState *env,
}
break;
case TO_SPR(9, 0): /* PICMR */
env->picmr |= rb;
env->picmr = rb;
break;
case TO_SPR(9, 2): /* PICSR */
env->picsr &= ~rb;
@ -201,13 +190,12 @@ void HELPER(mtspr)(CPUOpenRISCState *env,
#endif
}
target_ulong HELPER(mfspr)(CPUOpenRISCState *env,
target_ulong rd, target_ulong ra, uint32_t offset)
target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd,
target_ulong spr)
{
#ifndef CONFIG_USER_ONLY
OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
CPUState *cs = CPU(cpu);
int spr = (ra | offset);
int idx;
switch (spr) {
@ -259,13 +247,13 @@ target_ulong HELPER(mfspr)(CPUOpenRISCState *env,
idx = (spr - 1024);
return env->shadow_gpr[idx / 32][idx % 32];
case TO_SPR(1, 512) ... TO_SPR(1, 512+DTLB_SIZE-1): /* DTLBW0MR 0-127 */
case TO_SPR(1, 512) ... TO_SPR(1, 512 + TLB_SIZE - 1): /* DTLBW0MR 0-127 */
idx = spr - TO_SPR(1, 512);
return env->tlb->dtlb[0][idx].mr;
return env->tlb.dtlb[idx].mr;
case TO_SPR(1, 640) ... TO_SPR(1, 640+DTLB_SIZE-1): /* DTLBW0TR 0-127 */
case TO_SPR(1, 640) ... TO_SPR(1, 640 + TLB_SIZE - 1): /* DTLBW0TR 0-127 */
idx = spr - TO_SPR(1, 640);
return env->tlb->dtlb[0][idx].tr;
return env->tlb.dtlb[idx].tr;
case TO_SPR(1, 768) ... TO_SPR(1, 895): /* DTLBW1MR 0-127 */
case TO_SPR(1, 896) ... TO_SPR(1, 1023): /* DTLBW1TR 0-127 */
@ -275,13 +263,13 @@ target_ulong HELPER(mfspr)(CPUOpenRISCState *env,
case TO_SPR(1, 1408) ... TO_SPR(1, 1535): /* DTLBW3TR 0-127 */
break;
case TO_SPR(2, 512) ... TO_SPR(2, 512+ITLB_SIZE-1): /* ITLBW0MR 0-127 */
case TO_SPR(2, 512) ... TO_SPR(2, 512 + TLB_SIZE - 1): /* ITLBW0MR 0-127 */
idx = spr - TO_SPR(2, 512);
return env->tlb->itlb[0][idx].mr;
return env->tlb.itlb[idx].mr;
case TO_SPR(2, 640) ... TO_SPR(2, 640+ITLB_SIZE-1): /* ITLBW0TR 0-127 */
case TO_SPR(2, 640) ... TO_SPR(2, 640 + TLB_SIZE - 1): /* ITLBW0TR 0-127 */
idx = spr - TO_SPR(2, 640);
return env->tlb->itlb[0][idx].tr;
return env->tlb.itlb[idx].tr;
case TO_SPR(2, 768) ... TO_SPR(2, 895): /* ITLBW1MR 0-127 */
case TO_SPR(2, 896) ... TO_SPR(2, 1023): /* ITLBW1TR 0-127 */


@ -36,22 +36,29 @@
#include "trace-tcg.h"
#include "exec/log.h"
#define LOG_DIS(str, ...) \
qemu_log_mask(CPU_LOG_TB_IN_ASM, "%08x: " str, dc->base.pc_next, \
## __VA_ARGS__)
/* is_jmp field values */
#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
#define DISAS_EXIT DISAS_TARGET_0 /* force exit to main loop */
#define DISAS_JUMP DISAS_TARGET_1 /* exit via jmp_pc/jmp_pc_imm */
typedef struct DisasContext {
DisasContextBase base;
uint32_t mem_idx;
uint32_t tb_flags;
uint32_t delayed_branch;
/* If not -1, jmp_pc contains this value and so is a direct jump. */
target_ulong jmp_pc_imm;
} DisasContext;
static inline bool is_user(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
return true;
#else
return !(dc->tb_flags & TB_FLAGS_SM);
#endif
}
/* Include the auto-generated decoder. */
#include "decode.inc.c"
@ -165,34 +172,6 @@ static void check_ov64s(DisasContext *dc)
} \
} while (0)
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
if (unlikely(dc->base.singlestep_enabled)) {
return false;
}
#ifndef CONFIG_USER_ONLY
return (dc->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
}
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
if (use_goto_tb(dc, dest)) {
tcg_gen_movi_tl(cpu_pc, dest);
tcg_gen_goto_tb(n);
tcg_gen_exit_tb(dc->base.tb, n);
} else {
tcg_gen_movi_tl(cpu_pc, dest);
if (dc->base.singlestep_enabled) {
gen_exception(dc, EXCP_DEBUG);
}
tcg_gen_exit_tb(NULL, 0);
}
}
static void gen_ove_cy(DisasContext *dc)
{
if (dc->tb_flags & SR_OVE) {
@ -457,7 +436,6 @@ static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb)
static bool trans_l_add(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("l.add r%d, r%d, r%d\n", a->d, a->a, a->b);
check_r0_write(a->d);
gen_add(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
return true;
@ -465,7 +443,6 @@ static bool trans_l_add(DisasContext *dc, arg_dab *a, uint32_t insn)
static bool trans_l_addc(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("l.addc r%d, r%d, r%d\n", a->d, a->a, a->b);
check_r0_write(a->d);
gen_addc(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
return true;
@ -473,7 +450,6 @@ static bool trans_l_addc(DisasContext *dc, arg_dab *a, uint32_t insn)
static bool trans_l_sub(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("l.sub r%d, r%d, r%d\n", a->d, a->a, a->b);
check_r0_write(a->d);
gen_sub(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
return true;
@ -481,7 +457,6 @@ static bool trans_l_sub(DisasContext *dc, arg_dab *a, uint32_t insn)
static bool trans_l_and(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("l.and r%d, r%d, r%d\n", a->d, a->a, a->b);
check_r0_write(a->d);
tcg_gen_and_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
return true;
@ -489,7 +464,6 @@ static bool trans_l_and(DisasContext *dc, arg_dab *a, uint32_t insn)
static bool trans_l_or(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("l.or r%d, r%d, r%d\n", a->d, a->a, a->b);
check_r0_write(a->d);
tcg_gen_or_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
return true;
@ -497,7 +471,6 @@ static bool trans_l_or(DisasContext *dc, arg_dab *a, uint32_t insn)
static bool trans_l_xor(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("l.xor r%d, r%d, r%d\n", a->d, a->a, a->b);
check_r0_write(a->d);
tcg_gen_xor_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
return true;
@ -505,7 +478,6 @@ static bool trans_l_xor(DisasContext *dc, arg_dab *a, uint32_t insn)
static bool trans_l_sll(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("l.sll r%d, r%d, r%d\n", a->d, a->a, a->b);
check_r0_write(a->d);
tcg_gen_shl_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
return true;
@ -513,7 +485,6 @@ static bool trans_l_sll(DisasContext *dc, arg_dab *a, uint32_t insn)
static bool trans_l_srl(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("l.srl r%d, r%d, r%d\n", a->d, a->a, a->b);
check_r0_write(a->d);
tcg_gen_shr_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
return true;
@ -521,7 +492,6 @@ static bool trans_l_srl(DisasContext *dc, arg_dab *a, uint32_t insn)
static bool trans_l_sra(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("l.sra r%d, r%d, r%d\n", a->d, a->a, a->b);
check_r0_write(a->d);
tcg_gen_sar_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
return true;
@ -529,7 +499,6 @@ static bool trans_l_sra(DisasContext *dc, arg_dab *a, uint32_t insn)
static bool trans_l_ror(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("l.ror r%d, r%d, r%d\n", a->d, a->a, a->b);
check_r0_write(a->d);
tcg_gen_rotr_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
return true;
@ -537,7 +506,6 @@ static bool trans_l_ror(DisasContext *dc, arg_dab *a, uint32_t insn)
static bool trans_l_exths(DisasContext *dc, arg_da *a, uint32_t insn)
{
LOG_DIS("l.exths r%d, r%d\n", a->d, a->a);
check_r0_write(a->d);
tcg_gen_ext16s_tl(cpu_R[a->d], cpu_R[a->a]);
return true;
@ -545,7 +513,6 @@ static bool trans_l_exths(DisasContext *dc, arg_da *a, uint32_t insn)
static bool trans_l_extbs(DisasContext *dc, arg_da *a, uint32_t insn)
{
LOG_DIS("l.extbs r%d, r%d\n", a->d, a->a);
check_r0_write(a->d);
tcg_gen_ext8s_tl(cpu_R[a->d], cpu_R[a->a]);
return true;
@ -553,7 +520,6 @@ static bool trans_l_extbs(DisasContext *dc, arg_da *a, uint32_t insn)
static bool trans_l_exthz(DisasContext *dc, arg_da *a, uint32_t insn)
{
LOG_DIS("l.exthz r%d, r%d\n", a->d, a->a);
check_r0_write(a->d);
tcg_gen_ext16u_tl(cpu_R[a->d], cpu_R[a->a]);
return true;
@ -561,7 +527,6 @@ static bool trans_l_exthz(DisasContext *dc, arg_da *a, uint32_t insn)
static bool trans_l_extbz(DisasContext *dc, arg_da *a, uint32_t insn)
{
LOG_DIS("l.extbz r%d, r%d\n", a->d, a->a);
check_r0_write(a->d);
tcg_gen_ext8u_tl(cpu_R[a->d], cpu_R[a->a]);
return true;
@ -570,7 +535,6 @@ static bool trans_l_extbz(DisasContext *dc, arg_da *a, uint32_t insn)
static bool trans_l_cmov(DisasContext *dc, arg_dab *a, uint32_t insn)
{
TCGv zero;
LOG_DIS("l.cmov r%d, r%d, r%d\n", a->d, a->a, a->b);
check_r0_write(a->d);
zero = tcg_const_tl(0);
@ -582,8 +546,6 @@ static bool trans_l_cmov(DisasContext *dc, arg_dab *a, uint32_t insn)
static bool trans_l_ff1(DisasContext *dc, arg_da *a, uint32_t insn)
{
LOG_DIS("l.ff1 r%d, r%d\n", a->d, a->a);
check_r0_write(a->d);
tcg_gen_ctzi_tl(cpu_R[a->d], cpu_R[a->a], -1);
tcg_gen_addi_tl(cpu_R[a->d], cpu_R[a->d], 1);
@ -592,8 +554,6 @@ static bool trans_l_ff1(DisasContext *dc, arg_da *a, uint32_t insn)
static bool trans_l_fl1(DisasContext *dc, arg_da *a, uint32_t insn)
{
LOG_DIS("l.fl1 r%d, r%d\n", a->d, a->a);
check_r0_write(a->d);
tcg_gen_clzi_tl(cpu_R[a->d], cpu_R[a->a], TARGET_LONG_BITS);
tcg_gen_subfi_tl(cpu_R[a->d], TARGET_LONG_BITS, cpu_R[a->d]);
@ -602,8 +562,6 @@ static bool trans_l_fl1(DisasContext *dc, arg_da *a, uint32_t insn)
static bool trans_l_mul(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("l.mul r%d, r%d, r%d\n", a->d, a->a, a->b);
check_r0_write(a->d);
gen_mul(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
return true;
@ -611,8 +569,6 @@ static bool trans_l_mul(DisasContext *dc, arg_dab *a, uint32_t insn)
static bool trans_l_mulu(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("l.mulu r%d, r%d, r%d\n", a->d, a->a, a->b);
check_r0_write(a->d);
gen_mulu(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
return true;
@ -620,8 +576,6 @@ static bool trans_l_mulu(DisasContext *dc, arg_dab *a, uint32_t insn)
static bool trans_l_div(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("l.div r%d, r%d, r%d\n", a->d, a->a, a->b);
check_r0_write(a->d);
gen_div(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
return true;
@ -629,8 +583,6 @@ static bool trans_l_div(DisasContext *dc, arg_dab *a, uint32_t insn)
static bool trans_l_divu(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("l.divu r%d, r%d, r%d\n", a->d, a->a, a->b);
check_r0_write(a->d);
gen_divu(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
return true;
@ -638,14 +590,12 @@ static bool trans_l_divu(DisasContext *dc, arg_dab *a, uint32_t insn)
static bool trans_l_muld(DisasContext *dc, arg_ab *a, uint32_t insn)
{
LOG_DIS("l.muld r%d, r%d\n", a->a, a->b);
gen_muld(dc, cpu_R[a->a], cpu_R[a->b]);
return true;
}
static bool trans_l_muldu(DisasContext *dc, arg_ab *a, uint32_t insn)
{
LOG_DIS("l.muldu r%d, r%d\n", a->a, a->b);
gen_muldu(dc, cpu_R[a->a], cpu_R[a->b]);
return true;
}
@ -654,8 +604,8 @@ static bool trans_l_j(DisasContext *dc, arg_l_j *a, uint32_t insn)
{
target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
LOG_DIS("l.j %d\n", a->n);
tcg_gen_movi_tl(jmp_pc, tmp_pc);
dc->jmp_pc_imm = tmp_pc;
dc->delayed_branch = 2;
return true;
}
@ -665,11 +615,11 @@ static bool trans_l_jal(DisasContext *dc, arg_l_jal *a, uint32_t insn)
target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
target_ulong ret_pc = dc->base.pc_next + 8;
LOG_DIS("l.jal %d\n", a->n);
tcg_gen_movi_tl(cpu_R[9], ret_pc);
/* Optimize jal being used to load the PC for PIC. */
if (tmp_pc != ret_pc) {
tcg_gen_movi_tl(jmp_pc, tmp_pc);
dc->jmp_pc_imm = tmp_pc;
dc->delayed_branch = 2;
}
return true;
@ -692,21 +642,18 @@ static void do_bf(DisasContext *dc, arg_l_bf *a, TCGCond cond)
static bool trans_l_bf(DisasContext *dc, arg_l_bf *a, uint32_t insn)
{
LOG_DIS("l.bf %d\n", a->n);
do_bf(dc, a, TCG_COND_NE);
return true;
}
static bool trans_l_bnf(DisasContext *dc, arg_l_bf *a, uint32_t insn)
{
LOG_DIS("l.bnf %d\n", a->n);
do_bf(dc, a, TCG_COND_EQ);
return true;
}
static bool trans_l_jr(DisasContext *dc, arg_l_jr *a, uint32_t insn)
{
LOG_DIS("l.jr r%d\n", a->b);
tcg_gen_mov_tl(jmp_pc, cpu_R[a->b]);
dc->delayed_branch = 2;
return true;
@ -714,7 +661,6 @@ static bool trans_l_jr(DisasContext *dc, arg_l_jr *a, uint32_t insn)
static bool trans_l_jalr(DisasContext *dc, arg_l_jalr *a, uint32_t insn)
{
LOG_DIS("l.jalr r%d\n", a->b);
tcg_gen_mov_tl(jmp_pc, cpu_R[a->b]);
tcg_gen_movi_tl(cpu_R[9], dc->base.pc_next + 8);
dc->delayed_branch = 2;
@ -725,8 +671,6 @@ static bool trans_l_lwa(DisasContext *dc, arg_load *a, uint32_t insn)
{
TCGv ea;
LOG_DIS("l.lwa r%d, r%d, %d\n", a->d, a->a, a->i);
check_r0_write(a->d);
ea = tcg_temp_new();
tcg_gen_addi_tl(ea, cpu_R[a->a], a->i);
@ -750,42 +694,36 @@ static void do_load(DisasContext *dc, arg_load *a, TCGMemOp mop)
static bool trans_l_lwz(DisasContext *dc, arg_load *a, uint32_t insn)
{
LOG_DIS("l.lwz r%d, r%d, %d\n", a->d, a->a, a->i);
do_load(dc, a, MO_TEUL);
return true;
}
static bool trans_l_lws(DisasContext *dc, arg_load *a, uint32_t insn)
{
LOG_DIS("l.lws r%d, r%d, %d\n", a->d, a->a, a->i);
do_load(dc, a, MO_TESL);
return true;
}
static bool trans_l_lbz(DisasContext *dc, arg_load *a, uint32_t insn)
{
LOG_DIS("l.lbz r%d, r%d, %d\n", a->d, a->a, a->i);
do_load(dc, a, MO_UB);
return true;
}
static bool trans_l_lbs(DisasContext *dc, arg_load *a, uint32_t insn)
{
LOG_DIS("l.lbs r%d, r%d, %d\n", a->d, a->a, a->i);
do_load(dc, a, MO_SB);
return true;
}
static bool trans_l_lhz(DisasContext *dc, arg_load *a, uint32_t insn)
{
LOG_DIS("l.lhz r%d, r%d, %d\n", a->d, a->a, a->i);
do_load(dc, a, MO_TEUW);
return true;
}
static bool trans_l_lhs(DisasContext *dc, arg_load *a, uint32_t insn)
{
LOG_DIS("l.lhs r%d, r%d, %d\n", a->d, a->a, a->i);
do_load(dc, a, MO_TESW);
return true;
}
@@ -795,8 +733,6 @@ static bool trans_l_swa(DisasContext *dc, arg_store *a, uint32_t insn)
TCGv ea, val;
TCGLabel *lab_fail, *lab_done;
LOG_DIS("l.swa r%d, r%d, %d\n", a->a, a->b, a->i);
ea = tcg_temp_new();
tcg_gen_addi_tl(ea, cpu_R[a->a], a->i);
@@ -837,28 +773,24 @@ static void do_store(DisasContext *dc, arg_store *a, TCGMemOp mop)
static bool trans_l_sw(DisasContext *dc, arg_store *a, uint32_t insn)
{
LOG_DIS("l.sw r%d, r%d, %d\n", a->a, a->b, a->i);
do_store(dc, a, MO_TEUL);
return true;
}
static bool trans_l_sb(DisasContext *dc, arg_store *a, uint32_t insn)
{
LOG_DIS("l.sb r%d, r%d, %d\n", a->a, a->b, a->i);
do_store(dc, a, MO_UB);
return true;
}
static bool trans_l_sh(DisasContext *dc, arg_store *a, uint32_t insn)
{
LOG_DIS("l.sh r%d, r%d, %d\n", a->a, a->b, a->i);
do_store(dc, a, MO_TEUW);
return true;
}
static bool trans_l_nop(DisasContext *dc, arg_l_nop *a, uint32_t insn)
{
LOG_DIS("l.nop %d\n", a->k);
return true;
}
@@ -866,7 +798,6 @@ static bool trans_l_addi(DisasContext *dc, arg_rri *a, uint32_t insn)
{
TCGv t0;
LOG_DIS("l.addi r%d, r%d, %d\n", a->d, a->a, a->i);
check_r0_write(a->d);
t0 = tcg_const_tl(a->i);
gen_add(dc, cpu_R[a->d], cpu_R[a->a], t0);
@@ -878,7 +809,6 @@ static bool trans_l_addic(DisasContext *dc, arg_rri *a, uint32_t insn)
{
TCGv t0;
LOG_DIS("l.addic r%d, r%d, %d\n", a->d, a->a, a->i);
check_r0_write(a->d);
t0 = tcg_const_tl(a->i);
gen_addc(dc, cpu_R[a->d], cpu_R[a->a], t0);
@@ -890,7 +820,6 @@ static bool trans_l_muli(DisasContext *dc, arg_rri *a, uint32_t insn)
{
TCGv t0;
LOG_DIS("l.muli r%d, r%d, %d\n", a->d, a->a, a->i);
check_r0_write(a->d);
t0 = tcg_const_tl(a->i);
gen_mul(dc, cpu_R[a->d], cpu_R[a->a], t0);
@@ -902,7 +831,6 @@ static bool trans_l_maci(DisasContext *dc, arg_l_maci *a, uint32_t insn)
{
TCGv t0;
LOG_DIS("l.maci r%d, %d\n", a->a, a->i);
t0 = tcg_const_tl(a->i);
gen_mac(dc, cpu_R[a->a], t0);
tcg_temp_free(t0);
@@ -911,7 +839,6 @@ static bool trans_l_maci(DisasContext *dc, arg_l_maci *a, uint32_t insn)
static bool trans_l_andi(DisasContext *dc, arg_rrk *a, uint32_t insn)
{
LOG_DIS("l.andi r%d, r%d, %d\n", a->d, a->a, a->k);
check_r0_write(a->d);
tcg_gen_andi_tl(cpu_R[a->d], cpu_R[a->a], a->k);
return true;
@@ -919,7 +846,6 @@ static bool trans_l_andi(DisasContext *dc, arg_rrk *a, uint32_t insn)
static bool trans_l_ori(DisasContext *dc, arg_rrk *a, uint32_t insn)
{
LOG_DIS("l.ori r%d, r%d, %d\n", a->d, a->a, a->k);
check_r0_write(a->d);
tcg_gen_ori_tl(cpu_R[a->d], cpu_R[a->a], a->k);
return true;
@@ -927,7 +853,6 @@ static bool trans_l_ori(DisasContext *dc, arg_rrk *a, uint32_t insn)
static bool trans_l_xori(DisasContext *dc, arg_rri *a, uint32_t insn)
{
LOG_DIS("l.xori r%d, r%d, %d\n", a->d, a->a, a->i);
check_r0_write(a->d);
tcg_gen_xori_tl(cpu_R[a->d], cpu_R[a->a], a->i);
return true;
@@ -935,72 +860,73 @@ static bool trans_l_xori(DisasContext *dc, arg_rri *a, uint32_t insn)
static bool trans_l_mfspr(DisasContext *dc, arg_l_mfspr *a, uint32_t insn)
{
LOG_DIS("l.mfspr r%d, r%d, %d\n", a->d, a->a, a->k);
check_r0_write(a->d);
#ifdef CONFIG_USER_ONLY
gen_illegal_exception(dc);
#else
if (dc->mem_idx == MMU_USER_IDX) {
if (is_user(dc)) {
gen_illegal_exception(dc);
} else {
TCGv_i32 ti = tcg_const_i32(a->k);
gen_helper_mfspr(cpu_R[a->d], cpu_env, cpu_R[a->d], cpu_R[a->a], ti);
tcg_temp_free_i32(ti);
TCGv spr = tcg_temp_new();
tcg_gen_ori_tl(spr, cpu_R[a->a], a->k);
gen_helper_mfspr(cpu_R[a->d], cpu_env, cpu_R[a->d], spr);
tcg_temp_free(spr);
}
#endif
return true;
}
static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a, uint32_t insn)
{
LOG_DIS("l.mtspr r%d, r%d, %d\n", a->a, a->b, a->k);
#ifdef CONFIG_USER_ONLY
gen_illegal_exception(dc);
#else
if (dc->mem_idx == MMU_USER_IDX) {
if (is_user(dc)) {
gen_illegal_exception(dc);
} else {
TCGv_i32 ti = tcg_const_i32(a->k);
gen_helper_mtspr(cpu_env, cpu_R[a->a], cpu_R[a->b], ti);
tcg_temp_free_i32(ti);
TCGv spr;
/* For SR, we will need to exit the TB to recognize the new
* exception state. For NPC, in theory this counts as a branch
* (although the SPR only exists for use by an ICE). Save all
* of the cpu state first, allowing it to be overwritten.
*/
if (dc->delayed_branch) {
tcg_gen_mov_tl(cpu_pc, jmp_pc);
tcg_gen_discard_tl(jmp_pc);
} else {
tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4);
}
dc->base.is_jmp = DISAS_EXIT;
spr = tcg_temp_new();
tcg_gen_ori_tl(spr, cpu_R[a->a], a->k);
gen_helper_mtspr(cpu_env, spr, cpu_R[a->b]);
tcg_temp_free(spr);
}
#endif
return true;
}
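Both SPR hunks above now form the 16-bit SPR number inline as rA OR'ed with
the immediate K instead of passing K to the helper separately.  A minimal
sketch of that addressing (the group/register split and the SR example value
come from the OpenRISC 1000 architecture manual, not from this patch):

#include <stdint.h>
#include <stdio.h>

static uint16_t spr_number(uint32_t ra, uint32_t k)
{
    return (uint16_t)(ra | k);            /* what tcg_gen_ori_tl computes above */
}

int main(void)
{
    uint16_t spr = spr_number(0, 17);     /* e.g. l.mtspr r0, rB, 17 -> SR */
    printf("spr=%u group=%u index=%u\n", spr, spr >> 11, spr & 0x7ff);
    return 0;
}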
static bool trans_l_mac(DisasContext *dc, arg_ab *a, uint32_t insn)
{
LOG_DIS("l.mac r%d, r%d\n", a->a, a->b);
gen_mac(dc, cpu_R[a->a], cpu_R[a->b]);
return true;
}
static bool trans_l_msb(DisasContext *dc, arg_ab *a, uint32_t insn)
{
LOG_DIS("l.msb r%d, r%d\n", a->a, a->b);
gen_msb(dc, cpu_R[a->a], cpu_R[a->b]);
return true;
}
static bool trans_l_macu(DisasContext *dc, arg_ab *a, uint32_t insn)
{
LOG_DIS("l.mac r%d, r%d\n", a->a, a->b);
gen_macu(dc, cpu_R[a->a], cpu_R[a->b]);
return true;
}
static bool trans_l_msbu(DisasContext *dc, arg_ab *a, uint32_t insn)
{
LOG_DIS("l.msb r%d, r%d\n", a->a, a->b);
gen_msbu(dc, cpu_R[a->a], cpu_R[a->b]);
return true;
}
static bool trans_l_slli(DisasContext *dc, arg_dal *a, uint32_t insn)
{
LOG_DIS("l.slli r%d, r%d, %d\n", a->d, a->a, a->l);
check_r0_write(a->d);
tcg_gen_shli_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
return true;
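The immediate shift translations here mask the count with TARGET_LONG_BITS - 1
before emitting the TCG shift, since shifts by the operand width or more are
not well defined.  A tiny host-side illustration of the masking (the
out-of-range count is made up for the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t x = 0x80000001u;
    unsigned l = 40;                      /* out-of-range guest shift amount */
    unsigned masked = l & 31;             /* l & (TARGET_LONG_BITS - 1) for or1k */

    printf("masked count=%u result=%#x\n", masked, x << masked);
    return 0;
}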
@@ -1008,7 +934,6 @@ static bool trans_l_slli(DisasContext *dc, arg_dal *a, uint32_t insn)
static bool trans_l_srli(DisasContext *dc, arg_dal *a, uint32_t insn)
{
LOG_DIS("l.srli r%d, r%d, %d\n", a->d, a->a, a->l);
check_r0_write(a->d);
tcg_gen_shri_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
return true;
@@ -1016,7 +941,6 @@ static bool trans_l_srli(DisasContext *dc, arg_dal *a, uint32_t insn)
static bool trans_l_srai(DisasContext *dc, arg_dal *a, uint32_t insn)
{
LOG_DIS("l.srai r%d, r%d, %d\n", a->d, a->a, a->l);
check_r0_write(a->d);
tcg_gen_sari_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
return true;
@@ -1024,7 +948,6 @@ static bool trans_l_srai(DisasContext *dc, arg_dal *a, uint32_t insn)
static bool trans_l_rori(DisasContext *dc, arg_dal *a, uint32_t insn)
{
LOG_DIS("l.rori r%d, r%d, %d\n", a->d, a->a, a->l);
check_r0_write(a->d);
tcg_gen_rotri_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
return true;
@@ -1032,7 +955,6 @@ static bool trans_l_rori(DisasContext *dc, arg_dal *a, uint32_t insn)
static bool trans_l_movhi(DisasContext *dc, arg_l_movhi *a, uint32_t insn)
{
LOG_DIS("l.movhi r%d, %d\n", a->d, a->k);
check_r0_write(a->d);
tcg_gen_movi_tl(cpu_R[a->d], a->k << 16);
return true;
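l.movhi above simply materialises K in the upper 16 bits; together with the
l.ori translation earlier this is the standard two-instruction way to build a
full 32-bit constant.  A host-side sketch of the idiom (the constant is an
arbitrary example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t hi = 0xdead, lo = 0xbeef;    /* example immediates */
    uint32_t r  = (hi << 16) | lo;        /* l.movhi rD, hi ; l.ori rD, rD, lo */

    printf("%#x\n", r);                   /* 0xdeadbeef */
    return 0;
}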
@@ -1040,7 +962,6 @@ static bool trans_l_movhi(DisasContext *dc, arg_l_movhi *a, uint32_t insn)
static bool trans_l_macrc(DisasContext *dc, arg_l_macrc *a, uint32_t insn)
{
LOG_DIS("l.macrc r%d\n", a->d);
check_r0_write(a->d);
tcg_gen_trunc_i64_tl(cpu_R[a->d], cpu_mac);
tcg_gen_movi_i64(cpu_mac, 0);
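trans_l_macrc above reads back the low 32 bits of the 64-bit accumulator and
then clears it.  A host-side sketch of that accumulate / read-and-clear
behaviour (signed multiplication is assumed here, as for l.mac; illustrative
only, not the helper code itself):

#include <stdint.h>
#include <stdio.h>

static int64_t mac;                       /* stand-in for cpu_mac */

static void do_l_mac(int32_t a, int32_t b)
{
    mac += (int64_t)a * b;
}

static uint32_t do_l_macrc(void)
{
    uint32_t lo = (uint32_t)mac;          /* trunc_i64_tl */
    mac = 0;                              /* movi_i64 cpu_mac, 0 */
    return lo;
}

int main(void)
{
    do_l_mac(3, 4);
    do_l_mac(-1, 5);
    printf("%u %lld\n", do_l_macrc(), (long long)mac);   /* 7 0 */
    return 0;
}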
@@ -1049,147 +970,126 @@ static bool trans_l_macrc(DisasContext *dc, arg_l_macrc *a, uint32_t insn)
static bool trans_l_sfeq(DisasContext *dc, arg_ab *a, TCGCond cond)
{
LOG_DIS("l.sfeq r%d, r%d\n", a->a, a->b);
tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
return true;
}
static bool trans_l_sfne(DisasContext *dc, arg_ab *a, TCGCond cond)
{
LOG_DIS("l.sfne r%d, r%d\n", a->a, a->b);
tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
return true;
}
static bool trans_l_sfgtu(DisasContext *dc, arg_ab *a, TCGCond cond)
{
LOG_DIS("l.sfgtu r%d, r%d\n", a->a, a->b);
tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
return true;
}
static bool trans_l_sfgeu(DisasContext *dc, arg_ab *a, TCGCond cond)
{
LOG_DIS("l.sfgeu r%d, r%d\n", a->a, a->b);
tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
return true;
}
static bool trans_l_sfltu(DisasContext *dc, arg_ab *a, TCGCond cond)
{
LOG_DIS("l.sfltu r%d, r%d\n", a->a, a->b);
tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
return true;
}
static bool trans_l_sfleu(DisasContext *dc, arg_ab *a, TCGCond cond)
{
LOG_DIS("l.sfleu r%d, r%d\n", a->a, a->b);
tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
return true;
}
static bool trans_l_sfgts(DisasContext *dc, arg_ab *a, TCGCond cond)
{
LOG_DIS("l.sfgts r%d, r%d\n", a->a, a->b);
tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
return true;
}
static bool trans_l_sfges(DisasContext *dc, arg_ab *a, TCGCond cond)
{
LOG_DIS("l.sfges r%d, r%d\n", a->a, a->b);
tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
return true;
}
static bool trans_l_sflts(DisasContext *dc, arg_ab *a, TCGCond cond)
{
LOG_DIS("l.sflts r%d, r%d\n", a->a, a->b);
tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
return true;
}
static bool trans_l_sfles(DisasContext *dc, arg_ab *a, TCGCond cond)
{
LOG_DIS("l.sfles r%d, r%d\n", a->a, a->b);
tcg_gen_setcond_tl(TCG_COND_LE, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
return true;
}
static bool trans_l_sfeqi(DisasContext *dc, arg_ai *a, TCGCond cond)
{
LOG_DIS("l.sfeqi r%d, %d\n", a->a, a->i);
tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[a->a], a->i);
return true;
}
static bool trans_l_sfnei(DisasContext *dc, arg_ai *a, TCGCond cond)
{
LOG_DIS("l.sfnei r%d, %d\n", a->a, a->i);
tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R[a->a], a->i);
return true;
}
static bool trans_l_sfgtui(DisasContext *dc, arg_ai *a, TCGCond cond)
{
LOG_DIS("l.sfgtui r%d, %d\n", a->a, a->i);
tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[a->a], a->i);
return true;
}
static bool trans_l_sfgeui(DisasContext *dc, arg_ai *a, TCGCond cond)
{
LOG_DIS("l.sfgeui r%d, %d\n", a->a, a->i);
tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[a->a], a->i);
return true;
}
static bool trans_l_sfltui(DisasContext *dc, arg_ai *a, TCGCond cond)
{
LOG_DIS("l.sfltui r%d, %d\n", a->a, a->i);
tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[a->a], a->i);
return true;
}
static bool trans_l_sfleui(DisasContext *dc, arg_ai *a, TCGCond cond)
{
LOG_DIS("l.sfleui r%d, %d\n", a->a, a->i);
tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[a->a], a->i);
return true;
}
static bool trans_l_sfgtsi(DisasContext *dc, arg_ai *a, TCGCond cond)
{
LOG_DIS("l.sfgtsi r%d, %d\n", a->a, a->i);
tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R[a->a], a->i);
return true;
}
static bool trans_l_sfgesi(DisasContext *dc, arg_ai *a, TCGCond cond)
{
LOG_DIS("l.sfgesi r%d, %d\n", a->a, a->i);
tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R[a->a], a->i);
return true;
}
static bool trans_l_sfltsi(DisasContext *dc, arg_ai *a, TCGCond cond)
{
LOG_DIS("l.sfltsi r%d, %d\n", a->a, a->i);
tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R[a->a], a->i);
return true;
}
static bool trans_l_sflesi(DisasContext *dc, arg_ai *a, TCGCond cond)
{
LOG_DIS("l.sflesi r%d, %d\n", a->a, a->i);
tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R[a->a], a->i);
return true;
}
static bool trans_l_sys(DisasContext *dc, arg_l_sys *a, uint32_t insn)
{
LOG_DIS("l.sys %d\n", a->k);
tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
gen_exception(dc, EXCP_SYSCALL);
dc->base.is_jmp = DISAS_NORETURN;
@@ -1198,7 +1098,6 @@ static bool trans_l_sys(DisasContext *dc, arg_l_sys *a, uint32_t insn)
static bool trans_l_trap(DisasContext *dc, arg_l_trap *a, uint32_t insn)
{
LOG_DIS("l.trap %d\n", a->k);
tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
gen_exception(dc, EXCP_TRAP);
dc->base.is_jmp = DISAS_NORETURN;
@@ -1207,37 +1106,28 @@ static bool trans_l_trap(DisasContext *dc, arg_l_trap *a, uint32_t insn)
static bool trans_l_msync(DisasContext *dc, arg_l_msync *a, uint32_t insn)
{
LOG_DIS("l.msync\n");
tcg_gen_mb(TCG_MO_ALL);
return true;
}
static bool trans_l_psync(DisasContext *dc, arg_l_psync *a, uint32_t insn)
{
LOG_DIS("l.psync\n");
return true;
}
static bool trans_l_csync(DisasContext *dc, arg_l_csync *a, uint32_t insn)
{
LOG_DIS("l.csync\n");
return true;
}
static bool trans_l_rfe(DisasContext *dc, arg_l_rfe *a, uint32_t insn)
{
LOG_DIS("l.rfe\n");
#ifdef CONFIG_USER_ONLY
gen_illegal_exception(dc);
#else
if (dc->mem_idx == MMU_USER_IDX) {
if (is_user(dc)) {
gen_illegal_exception(dc);
} else {
gen_helper_rfe(cpu_env);
dc->base.is_jmp = DISAS_UPDATE;
dc->base.is_jmp = DISAS_EXIT;
}
#endif
return true;
}
@@ -1274,56 +1164,48 @@ static void do_fpcmp(DisasContext *dc, arg_ab *a,
static bool trans_lf_add_s(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("lf.add.s r%d, r%d, r%d\n", a->d, a->a, a->b);
do_fp3(dc, a, gen_helper_float_add_s);
return true;
}
static bool trans_lf_sub_s(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("lf.sub.s r%d, r%d, r%d\n", a->d, a->a, a->b);
do_fp3(dc, a, gen_helper_float_sub_s);
return true;
}
static bool trans_lf_mul_s(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("lf.mul.s r%d, r%d, r%d\n", a->d, a->a, a->b);
do_fp3(dc, a, gen_helper_float_mul_s);
return true;
}
static bool trans_lf_div_s(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("lf.div.s r%d, r%d, r%d\n", a->d, a->a, a->b);
do_fp3(dc, a, gen_helper_float_div_s);
return true;
}
static bool trans_lf_rem_s(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("lf.rem.s r%d, r%d, r%d\n", a->d, a->a, a->b);
do_fp3(dc, a, gen_helper_float_rem_s);
return true;
}
static bool trans_lf_itof_s(DisasContext *dc, arg_da *a, uint32_t insn)
{
LOG_DIS("lf.itof.s r%d, r%d\n", a->d, a->a);
do_fp2(dc, a, gen_helper_itofs);
return true;
}
static bool trans_lf_ftoi_s(DisasContext *dc, arg_da *a, uint32_t insn)
{
LOG_DIS("lf.ftoi.s r%d, r%d\n", a->d, a->a);
do_fp2(dc, a, gen_helper_ftois);
return true;
}
static bool trans_lf_madd_s(DisasContext *dc, arg_dab *a, uint32_t insn)
{
LOG_DIS("lf.madd.s r%d, r%d, r%d\n", a->d, a->a, a->b);
check_r0_write(a->d);
gen_helper_float_madd_s(cpu_R[a->d], cpu_env, cpu_R[a->d],
cpu_R[a->a], cpu_R[a->b]);
@@ -1333,42 +1215,36 @@ static bool trans_lf_madd_s(DisasContext *dc, arg_dab *a, uint32_t insn)
static bool trans_lf_sfeq_s(DisasContext *dc, arg_ab *a, uint32_t insn)
{
LOG_DIS("lf.sfeq.s r%d, r%d\n", a->a, a->b);
do_fpcmp(dc, a, gen_helper_float_eq_s, false, false);
return true;
}
static bool trans_lf_sfne_s(DisasContext *dc, arg_ab *a, uint32_t insn)
{
LOG_DIS("lf.sfne.s r%d, r%d\n", a->a, a->b);
do_fpcmp(dc, a, gen_helper_float_eq_s, true, false);
return true;
}
static bool trans_lf_sfgt_s(DisasContext *dc, arg_ab *a, uint32_t insn)
{
LOG_DIS("lf.sfgt.s r%d, r%d\n", a->a, a->b);
do_fpcmp(dc, a, gen_helper_float_lt_s, false, true);
return true;
}
static bool trans_lf_sfge_s(DisasContext *dc, arg_ab *a, uint32_t insn)
{
LOG_DIS("lf.sfge.s r%d, r%d\n", a->a, a->b);
do_fpcmp(dc, a, gen_helper_float_le_s, false, true);
return true;
}
static bool trans_lf_sflt_s(DisasContext *dc, arg_ab *a, uint32_t insn)
{
LOG_DIS("lf.sflt.s r%d, r%d\n", a->a, a->b);
do_fpcmp(dc, a, gen_helper_float_lt_s, false, false);
return true;
}
static bool trans_lf_sfle_s(DisasContext *dc, arg_ab *a, uint32_t insn)
{
LOG_DIS("lf.sfle.s r%d, r%d\n", a->a, a->b);
do_fpcmp(dc, a, gen_helper_float_le_s, false, false);
return true;
}
@@ -1382,6 +1258,8 @@ static void openrisc_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
dc->mem_idx = cpu_mmu_index(env, false);
dc->tb_flags = dc->base.tb->flags;
dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0;
dc->jmp_pc_imm = -1;
bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
@@ -1434,50 +1312,81 @@ static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
}
dc->base.pc_next += 4;
/* delay slot */
if (dc->delayed_branch) {
dc->delayed_branch--;
if (!dc->delayed_branch) {
tcg_gen_mov_tl(cpu_pc, jmp_pc);
tcg_gen_discard_tl(jmp_pc);
dc->base.is_jmp = DISAS_UPDATE;
return;
}
/* When exiting the delay slot normally, exit via jmp_pc.
* For DISAS_NORETURN, we have raised an exception and already exited.
* For DISAS_EXIT, we found l.rfe in a delay slot. There's nothing
* in the manual saying this is illegal, but surely it should be.
* At least or1ksim overrides pcnext and ignores the branch.
*/
if (dc->delayed_branch
&& --dc->delayed_branch == 0
&& dc->base.is_jmp == DISAS_NEXT) {
dc->base.is_jmp = DISAS_JUMP;
}
}
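The reworked delay-slot handling above arms delayed_branch with 2 at the
branch, decrements it after every translated instruction, and ends the TB with
DISAS_JUMP once it reaches 0, i.e. immediately after the delay slot.  A toy
model of that countdown (the instruction stream is invented for illustration):

#include <stdio.h>

int main(void)
{
    int delayed_branch = 0;

    for (int insn = 0; insn < 6; insn++) {
        int is_branch = (insn == 1);      /* pretend instruction 1 is l.j */

        if (is_branch) {
            delayed_branch = 2;
        }
        if (delayed_branch && --delayed_branch == 0) {
            printf("end TB after insn %d (the delay slot)\n", insn);
            break;
        }
    }
    return 0;
}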
static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
target_ulong jmp_dest;
/* If we have already exited the TB, nothing following has effect. */
if (dc->base.is_jmp == DISAS_NORETURN) {
return;
}
/* Adjust the delayed branch state for the next TB. */
if ((dc->tb_flags & TB_FLAGS_DFLAG ? 1 : 0) != (dc->delayed_branch != 0)) {
tcg_gen_movi_i32(cpu_dflag, dc->delayed_branch != 0);
}
tcg_gen_movi_tl(cpu_ppc, dc->base.pc_next - 4);
if (dc->base.is_jmp == DISAS_NEXT) {
dc->base.is_jmp = DISAS_UPDATE;
tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
}
if (unlikely(dc->base.singlestep_enabled)) {
gen_exception(dc, EXCP_DEBUG);
} else {
switch (dc->base.is_jmp) {
case DISAS_TOO_MANY:
gen_goto_tb(dc, 0, dc->base.pc_next);
/* For DISAS_TOO_MANY, jump to the next insn. */
jmp_dest = dc->base.pc_next;
tcg_gen_movi_tl(cpu_ppc, jmp_dest - 4);
switch (dc->base.is_jmp) {
case DISAS_JUMP:
jmp_dest = dc->jmp_pc_imm;
if (jmp_dest == -1) {
/* The jump destination is indirect/computed; use jmp_pc. */
tcg_gen_mov_tl(cpu_pc, jmp_pc);
tcg_gen_discard_tl(jmp_pc);
if (unlikely(dc->base.singlestep_enabled)) {
gen_exception(dc, EXCP_DEBUG);
} else {
tcg_gen_lookup_and_goto_ptr();
}
break;
case DISAS_NORETURN:
case DISAS_JUMP:
case DISAS_TB_JUMP:
break;
case DISAS_UPDATE:
/* indicate that the hash table must be used
to find the next TB */
tcg_gen_exit_tb(NULL, 0);
break;
default:
g_assert_not_reached();
}
/* The jump destination is direct; use jmp_pc_imm.
However, we will have stored into jmp_pc as well;
we know now that it wasn't needed. */
tcg_gen_discard_tl(jmp_pc);
/* fallthru */
case DISAS_TOO_MANY:
if (unlikely(dc->base.singlestep_enabled)) {
tcg_gen_movi_tl(cpu_pc, jmp_dest);
gen_exception(dc, EXCP_DEBUG);
} else if ((dc->base.pc_first ^ jmp_dest) & TARGET_PAGE_MASK) {
tcg_gen_movi_tl(cpu_pc, jmp_dest);
tcg_gen_lookup_and_goto_ptr();
} else {
tcg_gen_goto_tb(0);
tcg_gen_movi_tl(cpu_pc, jmp_dest);
tcg_gen_exit_tb(dc->base.tb, 0);
}
break;
case DISAS_EXIT:
if (unlikely(dc->base.singlestep_enabled)) {
gen_exception(dc, EXCP_DEBUG);
} else {
tcg_gen_exit_tb(NULL, 0);
}
break;
default:
g_assert_not_reached();
}
}
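In the DISAS_JUMP / DISAS_TOO_MANY path above, direct goto_tb chaining is only
used when the destination lies on the same guest page as the start of the TB;
otherwise the code falls back to tcg_gen_lookup_and_goto_ptr().  A host-side
sketch of that page test (4 KiB pages and the sample addresses are assumptions
for the example):

#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK (~(uint32_t)0xfff)      /* stand-in for TARGET_PAGE_MASK */

static int same_page(uint32_t pc_first, uint32_t dest)
{
    return ((pc_first ^ dest) & PAGE_MASK) == 0;
}

int main(void)
{
    printf("%d %d\n", same_page(0x2010, 0x2ffc), same_page(0x2010, 0x3000));   /* 1 0 */
    return 0;
}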