Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20190510' into staging

Add CPUClass::tlb_fill.
Improve tlb_vaddr_to_host for use by ARM SVE no-fault loads.

# gpg: Signature made Fri 10 May 2019 19:48:37 BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20190510: (27 commits)
  tcg: Use tlb_fill probe from tlb_vaddr_to_host
  tcg: Remove CPUClass::handle_mmu_fault
  tcg: Use CPUClass::tlb_fill in cputlb.c
  target/xtensa: Convert to CPUClass::tlb_fill
  target/unicore32: Convert to CPUClass::tlb_fill
  target/tricore: Convert to CPUClass::tlb_fill
  target/tilegx: Convert to CPUClass::tlb_fill
  target/sparc: Convert to CPUClass::tlb_fill
  target/sh4: Convert to CPUClass::tlb_fill
  target/s390x: Convert to CPUClass::tlb_fill
  target/riscv: Convert to CPUClass::tlb_fill
  target/ppc: Convert to CPUClass::tlb_fill
  target/openrisc: Convert to CPUClass::tlb_fill
  target/nios2: Convert to CPUClass::tlb_fill
  target/moxie: Convert to CPUClass::tlb_fill
  target/mips: Convert to CPUClass::tlb_fill
  target/mips: Tidy control flow in mips_cpu_handle_mmu_fault
  target/mips: Pass a valid error to raise_mmu_exception for user-only
  target/microblaze: Convert to CPUClass::tlb_fill
  target/m68k: Convert to CPUClass::tlb_fill
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell, 2019-05-16 13:15:08 +01:00
commit d8276573da
83 changed files with 868 additions and 1131 deletions


@ -855,6 +855,25 @@ static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
return ram_addr;
}
/*
* Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
* caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
* be discarded and looked up again (e.g. via tlb_entry()).
*/
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
bool ok;
/*
* This is not a probe, so only valid return is success; failure
* should result in exception + longjmp to the cpu loop.
*/
ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
assert(ok);
}
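The note above has a direct consequence for callers: any CPUTLBEntry pointer obtained before a fill may dangle afterwards. A minimal sketch of the safe pattern (illustrative only, not part of this diff; addr, size and retaddr are assumed to be in scope):

CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

if (!tlb_hit(entry->addr_read, addr)) {
    tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
    /* tlb_fill() may have resized the TLB: refresh the stale pointer. */
    entry = tlb_entry(env, mmu_idx, addr);
}

tlb_vaddr_to_host below follows exactly this pattern after its probing fill.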
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
int mmu_idx, target_ulong addr, uintptr_t retaddr,
MMUAccessType access_type, int size)
@ -938,6 +957,16 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
}
}
static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
{
#if TCG_OVERSIZED_GUEST
return *(target_ulong *)((uintptr_t)entry + ofs);
#else
/* ofs might correspond to .addr_write, so use atomic_read */
return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
#endif
}
/* Return true if ADDR is present in the victim tlb, and has been copied
back to the main tlb. */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
@ -948,14 +977,7 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
assert_cpu_is_self(ENV_GET_CPU(env));
for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
target_ulong cmp;
/* elt_ofs might correspond to .addr_write, so use atomic_read */
#if TCG_OVERSIZED_GUEST
cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif
target_ulong cmp = tlb_read_ofs(vtlb, elt_ofs);
if (cmp == page) {
/* Found entry in victim tlb, swap tlb and iotlb. */
@ -1039,6 +1061,56 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
}
}
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
MMUAccessType access_type, int mmu_idx)
{
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
uintptr_t tlb_addr, page;
size_t elt_ofs;
switch (access_type) {
case MMU_DATA_LOAD:
elt_ofs = offsetof(CPUTLBEntry, addr_read);
break;
case MMU_DATA_STORE:
elt_ofs = offsetof(CPUTLBEntry, addr_write);
break;
case MMU_INST_FETCH:
elt_ofs = offsetof(CPUTLBEntry, addr_code);
break;
default:
g_assert_not_reached();
}
page = addr & TARGET_PAGE_MASK;
tlb_addr = tlb_read_ofs(entry, elt_ofs);
if (!tlb_hit_page(tlb_addr, page)) {
uintptr_t index = tlb_index(env, mmu_idx, addr);
if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
CPUState *cs = ENV_GET_CPU(env);
CPUClass *cc = CPU_GET_CLASS(cs);
if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
/* Non-faulting page table read failed. */
return NULL;
}
/* TLB resize via tlb_fill may have moved the entry. */
entry = tlb_entry(env, mmu_idx, addr);
}
tlb_addr = tlb_read_ofs(entry, elt_ofs);
}
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
return NULL;
}
return (void *)((uintptr_t)addr + entry->addend);
}
/* Probe for a read-modify-write atomic operation. Do not allow unaligned
* operations, or io operations to proceed. Return the host address. */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,


@ -63,8 +63,8 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
{
CPUState *cpu = current_cpu;
CPUClass *cc;
int ret;
unsigned long address = (unsigned long)info->si_addr;
MMUAccessType access_type;
/* We must handle PC addresses from two different sources:
* a call return address and a signal frame address.
@ -147,35 +147,17 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
are still valid segv ones */
address = h2g_nocheck(address);
cc = CPU_GET_CLASS(cpu);
/* see if it is an MMU fault */
g_assert(cc->handle_mmu_fault);
ret = cc->handle_mmu_fault(cpu, address, 0, is_write, MMU_USER_IDX);
if (ret == 0) {
/* The MMU fault was handled without causing real CPU fault.
* Retain helper_retaddr for a possible second fault.
*/
return 1;
}
/* All other paths lead to cpu_exit; clear helper_retaddr
 * for next execution.
 */
/*
* There is no way the target can handle this other than raising
* an exception. Undo signal and retaddr state prior to longjmp.
*/
sigprocmask(SIG_SETMASK, old_set, NULL);
helper_retaddr = 0;
if (ret < 0) {
return 0; /* not an MMU fault */
}
/* Now we have a real cpu fault. */
cpu_restore_state(cpu, pc, true);
sigprocmask(SIG_SETMASK, old_set, NULL);
cpu_loop_exit(cpu);
/* never comes here */
return 1;
cc = CPU_GET_CLASS(cpu);
access_type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
cc->tlb_fill(cpu, address, 0, access_type, MMU_USER_IDX, false, pc);
g_assert_not_reached();
}
#if defined(__i386__)


@ -433,50 +433,20 @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
* @mmu_idx: MMU index to use for lookup
*
* Look up the specified guest virtual index in the TCG softmmu TLB.
* If the TLB contains a host virtual address suitable for direct RAM
* access, then return it. Otherwise (TLB miss, TLB entry is for an
* I/O access, etc) return NULL.
*
* This is the equivalent of the initial fast-path code used by
* TCG backends for guest load and store accesses.
* If we can translate a host virtual address suitable for direct RAM
* access, without causing a guest exception, then return it.
* Otherwise (TLB entry is for an I/O access, guest software
* TLB fill required, etc) return NULL.
*/
#ifdef CONFIG_USER_ONLY
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
int access_type, int mmu_idx)
MMUAccessType access_type, int mmu_idx)
{
#if defined(CONFIG_USER_ONLY)
return g2h(addr);
#else
CPUTLBEntry *tlbentry = tlb_entry(env, mmu_idx, addr);
abi_ptr tlb_addr;
uintptr_t haddr;
switch (access_type) {
case 0:
tlb_addr = tlbentry->addr_read;
break;
case 1:
tlb_addr = tlb_addr_write(tlbentry);
break;
case 2:
tlb_addr = tlbentry->addr_code;
break;
default:
g_assert_not_reached();
}
if (!tlb_hit(tlb_addr, addr)) {
/* TLB entry is for a different page */
return NULL;
}
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
return NULL;
}
haddr = addr + tlbentry->addend;
return (void *)haddr;
#endif /* defined(CONFIG_USER_ONLY) */
}
#else
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
MMUAccessType access_type, int mmu_idx);
#endif
#endif /* CPU_LDST_H */
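As a usage illustration (not part of the patch), a helper that wants a non-faulting load can now combine tlb_vaddr_to_host with a plain host access; the helper name and its contract here are hypothetical:

/* Hypothetical non-faulting byte load: true on success, false otherwise. */
static bool nofault_ldub(CPUArchState *env, abi_ptr addr,
                         int mmu_idx, uint8_t *val)
{
    void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);

    if (host == NULL) {
        /* Unmapped, I/O-backed, or otherwise not direct RAM: fail softly. */
        return false;
    }
    *val = *(uint8_t *)host;
    return true;
}

This is the shape the ARM SVE no-fault loads need: failure is reported to the caller instead of raising a guest exception.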


@ -474,15 +474,6 @@ static inline void assert_no_pages_locked(void)
*/
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
hwaddr index, MemTxAttrs attrs);
/*
* Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
* caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
* be discarded and looked up again (e.g. via tlb_entry()).
*/
void tlb_fill(CPUState *cpu, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
#endif
#if defined(CONFIG_USER_ONLY)


@ -117,7 +117,12 @@ struct TranslationBlock;
* This always includes at least the program counter; some targets
* will need to do more. If this hook is not implemented then the
* default is to call @set_pc(tb->pc).
* @handle_mmu_fault: Callback for handling an MMU fault.
* @tlb_fill: Callback for handling a softmmu tlb miss or user-only
* address fault. For system mode, if the access is valid, call
* tlb_set_page and return true; if the access is invalid, and
* probe is true, return false; otherwise raise an exception and
* do not return. For user-only mode, always raise an exception
* and do not return.
* @get_phys_page_debug: Callback for obtaining a physical address.
* @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
* associated memory transaction attributes to use for the access.
@ -189,8 +194,9 @@ typedef struct CPUClass {
Error **errp);
void (*set_pc)(CPUState *cpu, vaddr value);
void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int size, int rw,
int mmu_index);
bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
MemTxAttrs *attrs);
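To make the contract concrete, a softmmu target hook would follow this skeleton (a sketch only; "xxx" and its translate helper are placeholders, not code from this series):

bool xxx_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    hwaddr phys;
    int prot;

    if (xxx_translate(cs, address, access_type, mmu_idx, &phys, &prot)) {
        /* Valid access: install the mapping and report success. */
        tlb_set_page(cs, address & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        /* Invalid access while probing: report failure, raise nothing. */
        return false;
    }
    cs->exception_index = XXX_EXCP_MMU_FAULT;   /* hypothetical fault number */
    cpu_loop_exit_restore(cs, retaddr);         /* raises; does not return */
}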


@ -225,9 +225,8 @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = alpha_cpu_set_pc;
cc->gdb_read_register = alpha_cpu_gdb_read_register;
cc->gdb_write_register = alpha_cpu_gdb_write_register;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = alpha_cpu_handle_mmu_fault;
#else
cc->tlb_fill = alpha_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->do_transaction_failed = alpha_cpu_do_transaction_failed;
cc->do_unaligned_access = alpha_cpu_do_unaligned_access;
cc->get_phys_page_debug = alpha_cpu_get_phys_page_debug;


@ -475,8 +475,9 @@ void alpha_cpu_list(void);
is returned if the signal was handled by the virtual CPU. */
int cpu_alpha_signal_handler(int host_signum, void *pinfo,
void *puc);
int alpha_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
void QEMU_NORETURN dynamic_excp(CPUAlphaState *, uintptr_t, int, int);
void QEMU_NORETURN arith_excp(CPUAlphaState *, uintptr_t, int, uint64_t);


@ -104,14 +104,15 @@ void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val)
}
#if defined(CONFIG_USER_ONLY)
int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
AlphaCPU *cpu = ALPHA_CPU(cs);
cs->exception_index = EXCP_MMFAULT;
cpu->env.trap_arg0 = address;
return 1;
cpu_loop_exit_restore(cs, retaddr);
}
#else
/* Returns the OSF/1 entMM failure indication, or -1 on success. */
@ -248,26 +249,31 @@ hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return (fail >= 0 ? -1 : phys);
}
int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size, int rw,
int mmu_idx)
bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
AlphaCPU *cpu = ALPHA_CPU(cs);
CPUAlphaState *env = &cpu->env;
target_ulong phys;
int prot, fail;
fail = get_physical_address(env, addr, 1 << rw, mmu_idx, &phys, &prot);
fail = get_physical_address(env, addr, 1 << access_type,
mmu_idx, &phys, &prot);
if (unlikely(fail >= 0)) {
if (probe) {
return false;
}
cs->exception_index = EXCP_MMFAULT;
env->trap_arg0 = addr;
env->trap_arg1 = fail;
env->trap_arg2 = (rw == 2 ? -1 : rw);
return 1;
env->trap_arg2 = (access_type == MMU_INST_FETCH ? -1 : access_type);
cpu_loop_exit_restore(cs, retaddr);
}
tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
prot, mmu_idx, TARGET_PAGE_SIZE);
return 0;
return true;
}
#endif /* USER_ONLY */


@ -62,20 +62,4 @@ void alpha_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
env->error_code = 0;
cpu_loop_exit_restore(cs, retaddr);
}
/* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = alpha_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (unlikely(ret != 0)) {
/* Exception index and error code are already set */
cpu_loop_exit_restore(cs, retaddr);
}
}
#endif /* CONFIG_USER_ONLY */


@ -2133,23 +2133,6 @@ static Property arm_cpu_properties[] = {
DEFINE_PROP_END_OF_LIST()
};
#ifdef CONFIG_USER_ONLY
static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
env->exception.vaddress = address;
if (rw == 2) {
cs->exception_index = EXCP_PREFETCH_ABORT;
} else {
cs->exception_index = EXCP_DATA_ABORT;
}
return 1;
}
#endif
static gchar *arm_gdb_arch_name(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
@ -2182,9 +2165,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->synchronize_from_tb = arm_cpu_synchronize_from_tb;
cc->gdb_read_register = arm_cpu_gdb_read_register;
cc->gdb_write_register = arm_cpu_gdb_write_register;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = arm_cpu_handle_mmu_fault;
#else
#ifndef CONFIG_USER_ONLY
cc->do_interrupt = arm_cpu_do_interrupt;
cc->do_unaligned_access = arm_cpu_do_unaligned_access;
cc->do_transaction_failed = arm_cpu_do_transaction_failed;
@ -2209,6 +2190,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->disas_set_info = arm_disas_set_info;
#ifdef CONFIG_TCG
cc->tcg_initialize = arm_translate_init;
cc->tlb_fill = arm_cpu_tlb_fill;
#endif
}


@ -12596,43 +12596,6 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
}
}
/* Walk the page table and (if the mapping exists) add the page
* to the TLB. Return false on success, or true on failure. Populate
* fsr with ARM DFSR/IFSR fault register format value on failure.
*/
bool arm_tlb_fill(CPUState *cs, vaddr address,
MMUAccessType access_type, int mmu_idx,
ARMMMUFaultInfo *fi)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
hwaddr phys_addr;
target_ulong page_size;
int prot;
int ret;
MemTxAttrs attrs = {};
ret = get_phys_addr(env, address, access_type,
core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
&attrs, &prot, &page_size, fi, NULL);
if (!ret) {
/*
* Map a single [sub]page. Regions smaller than our declared
* target page size are handled specially, so for those we
* pass in the exact addresses.
*/
if (page_size >= TARGET_PAGE_SIZE) {
phys_addr &= TARGET_PAGE_MASK;
address &= TARGET_PAGE_MASK;
}
tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
prot, mmu_idx, page_size);
return 0;
}
return ret;
}
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
MemTxAttrs *attrs)
{
@ -13111,6 +13074,59 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
#endif
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
ARMCPU *cpu = ARM_CPU(cs);
#ifdef CONFIG_USER_ONLY
cpu->env.exception.vaddress = address;
if (access_type == MMU_INST_FETCH) {
cs->exception_index = EXCP_PREFETCH_ABORT;
} else {
cs->exception_index = EXCP_DATA_ABORT;
}
cpu_loop_exit_restore(cs, retaddr);
#else
hwaddr phys_addr;
target_ulong page_size;
int prot, ret;
MemTxAttrs attrs = {};
ARMMMUFaultInfo fi = {};
/*
* Walk the page table and (if the mapping exists) add the page
* to the TLB. On success, return true. Otherwise, if probing,
* return false. Otherwise populate fsr with ARM DFSR/IFSR fault
* register format, and signal the fault.
*/
ret = get_phys_addr(&cpu->env, address, access_type,
core_to_arm_mmu_idx(&cpu->env, mmu_idx),
&phys_addr, &attrs, &prot, &page_size, &fi, NULL);
if (likely(!ret)) {
/*
* Map a single [sub]page. Regions smaller than our declared
* target page size are handled specially, so for those we
* pass in the exact addresses.
*/
if (page_size >= TARGET_PAGE_SIZE) {
phys_addr &= TARGET_PAGE_MASK;
address &= TARGET_PAGE_MASK;
}
tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
prot, mmu_idx, page_size);
return true;
} else if (probe) {
return false;
} else {
/* now we have a real cpu fault */
cpu_restore_state(cs, retaddr, true);
arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
}
#endif
}
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
/* Implement DC ZVA, which zeroes a fixed-length block of memory.


@ -761,10 +761,12 @@ static inline bool arm_extabort_type(MemTxResult result)
return result != MEMTX_DECODE_ERROR;
}
/* Do a page table walk and add page to TLB if possible */
bool arm_tlb_fill(CPUState *cpu, vaddr address,
MMUAccessType access_type, int mmu_idx,
ARMMMUFaultInfo *fi);
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
void arm_deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
int mmu_idx, ARMMMUFaultInfo *fi) QEMU_NORETURN;
/* Return true if the stage 1 translation regime is using LPAE format page
* tables */


@ -126,8 +126,8 @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
return syn;
}
static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
int mmu_idx, ARMMMUFaultInfo *fi)
void arm_deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
int mmu_idx, ARMMMUFaultInfo *fi)
{
CPUARMState *env = &cpu->env;
int target_el;
@ -179,27 +179,6 @@ static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
raise_exception(env, exc, syn, target_el);
}
/* try to fill the TLB and return an exception if error. If retaddr is
* NULL, it means that the function was called in C code (i.e. not
* from generated code or from helper.c)
*/
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
bool ret;
ARMMMUFaultInfo fi = {};
ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fi);
if (unlikely(ret)) {
ARMCPU *cpu = ARM_CPU(cs);
/* now we have a real cpu fault */
cpu_restore_state(cs, retaddr, true);
deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
}
/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
MMUAccessType access_type,
@ -212,7 +191,7 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
cpu_restore_state(cs, retaddr, true);
fi.type = ARMFault_Alignment;
deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}
/* arm_cpu_do_transaction_failed: handle a memory system error response
@ -233,7 +212,7 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
fi.ea = arm_extabort_type(response);
fi.type = ARMFault_SyncExternal;
deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
#endif /* !defined(CONFIG_USER_ONLY) */


@ -4598,11 +4598,7 @@ static void sve_ldnf1_r(CPUARMState *env, void *vg, const target_ulong addr,
* in the real world, obviously.)
*
* Then there are the annoying special cases with watchpoints...
*
* TODO: Add a form of tlb_fill that does not raise an exception,
* with a form of tlb_vaddr_to_host and a set of loads to match.
* The non_fault_vaddr_to_host would handle everything, usually,
* and the loads would handle the iomem path for watchpoints.
* TODO: Add a form of non-faulting loads using cc->tlb_fill(probe=true).
*/
host = tlb_vaddr_to_host(env, addr + mem_off, MMU_DATA_LOAD, mmu_idx);
split = max_for_page(addr, mem_off, mem_max);
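The probe form referred to in the TODO above can be driven directly through the new hook; a minimal sketch of the idea (softmmu only; env, addr and mmu_idx assumed in scope):

CPUState *cs = ENV_GET_CPU(env);
CPUClass *cc = CPU_GET_CLASS(cs);

if (cc->tlb_fill(cs, addr, 0, MMU_DATA_LOAD, mmu_idx, true, 0)) {
    /* Mapping exists: tlb_vaddr_to_host() can now resolve the page. */
}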


@ -269,9 +269,8 @@ static void cris_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = cris_cpu_set_pc;
cc->gdb_read_register = cris_cpu_gdb_read_register;
cc->gdb_write_register = cris_cpu_gdb_write_register;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = cris_cpu_handle_mmu_fault;
#else
cc->tlb_fill = cris_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = cris_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_cris_cpu;
#endif


@ -281,8 +281,9 @@ static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch)
return !!(env->pregs[PR_CCS] & U_FLAG);
}
int cris_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
/* Support function regs. */
#define SFR_RW_GC_CFG 0][0


@ -24,6 +24,7 @@
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
//#define CRIS_HELPER_DEBUG
@ -53,15 +54,15 @@ void crisv10_cpu_do_interrupt(CPUState *cs)
cris_cpu_do_interrupt(cs);
}
int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
CRISCPU *cpu = CRIS_CPU(cs);
cs->exception_index = 0xaa;
cpu->env.pregs[PR_EDA] = address;
cpu_dump_state(cs, stderr, 0);
return 1;
cpu_loop_exit_restore(cs, retaddr);
}
#else /* !CONFIG_USER_ONLY */
@ -76,33 +77,19 @@ static void cris_shift_ccs(CPUCRISState *env)
env->pregs[PR_CCS] = ccs;
}
int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
CRISCPU *cpu = CRIS_CPU(cs);
CPUCRISState *env = &cpu->env;
struct cris_mmu_result res;
int prot, miss;
int r = -1;
target_ulong phy;
qemu_log_mask(CPU_LOG_MMU, "%s addr=%" VADDR_PRIx " pc=%x rw=%x\n",
__func__, address, env->pc, rw);
miss = cris_mmu_translate(&res, env, address & TARGET_PAGE_MASK,
rw, mmu_idx, 0);
if (miss) {
if (cs->exception_index == EXCP_BUSFAULT) {
cpu_abort(cs,
"CRIS: Illegal recursive bus fault."
"addr=%" VADDR_PRIx " rw=%d\n",
address, rw);
}
env->pregs[PR_EDA] = address;
cs->exception_index = EXCP_BUSFAULT;
env->fault_vector = res.bf_vec;
r = 1;
} else {
access_type, mmu_idx, 0);
if (likely(!miss)) {
/*
* Mask off the cache selection bit. The ETRAX busses do not
* see the top bit.
@ -111,15 +98,29 @@ int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
prot = res.prot;
tlb_set_page(cs, address & TARGET_PAGE_MASK, phy,
prot, mmu_idx, TARGET_PAGE_SIZE);
r = 0;
return true;
}
if (r > 0) {
qemu_log_mask(CPU_LOG_MMU,
"%s returns %d irqreq=%x addr=%" VADDR_PRIx " phy=%x vec=%x"
" pc=%x\n", __func__, r, cs->interrupt_request, address,
res.phy, res.bf_vec, env->pc);
if (probe) {
return false;
}
return r;
if (cs->exception_index == EXCP_BUSFAULT) {
cpu_abort(cs, "CRIS: Illegal recursive bus fault."
"addr=%" VADDR_PRIx " access_type=%d\n",
address, access_type);
}
env->pregs[PR_EDA] = address;
cs->exception_index = EXCP_BUSFAULT;
env->fault_vector = res.bf_vec;
if (retaddr) {
if (cpu_restore_state(cs, retaddr, true)) {
/* Evaluate flags after retranslation. */
helper_top_evaluate_flags(env);
}
}
cpu_loop_exit(cs);
}
void crisv10_cpu_do_interrupt(CPUState *cs)


@ -37,34 +37,6 @@
#define D_LOG(...) do { } while (0)
#endif
#if !defined(CONFIG_USER_ONLY)
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
CRISCPU *cpu = CRIS_CPU(cs);
CPUCRISState *env = &cpu->env;
int ret;
D_LOG("%s pc=%x tpc=%x ra=%p\n", __func__,
env->pc, env->pregs[PR_EDA], (void *)retaddr);
ret = cris_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (unlikely(ret)) {
if (retaddr) {
/* now we have a real cpu fault */
if (cpu_restore_state(cs, retaddr, true)) {
/* Evaluate flags after retranslation. */
helper_top_evaluate_flags(env);
}
}
cpu_loop_exit(cs);
}
}
#endif
void helper_raise_exception(CPUCRISState *env, uint32_t index)
{
CPUState *cs = CPU(cris_env_get_cpu(env));


@ -163,9 +163,8 @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
cc->synchronize_from_tb = hppa_cpu_synchronize_from_tb;
cc->gdb_read_register = hppa_cpu_gdb_read_register;
cc->gdb_write_register = hppa_cpu_gdb_write_register;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = hppa_cpu_handle_mmu_fault;
#else
cc->tlb_fill = hppa_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = hppa_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_hppa_cpu;
#endif


@ -360,10 +360,10 @@ int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void hppa_cpu_do_interrupt(CPUState *cpu);
bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
void hppa_cpu_dump_state(CPUState *cs, FILE *f, int);
#ifdef CONFIG_USER_ONLY
int hppa_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
int rw, int midx);
#else
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
#ifndef CONFIG_USER_ONLY
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
int type, hwaddr *pphys, int *pprot);
extern const MemoryRegionOps hppa_io_eir_ops;


@ -25,8 +25,9 @@
#include "trace.h"
#ifdef CONFIG_USER_ONLY
int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
int size, int rw, int mmu_idx)
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
HPPACPU *cpu = HPPA_CPU(cs);
@ -34,7 +35,7 @@ int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
which would affect si_code. */
cs->exception_index = EXCP_DMP;
cpu->env.cr[CR_IOR] = address;
return 1;
cpu_loop_exit_restore(cs, retaddr);
}
#else
static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
@ -213,8 +214,9 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return excp == EXCP_DTLB_MISS ? -1 : phys;
}
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType type, int mmu_idx, uintptr_t retaddr)
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
MMUAccessType type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
HPPACPU *cpu = HPPA_CPU(cs);
CPUHPPAState *env = &cpu->env;
@ -236,6 +238,9 @@ void tlb_fill(CPUState *cs, target_ulong addr, int size,
excp = hppa_get_physical_address(env, addr, mmu_idx,
a_prot, &phys, &prot);
if (unlikely(excp >= 0)) {
if (probe) {
return false;
}
trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
/* Failure. Raise the indicated exception. */
cs->exception_index = excp;
@ -252,6 +257,7 @@ void tlb_fill(CPUState *cs, target_ulong addr, int size,
/* Success! Store the translation into the QEMU TLB. */
tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
prot, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
/* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */


@ -5915,9 +5915,7 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->gdb_write_register = x86_cpu_gdb_write_register;
cc->get_arch_id = x86_cpu_get_arch_id;
cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
#ifndef CONFIG_USER_ONLY
cc->asidx_from_attrs = x86_asidx_from_attrs;
cc->get_memory_mapping = x86_cpu_get_memory_mapping;
cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
@ -5942,6 +5940,7 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
cc->tcg_initialize = tcg_x86_init;
cc->tlb_fill = x86_cpu_tlb_fill;
#endif
cc->disas_set_info = x86_disas_set_info;


@ -1656,8 +1656,9 @@ void host_cpuid(uint32_t function, uint32_t count,
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping);
/* helper.c */
int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr, int size,
int is_write, int mmu_idx);
bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
#ifndef CONFIG_USER_ONLY


@ -137,26 +137,7 @@ void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr
raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}
#if defined(CONFIG_USER_ONLY)
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size,
int is_write, int mmu_idx)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
/* user mode only emulation */
is_write &= 1;
env->cr[2] = addr;
env->error_code = (is_write << PG_ERROR_W_BIT);
env->error_code |= PG_ERROR_U_MASK;
cs->exception_index = EXCP0E_PAGE;
env->exception_is_int = 0;
env->exception_next_eip = -1;
return 1;
}
#else
#if !defined(CONFIG_USER_ONLY)
static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
int *prot)
{
@ -365,8 +346,8 @@ static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
* 0 = nothing more to do
* 1 = generate PF fault
*/
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size,
int is_write1, int mmu_idx)
static int handle_mmu_fault(CPUState *cs, vaddr addr, int size,
int is_write1, int mmu_idx)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
@ -691,3 +672,31 @@ do_check_protect_pse36:
return 1;
}
#endif
bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
#ifdef CONFIG_USER_ONLY
/* user mode only emulation */
env->cr[2] = addr;
env->error_code = (access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT;
env->error_code |= PG_ERROR_U_MASK;
cs->exception_index = EXCP0E_PAGE;
env->exception_is_int = 0;
env->exception_next_eip = -1;
cpu_loop_exit_restore(cs, retaddr);
#else
env->retaddr = retaddr;
if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) {
/* FIXME: On error in get_hphys we have already jumped out. */
g_assert(!probe);
raise_exception_err_ra(env, cs->exception_index,
env->error_code, retaddr);
}
return true;
#endif
}


@ -191,24 +191,3 @@ void helper_boundl(CPUX86State *env, target_ulong a0, int v)
raise_exception_ra(env, EXCP05_BOUND, GETPC());
}
}
#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
* NULL, it means that the function was called in C code (i.e. not
* from generated code or from helper.c)
*/
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
int ret;
env->retaddr = retaddr;
ret = x86_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (ret) {
raise_exception_err_ra(env, cs->exception_index, env->error_code, retaddr);
}
}
#endif


@ -231,9 +231,8 @@ static void lm32_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = lm32_cpu_set_pc;
cc->gdb_read_register = lm32_cpu_gdb_read_register;
cc->gdb_write_register = lm32_cpu_gdb_write_register;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = lm32_cpu_handle_mmu_fault;
#else
cc->tlb_fill = lm32_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = lm32_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_lm32_cpu;
#endif


@ -261,8 +261,9 @@ bool lm32_cpu_do_semihosting(CPUState *cs);
#define cpu_list lm32_cpu_list
#define cpu_signal_handler cpu_lm32_signal_handler
int lm32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
bool lm32_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
#include "exec/cpu-all.h"


@ -25,8 +25,9 @@
#include "exec/semihost.h"
#include "exec/log.h"
int lm32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
bool lm32_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
LM32CPU *cpu = LM32_CPU(cs);
CPULM32State *env = &cpu->env;
@ -40,8 +41,7 @@ int lm32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
} else {
tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
}
return 0;
return true;
}
hwaddr lm32_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)


@ -143,21 +143,5 @@ uint32_t HELPER(rcsr_jrx)(CPULM32State *env)
{
return lm32_juart_get_jrx(env->juart_state);
}
/* Try to fill the TLB and return an exception if error. If retaddr is
* NULL, it means that the function was called in C code (i.e. not
* from generated code or from helper.c)
*/
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = lm32_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (unlikely(ret)) {
/* now we have a real cpu fault */
cpu_loop_exit_restore(cs, retaddr);
}
}
#endif


@ -269,7 +269,7 @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
cc->set_pc = m68k_cpu_set_pc;
cc->gdb_read_register = m68k_cpu_gdb_read_register;
cc->gdb_write_register = m68k_cpu_gdb_write_register;
cc->handle_mmu_fault = m68k_cpu_handle_mmu_fault;
cc->tlb_fill = m68k_cpu_tlb_fill;
#if defined(CONFIG_SOFTMMU)
cc->do_unassigned_access = m68k_cpu_unassigned_access;
cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug;


@ -542,8 +542,9 @@ static inline int cpu_mmu_index (CPUM68KState *env, bool ifetch)
return (env->sr & SR_S) == 0 ? 1 : 0;
}
int m68k_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
void m68k_cpu_unassigned_access(CPUState *cs, hwaddr addr,
bool is_write, bool is_exec, int is_asi,
unsigned size);


@ -353,20 +353,7 @@ void m68k_switch_sp(CPUM68KState *env)
env->current_sp = new_sp;
}
#if defined(CONFIG_USER_ONLY)
int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
{
M68kCPU *cpu = M68K_CPU(cs);
cs->exception_index = EXCP_ACCESS;
cpu->env.mmu.ar = address;
return 1;
}
#else
#if !defined(CONFIG_USER_ONLY)
/* MMU: 68040 only */
static void print_address_zone(uint32_t logical, uint32_t physical,
@ -795,11 +782,36 @@ hwaddr m68k_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return phys_addr;
}
int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
/*
* Notify CPU of a pending interrupt. Prioritization and vectoring should
* be handled by the interrupt controller. Real hardware only requests
* the vector when the interrupt is acknowledged by the CPU. For
* simplicity we calculate it when the interrupt is signalled.
*/
void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector)
{
CPUState *cs = CPU(cpu);
CPUM68KState *env = &cpu->env;
env->pending_level = level;
env->pending_vector = vector;
if (level) {
cpu_interrupt(cs, CPU_INTERRUPT_HARD);
} else {
cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
}
}
#endif
bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType qemu_access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
M68kCPU *cpu = M68K_CPU(cs);
CPUM68KState *env = &cpu->env;
#ifndef CONFIG_USER_ONLY
hwaddr physical;
int prot;
int access_type;
@ -812,32 +824,35 @@ int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
address & TARGET_PAGE_MASK,
PAGE_READ | PAGE_WRITE | PAGE_EXEC,
mmu_idx, TARGET_PAGE_SIZE);
return 0;
return true;
}
if (rw == 2) {
if (qemu_access_type == MMU_INST_FETCH) {
access_type = ACCESS_CODE;
rw = 0;
} else {
access_type = ACCESS_DATA;
if (rw) {
if (qemu_access_type == MMU_DATA_STORE) {
access_type |= ACCESS_STORE;
}
}
if (mmu_idx != MMU_USER_IDX) {
access_type |= ACCESS_SUPER;
}
ret = get_physical_address(&cpu->env, &physical, &prot,
address, access_type, &page_size);
if (ret == 0) {
if (likely(ret == 0)) {
address &= TARGET_PAGE_MASK;
physical += address & (page_size - 1);
tlb_set_page(cs, address, physical,
prot, mmu_idx, TARGET_PAGE_SIZE);
return 0;
return true;
}
if (probe) {
return false;
}
/* page fault */
env->mmu.ssw = M68K_ATC_040;
switch (size) {
@ -862,31 +877,13 @@ int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
if (!(access_type & ACCESS_STORE)) {
env->mmu.ssw |= M68K_RW_040;
}
env->mmu.ar = address;
cs->exception_index = EXCP_ACCESS;
return 1;
}
/* Notify CPU of a pending interrupt. Prioritization and vectoring should
be handled by the interrupt controller. Real hardware only requests
the vector when the interrupt is acknowledged by the CPU. For
simplicity we calculate it when the interrupt is signalled. */
void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector)
{
CPUState *cs = CPU(cpu);
CPUM68KState *env = &cpu->env;
env->pending_level = level;
env->pending_vector = vector;
if (level) {
cpu_interrupt(cs, CPU_INTERRUPT_HARD);
} else {
cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
}
}
#endif
cs->exception_index = EXCP_ACCESS;
env->mmu.ar = address;
cpu_loop_exit_restore(cs, retaddr);
}
uint32_t HELPER(bitrev)(uint32_t x)
{
x = ((x >> 1) & 0x55555555u) | ((x << 1) & 0xaaaaaaaau);


@ -36,21 +36,6 @@ static inline void do_interrupt_m68k_hardirq(CPUM68KState *env)
#else
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = m68k_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (unlikely(ret)) {
/* now we have a real cpu fault */
cpu_loop_exit_restore(cs, retaddr);
}
}
static void cf_rte(CPUM68KState *env)
{
uint32_t sp;


@ -304,9 +304,8 @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = mb_cpu_set_pc;
cc->gdb_read_register = mb_cpu_gdb_read_register;
cc->gdb_write_register = mb_cpu_gdb_write_register;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = mb_cpu_handle_mmu_fault;
#else
cc->tlb_fill = mb_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->do_transaction_failed = mb_cpu_transaction_failed;
cc->get_phys_page_debug = mb_cpu_get_phys_page_debug;
#endif


@ -374,8 +374,9 @@ static inline int cpu_mmu_index (CPUMBState *env, bool ifetch)
return MMU_KERNEL_IDX;
}
int mb_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
#include "exec/cpu-all.h"


@ -38,73 +38,74 @@ void mb_cpu_do_interrupt(CPUState *cs)
env->regs[14] = env->sregs[SR_PC];
}
int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
cs->exception_index = 0xaa;
cpu_dump_state(cs, stderr, 0);
return 1;
cpu_loop_exit_restore(cs, retaddr);
}
#else /* !CONFIG_USER_ONLY */
int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
CPUMBState *env = &cpu->env;
struct microblaze_mmu_lookup lu;
unsigned int hit;
int r = 1;
int prot;
/* Translate if the MMU is available and enabled. */
if (mmu_idx != MMU_NOMMU_IDX) {
uint32_t vaddr, paddr;
struct microblaze_mmu_lookup lu;
hit = mmu_translate(&env->mmu, &lu, address, rw, mmu_idx);
if (hit) {
vaddr = address & TARGET_PAGE_MASK;
paddr = lu.paddr + vaddr - lu.vaddr;
qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
mmu_idx, vaddr, paddr, lu.prot);
tlb_set_page(cs, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
r = 0;
} else {
env->sregs[SR_EAR] = address;
qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
mmu_idx, address);
switch (lu.err) {
case ERR_PROT:
env->sregs[SR_ESR] = rw == 2 ? 17 : 16;
env->sregs[SR_ESR] |= (rw == 1) << 10;
break;
case ERR_MISS:
env->sregs[SR_ESR] = rw == 2 ? 19 : 18;
env->sregs[SR_ESR] |= (rw == 1) << 10;
break;
default:
abort();
break;
}
if (cs->exception_index == EXCP_MMU) {
cpu_abort(cs, "recursive faults\n");
}
/* TLB miss. */
cs->exception_index = EXCP_MMU;
}
} else {
if (mmu_idx == MMU_NOMMU_IDX) {
/* MMU disabled or not available. */
address &= TARGET_PAGE_MASK;
prot = PAGE_BITS;
tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
r = 0;
return true;
}
return r;
hit = mmu_translate(&env->mmu, &lu, address, access_type, mmu_idx);
if (likely(hit)) {
uint32_t vaddr = address & TARGET_PAGE_MASK;
uint32_t paddr = lu.paddr + vaddr - lu.vaddr;
qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
mmu_idx, vaddr, paddr, lu.prot);
tlb_set_page(cs, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
/* TLB miss. */
if (probe) {
return false;
}
qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
mmu_idx, address);
env->sregs[SR_EAR] = address;
switch (lu.err) {
case ERR_PROT:
env->sregs[SR_ESR] = access_type == MMU_INST_FETCH ? 17 : 16;
env->sregs[SR_ESR] |= (access_type == MMU_DATA_STORE) << 10;
break;
case ERR_MISS:
env->sregs[SR_ESR] = access_type == MMU_INST_FETCH ? 19 : 18;
env->sregs[SR_ESR] |= (access_type == MMU_DATA_STORE) << 10;
break;
default:
abort();
}
if (cs->exception_index == EXCP_MMU) {
cpu_abort(cs, "recursive faults\n");
}
/* TLB miss. */
cs->exception_index = EXCP_MMU;
cpu_loop_exit_restore(cs, retaddr);
}
void mb_cpu_do_interrupt(CPUState *cs)


@ -28,25 +28,6 @@
#define D(x)
#if !defined(CONFIG_USER_ONLY)
/* Try to fill the TLB and return an exception if error. If retaddr is
* NULL, it means that the function was called in C code (i.e. not
* from generated code or from helper.c)
*/
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = mb_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (unlikely(ret)) {
/* now we have a real cpu fault */
cpu_loop_exit_restore(cs, retaddr);
}
}
#endif
void helper_put(uint32_t id, uint32_t ctrl, uint32_t data)
{
int test = ctrl & STREAM_TEST;


@ -197,9 +197,7 @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
cc->synchronize_from_tb = mips_cpu_synchronize_from_tb;
cc->gdb_read_register = mips_cpu_gdb_read_register;
cc->gdb_write_register = mips_cpu_gdb_write_register;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = mips_cpu_handle_mmu_fault;
#else
#ifndef CONFIG_USER_ONLY
cc->do_unassigned_access = mips_cpu_unassigned_access;
cc->do_unaligned_access = mips_cpu_do_unaligned_access;
cc->get_phys_page_debug = mips_cpu_get_phys_page_debug;
@ -208,6 +206,7 @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
cc->disas_set_info = mips_cpu_disas_set_info;
#ifdef CONFIG_TCG
cc->tcg_initialize = mips_tcg_init;
cc->tlb_fill = mips_cpu_tlb_fill;
#endif
cc->gdb_num_core_regs = 73;


@ -874,31 +874,25 @@ refill:
#endif
#endif
int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
MIPSCPU *cpu = MIPS_CPU(cs);
CPUMIPSState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
hwaddr physical;
int prot;
int access_type;
int mips_access_type;
#endif
int ret = 0;
#if 0
log_cpu_state(cs, 0);
#endif
qemu_log_mask(CPU_LOG_MMU,
"%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
__func__, env->active_tc.PC, address, rw, mmu_idx);
int ret = TLBRET_BADADDR;
/* data access */
#if !defined(CONFIG_USER_ONLY)
/* XXX: put correct access by using cpu_restore_state() correctly */
access_type = ACCESS_INT;
ret = get_physical_address(env, &physical, &prot,
address, rw, access_type, mmu_idx);
mips_access_type = ACCESS_INT;
ret = get_physical_address(env, &physical, &prot, address,
access_type, mips_access_type, mmu_idx);
switch (ret) {
case TLBRET_MATCH:
qemu_log_mask(CPU_LOG_MMU,
@ -915,44 +909,41 @@ int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
mmu_idx, TARGET_PAGE_SIZE);
ret = 0;
} else if (ret < 0)
#endif
{
#if !defined(CONFIG_USER_ONLY)
return true;
}
#if !defined(TARGET_MIPS64)
if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
/*
* Memory reads during hardware page table walking are performed
* as if they were kernel-mode load instructions.
*/
int mode = (env->hflags & MIPS_HFLAG_KSU);
bool ret_walker;
env->hflags &= ~MIPS_HFLAG_KSU;
ret_walker = page_table_walk_refill(env, address, rw, mmu_idx);
env->hflags |= mode;
if (ret_walker) {
ret = get_physical_address(env, &physical, &prot,
address, rw, access_type, mmu_idx);
if (ret == TLBRET_MATCH) {
tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
mmu_idx, TARGET_PAGE_SIZE);
ret = 0;
return ret;
}
if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
/*
* Memory reads during hardware page table walking are performed
* as if they were kernel-mode load instructions.
*/
int mode = (env->hflags & MIPS_HFLAG_KSU);
bool ret_walker;
env->hflags &= ~MIPS_HFLAG_KSU;
ret_walker = page_table_walk_refill(env, address, access_type, mmu_idx);
env->hflags |= mode;
if (ret_walker) {
ret = get_physical_address(env, &physical, &prot, address,
access_type, mips_access_type, mmu_idx);
if (ret == TLBRET_MATCH) {
tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
mmu_idx, TARGET_PAGE_SIZE);
return true;
}
}
#endif
#endif
raise_mmu_exception(env, address, rw, ret);
ret = 1;
}
#endif
if (probe) {
return false;
}
#endif
return ret;
raise_mmu_exception(env, address, access_type, ret);
do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
}
#if !defined(CONFIG_USER_ONLY)
#ifndef CONFIG_USER_ONLY
hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int rw)
{
hwaddr physical;


@ -202,8 +202,9 @@ void cpu_mips_start_count(CPUMIPSState *env);
void cpu_mips_stop_count(CPUMIPSState *env);
/* helper.c */
int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
/* op_helper.c */
uint32_t float_class_s(uint32_t arg, float_status *fst);


@ -2669,21 +2669,6 @@ void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
do_raise_exception_err(env, excp, error_code, retaddr);
}
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = mips_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (ret) {
MIPSCPU *cpu = MIPS_CPU(cs);
CPUMIPSState *env = &cpu->env;
do_raise_exception_err(env, cs->exception_index,
env->error_code, retaddr);
}
}
void mips_cpu_unassigned_access(CPUState *cs, hwaddr addr,
bool is_write, bool is_exec, int unused,
unsigned size)


@ -112,9 +112,8 @@ static void moxie_cpu_class_init(ObjectClass *oc, void *data)
cc->do_interrupt = moxie_cpu_do_interrupt;
cc->dump_state = moxie_cpu_dump_state;
cc->set_pc = moxie_cpu_set_pc;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = moxie_cpu_handle_mmu_fault;
#else
cc->tlb_fill = moxie_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = moxie_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_moxie_cpu;
#endif


@ -139,7 +139,8 @@ static inline void cpu_get_tb_cpu_state(CPUMoxieState *env, target_ulong *pc,
*flags = 0;
}
int moxie_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
int rw, int mmu_idx);
bool moxie_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
#endif /* MOXIE_CPU_H */


@ -26,20 +26,6 @@
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = moxie_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (unlikely(ret)) {
cpu_loop_exit_restore(cs, retaddr);
}
}
void helper_raise_exception(CPUMoxieState *env, int ex)
{
CPUState *cs = CPU(moxie_env_get_cpu(env));
@ -85,53 +71,29 @@ void helper_debug(CPUMoxieState *env)
cpu_loop_exit(cs);
}
#if defined(CONFIG_USER_ONLY)
void moxie_cpu_do_interrupt(CPUState *cs)
{
CPUState *cs = CPU(moxie_env_get_cpu(env));
cs->exception_index = -1;
}
int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
{
MoxieCPU *cpu = MOXIE_CPU(cs);
cs->exception_index = 0xaa;
cpu->env.debug1 = address;
cpu_dump_state(cs, stderr, 0);
return 1;
}
#else /* !CONFIG_USER_ONLY */
int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
bool moxie_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
MoxieCPU *cpu = MOXIE_CPU(cs);
CPUMoxieState *env = &cpu->env;
MoxieMMUResult res;
int prot, miss;
target_ulong phy;
int r = 1;
address &= TARGET_PAGE_MASK;
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
miss = moxie_mmu_translate(&res, env, address, rw, mmu_idx);
if (miss) {
/* handle the miss. */
phy = 0;
cs->exception_index = MOXIE_EX_MMU_MISS;
} else {
phy = res.phy;
r = 0;
miss = moxie_mmu_translate(&res, env, address, access_type, mmu_idx);
if (likely(!miss)) {
tlb_set_page(cs, address, res.phy, prot, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
if (probe) {
return false;
}
tlb_set_page(cs, address, phy, prot, mmu_idx, TARGET_PAGE_SIZE);
return r;
}
cs->exception_index = MOXIE_EX_MMU_MISS;
cpu_loop_exit_restore(cs, retaddr);
}
void moxie_cpu_do_interrupt(CPUState *cs)
{
@ -156,4 +118,3 @@ hwaddr moxie_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
}
return phy;
}
#endif


@ -200,9 +200,8 @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data)
cc->dump_state = nios2_cpu_dump_state;
cc->set_pc = nios2_cpu_set_pc;
cc->disas_set_info = nios2_cpu_disas_set_info;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = nios2_cpu_handle_mmu_fault;
#else
cc->tlb_fill = nios2_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->do_unaligned_access = nios2_cpu_do_unaligned_access;
cc->get_phys_page_debug = nios2_cpu_get_phys_page_debug;
#endif


@ -253,8 +253,9 @@ static inline int cpu_mmu_index(CPUNios2State *env, bool ifetch)
MMU_SUPERVISOR_IDX;
}
int nios2_cpu_handle_mmu_fault(CPUState *env, vaddr address, int size,
int rw, int mmu_idx);
bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
static inline int cpu_interrupts_enabled(CPUNios2State *env)
{


@ -38,15 +38,12 @@ void nios2_cpu_do_interrupt(CPUState *cs)
env->regs[R_EA] = env->regs[R_PC] + 4;
}
int nios2_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
cs->exception_index = 0xaa;
/* Page 0x1000 is kuser helper */
if (address < 0x1000 || address >= 0x2000) {
cpu_dump_state(cs, stderr, 0);
}
return 1;
cpu_loop_exit_restore(cs, retaddr);
}
#else /* !CONFIG_USER_ONLY */
@ -203,89 +200,6 @@ void nios2_cpu_do_interrupt(CPUState *cs)
}
}
static int cpu_nios2_handle_virtual_page(
CPUState *cs, target_ulong address, int rw, int mmu_idx)
{
Nios2CPU *cpu = NIOS2_CPU(cs);
CPUNios2State *env = &cpu->env;
target_ulong vaddr, paddr;
Nios2MMULookup lu;
unsigned int hit;
hit = mmu_translate(env, &lu, address, rw, mmu_idx);
if (hit) {
vaddr = address & TARGET_PAGE_MASK;
paddr = lu.paddr + vaddr - lu.vaddr;
if (((rw == 0) && (lu.prot & PAGE_READ)) ||
((rw == 1) && (lu.prot & PAGE_WRITE)) ||
((rw == 2) && (lu.prot & PAGE_EXEC))) {
tlb_set_page(cs, vaddr, paddr, lu.prot,
mmu_idx, TARGET_PAGE_SIZE);
return 0;
} else {
/* Permission violation */
cs->exception_index = (rw == 0) ? EXCP_TLBR :
((rw == 1) ? EXCP_TLBW :
EXCP_TLBX);
}
} else {
cs->exception_index = EXCP_TLBD;
}
if (rw == 2) {
env->regs[CR_TLBMISC] &= ~CR_TLBMISC_D;
} else {
env->regs[CR_TLBMISC] |= CR_TLBMISC_D;
}
env->regs[CR_PTEADDR] &= CR_PTEADDR_PTBASE_MASK;
env->regs[CR_PTEADDR] |= (address >> 10) & CR_PTEADDR_VPN_MASK;
env->mmu.pteaddr_wr = env->regs[CR_PTEADDR];
env->regs[CR_BADADDR] = address;
return 1;
}
int nios2_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
{
Nios2CPU *cpu = NIOS2_CPU(cs);
CPUNios2State *env = &cpu->env;
if (cpu->mmu_present) {
if (MMU_SUPERVISOR_IDX == mmu_idx) {
if (address >= 0xC0000000) {
/* Kernel physical page - TLB bypassed */
address &= TARGET_PAGE_MASK;
tlb_set_page(cs, address, address, PAGE_BITS,
mmu_idx, TARGET_PAGE_SIZE);
} else if (address >= 0x80000000) {
/* Kernel virtual page */
return cpu_nios2_handle_virtual_page(cs, address, rw, mmu_idx);
} else {
/* User virtual page */
return cpu_nios2_handle_virtual_page(cs, address, rw, mmu_idx);
}
} else {
if (address >= 0x80000000) {
/* Illegal access from user mode */
cs->exception_index = EXCP_SUPERA;
env->regs[CR_BADADDR] = address;
return 1;
} else {
/* User virtual page */
return cpu_nios2_handle_virtual_page(cs, address, rw, mmu_idx);
}
}
} else {
/* No MMU */
address &= TARGET_PAGE_MASK;
tlb_set_page(cs, address, address, PAGE_BITS,
mmu_idx, TARGET_PAGE_SIZE);
}
return 0;
}
hwaddr nios2_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
Nios2CPU *cpu = NIOS2_CPU(cs);
@ -321,4 +235,80 @@ void nios2_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
env->regs[CR_EXCEPTION] = EXCP_UNALIGN << 2;
helper_raise_exception(env, EXCP_UNALIGN);
}
bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
Nios2CPU *cpu = NIOS2_CPU(cs);
CPUNios2State *env = &cpu->env;
unsigned int excp = EXCP_TLBD;
target_ulong vaddr, paddr;
Nios2MMULookup lu;
unsigned int hit;
if (!cpu->mmu_present) {
/* No MMU */
address &= TARGET_PAGE_MASK;
tlb_set_page(cs, address, address, PAGE_BITS,
mmu_idx, TARGET_PAGE_SIZE);
return true;
}
if (MMU_SUPERVISOR_IDX == mmu_idx) {
if (address >= 0xC0000000) {
/* Kernel physical page - TLB bypassed */
address &= TARGET_PAGE_MASK;
tlb_set_page(cs, address, address, PAGE_BITS,
mmu_idx, TARGET_PAGE_SIZE);
return true;
}
} else {
if (address >= 0x80000000) {
/* Illegal access from user mode */
if (probe) {
return false;
}
cs->exception_index = EXCP_SUPERA;
env->regs[CR_BADADDR] = address;
cpu_loop_exit_restore(cs, retaddr);
}
}
/* Virtual page. */
hit = mmu_translate(env, &lu, address, access_type, mmu_idx);
if (hit) {
vaddr = address & TARGET_PAGE_MASK;
paddr = lu.paddr + vaddr - lu.vaddr;
if (((access_type == MMU_DATA_LOAD) && (lu.prot & PAGE_READ)) ||
((access_type == MMU_DATA_STORE) && (lu.prot & PAGE_WRITE)) ||
((access_type == MMU_INST_FETCH) && (lu.prot & PAGE_EXEC))) {
tlb_set_page(cs, vaddr, paddr, lu.prot,
mmu_idx, TARGET_PAGE_SIZE);
return true;
}
/* Permission violation */
excp = (access_type == MMU_DATA_LOAD ? EXCP_TLBR :
access_type == MMU_DATA_STORE ? EXCP_TLBW : EXCP_TLBX);
}
if (probe) {
return false;
}
if (access_type == MMU_INST_FETCH) {
env->regs[CR_TLBMISC] &= ~CR_TLBMISC_D;
} else {
env->regs[CR_TLBMISC] |= CR_TLBMISC_D;
}
env->regs[CR_PTEADDR] &= CR_PTEADDR_PTBASE_MASK;
env->regs[CR_PTEADDR] |= (address >> 10) & CR_PTEADDR_VPN_MASK;
env->mmu.pteaddr_wr = env->regs[CR_PTEADDR];
cs->exception_index = excp;
env->regs[CR_BADADDR] = address;
cpu_loop_exit_restore(cs, retaddr);
}
#endif /* !CONFIG_USER_ONLY */
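The nios2 conversion above is the template the rest of the series follows: on a
successful translation the hook calls tlb_set_page() and returns true; when the
new probe flag is set it returns false rather than raising anything; only a
real (non-probe) fault records exception state and unwinds with
cpu_loop_exit_restore(). A minimal, hedged sketch of that contract for a
hypothetical target "foo" (foo_translate() and FOO_EXCP_TLB_MISS are invented
names, not part of this series):

/* Sketch only: the three-outcome contract of CPUClass::tlb_fill. */
bool foo_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    hwaddr phys;
    int prot;

    /* Assumed target-specific translator, returning 0 on success. */
    if (foo_translate(cs, address, access_type, mmu_idx, &phys, &prot) == 0) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        /* Probe accesses must not disturb guest-visible fault state. */
        return false;
    }
    cs->exception_index = FOO_EXCP_TLB_MISS;    /* invented constant */
    cpu_loop_exit_restore(cs, retaddr);         /* does not return */
}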


@ -36,18 +36,6 @@
#define MMU_LOG(x)
#endif
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = nios2_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (unlikely(ret)) {
/* now we have a real cpu fault */
cpu_loop_exit_restore(cs, retaddr);
}
}
void mmu_read_debug(CPUNios2State *env, uint32_t rn)
{
switch (rn) {


@ -149,9 +149,8 @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = openrisc_cpu_set_pc;
cc->gdb_read_register = openrisc_cpu_gdb_read_register;
cc->gdb_write_register = openrisc_cpu_gdb_write_register;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = openrisc_cpu_handle_mmu_fault;
#else
cc->tlb_fill = openrisc_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = openrisc_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_openrisc_cpu;
#endif


@ -344,8 +344,9 @@ hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int openrisc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void openrisc_translate_init(void);
int openrisc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
int rw, int mmu_idx);
bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
int cpu_openrisc_signal_handler(int host_signum, void *pinfo, void *puc);
int print_insn_or1k(bfd_vma addr, disassemble_info *info);


@ -107,16 +107,42 @@ static void raise_mmu_exception(OpenRISCCPU *cpu, target_ulong address,
cpu->env.lock_addr = -1;
}
int openrisc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
#ifdef CONFIG_USER_ONLY
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
raise_mmu_exception(cpu, address, EXCP_DPF);
return 1;
#else
g_assert_not_reached();
int excp = EXCP_DPF;
#ifndef CONFIG_USER_ONLY
int prot;
hwaddr phys_addr;
if (mmu_idx == MMU_NOMMU_IDX) {
/* The mmu is disabled; lookups never fail. */
get_phys_nommu(&phys_addr, &prot, addr);
excp = 0;
} else {
bool super = mmu_idx == MMU_SUPERVISOR_IDX;
int need = (access_type == MMU_INST_FETCH ? PAGE_EXEC
: access_type == MMU_DATA_STORE ? PAGE_WRITE
: PAGE_READ);
excp = get_phys_mmu(cpu, &phys_addr, &prot, addr, need, super);
}
if (likely(excp == 0)) {
tlb_set_page(cs, addr & TARGET_PAGE_MASK,
phys_addr & TARGET_PAGE_MASK, prot,
mmu_idx, TARGET_PAGE_SIZE);
return true;
}
if (probe) {
return false;
}
#endif
raise_mmu_exception(cpu, addr, excp);
cpu_loop_exit_restore(cs, retaddr);
}
#ifndef CONFIG_USER_ONLY
@ -152,33 +178,4 @@ hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return phys_addr;
}
}
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
int prot, excp;
hwaddr phys_addr;
if (mmu_idx == MMU_NOMMU_IDX) {
/* The mmu is disabled; lookups never fail. */
get_phys_nommu(&phys_addr, &prot, addr);
excp = 0;
} else {
bool super = mmu_idx == MMU_SUPERVISOR_IDX;
int need = (access_type == MMU_INST_FETCH ? PAGE_EXEC
: access_type == MMU_DATA_STORE ? PAGE_WRITE
: PAGE_READ);
excp = get_phys_mmu(cpu, &phys_addr, &prot, addr, need, super);
}
if (unlikely(excp)) {
raise_mmu_exception(cpu, addr, excp);
cpu_loop_exit_restore(cs, retaddr);
}
tlb_set_page(cs, addr & TARGET_PAGE_MASK,
phys_addr & TARGET_PAGE_MASK, prot,
mmu_idx, TARGET_PAGE_SIZE);
}
#endif


@ -1311,10 +1311,9 @@ void ppc_translate_init(void);
* is returned if the signal was handled by the virtual CPU.
*/
int cpu_ppc_signal_handler(int host_signum, void *pinfo, void *puc);
#if defined(CONFIG_USER_ONLY)
int ppc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
#endif
bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
#if !defined(CONFIG_USER_ONLY)
void ppc_store_sdr1(CPUPPCState *env, target_ulong value);


@ -3057,15 +3057,9 @@ void helper_check_tlb_flush_global(CPUPPCState *env)
/*****************************************************************************/
/*
* try to fill the TLB and return an exception if error. If retaddr is
* NULL, it means that the function was called in C code (i.e. not
* from generated code or from helper.c)
*
* XXX: fix it to restore all registers
*/
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
bool ppc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
@ -3078,7 +3072,11 @@ void tlb_fill(CPUState *cs, target_ulong addr, int size,
ret = cpu_ppc_handle_mmu_fault(env, addr, access_type, mmu_idx);
}
if (unlikely(ret != 0)) {
if (probe) {
return false;
}
raise_exception_err_ra(env, cs->exception_index, env->error_code,
retaddr);
}
return true;
}


@ -10592,9 +10592,7 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_read_register = ppc_cpu_gdb_read_register;
cc->gdb_write_register = ppc_cpu_gdb_write_register;
cc->do_unaligned_access = ppc_cpu_do_unaligned_access;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = ppc_cpu_handle_mmu_fault;
#else
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = ppc_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_ppc_cpu;
#endif
@ -10624,6 +10622,7 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
#endif
#ifdef CONFIG_TCG
cc->tcg_initialize = ppc_translate_init;
cc->tlb_fill = ppc_cpu_tlb_fill;
#endif
cc->disas_set_info = ppc_disas_set_info;


@ -20,21 +20,24 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
int ppc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
int exception, error_code;
if (rw == 2) {
if (access_type == MMU_INST_FETCH) {
exception = POWERPC_EXCP_ISI;
error_code = 0x40000000;
} else {
exception = POWERPC_EXCP_DSI;
error_code = 0x40000000;
if (rw) {
if (access_type == MMU_DATA_STORE) {
error_code |= 0x02000000;
}
env->spr[SPR_DAR] = address;
@ -42,6 +45,5 @@ int ppc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
}
cs->exception_index = exception;
env->error_code = error_code;
return 1;
cpu_loop_exit_restore(cs, retaddr);
}


@ -355,14 +355,13 @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
#endif
cc->gdb_stop_before_watchpoint = true;
cc->disas_set_info = riscv_cpu_disas_set_info;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = riscv_cpu_handle_mmu_fault;
#else
#ifndef CONFIG_USER_ONLY
cc->do_unaligned_access = riscv_cpu_do_unaligned_access;
cc->get_phys_page_debug = riscv_cpu_get_phys_page_debug;
#endif
#ifdef CONFIG_TCG
cc->tcg_initialize = riscv_translate_init;
cc->tlb_fill = riscv_cpu_tlb_fill;
#endif
/* For now, mark unmigratable: */
cc->vmsd = &vmstate_riscv_cpu;


@ -261,8 +261,9 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type, int mmu_idx,
uintptr_t retaddr);
int riscv_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
int rw, int mmu_idx);
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
char *riscv_isa_string(RISCVCPU *cpu);
void riscv_cpu_list(void);


@ -378,54 +378,44 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
env->badaddr = addr;
riscv_raise_exception(env, cs->exception_index, retaddr);
}
/* called by qemu's softmmu to fill the qemu tlb */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = riscv_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (ret == TRANSLATE_FAIL) {
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
riscv_raise_exception(env, cs->exception_index, retaddr);
}
}
#endif
int riscv_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
#ifndef CONFIG_USER_ONLY
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
hwaddr pa = 0;
int prot;
#endif
int ret = TRANSLATE_FAIL;
qemu_log_mask(CPU_LOG_MMU,
"%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx \
%d\n", __func__, env->pc, address, rw, mmu_idx);
qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
__func__, address, access_type, mmu_idx);
ret = get_physical_address(env, &pa, &prot, address, access_type, mmu_idx);
#if !defined(CONFIG_USER_ONLY)
ret = get_physical_address(env, &pa, &prot, address, rw, mmu_idx);
qemu_log_mask(CPU_LOG_MMU,
"%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
" prot %d\n", __func__, address, ret, pa, prot);
"%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
" prot %d\n", __func__, address, ret, pa, prot);
if (riscv_feature(env, RISCV_FEATURE_PMP) &&
!pmp_hart_has_privs(env, pa, TARGET_PAGE_SIZE, 1 << rw)) {
!pmp_hart_has_privs(env, pa, TARGET_PAGE_SIZE, 1 << access_type)) {
ret = TRANSLATE_FAIL;
}
if (ret == TRANSLATE_SUCCESS) {
tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
prot, mmu_idx, TARGET_PAGE_SIZE);
} else if (ret == TRANSLATE_FAIL) {
raise_mmu_exception(env, address, rw);
return true;
} else if (probe) {
return false;
} else {
raise_mmu_exception(env, address, access_type);
riscv_raise_exception(env, cs->exception_index, retaddr);
}
#else
switch (rw) {
switch (access_type) {
case MMU_INST_FETCH:
cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
break;
@ -436,8 +426,8 @@ int riscv_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
break;
}
cpu_loop_exit_restore(cs, retaddr);
#endif
return ret;
}
/*


@ -478,9 +478,7 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = s390_cpu_set_pc;
cc->gdb_read_register = s390_cpu_gdb_read_register;
cc->gdb_write_register = s390_cpu_gdb_write_register;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = s390_cpu_handle_mmu_fault;
#else
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = s390_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_s390_cpu;
cc->write_elf64_note = s390_cpu_write_elf64_note;
@ -493,6 +491,7 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
cc->disas_set_info = s390_cpu_disas_set_info;
#ifdef CONFIG_TCG
cc->tcg_initialize = s390x_translate_init;
cc->tlb_fill = s390_cpu_tlb_fill;
#endif
cc->gdb_num_core_regs = S390_NUM_CORE_REGS;


@ -74,8 +74,9 @@ void s390_cpu_do_interrupt(CPUState *cs)
cs->exception_index = -1;
}
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
S390CPU *cpu = S390_CPU(cs);
@ -83,7 +84,7 @@ int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
/* On real machines this value is dropped into LowMem. Since this
is userland, simply put this someplace that cpu_loop can find it. */
cpu->env.__excp_addr = address;
return 1;
cpu_loop_exit_restore(cs, retaddr);
}
#else /* !CONFIG_USER_ONLY */
@ -102,19 +103,20 @@ static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
}
}
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, int size,
int rw, int mmu_idx)
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
target_ulong vaddr, raddr;
uint64_t asc;
int prot;
int prot, fail;
qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
__func__, orig_vaddr, rw, mmu_idx);
__func__, address, access_type, mmu_idx);
vaddr = orig_vaddr;
vaddr = address;
if (mmu_idx < MMU_REAL_IDX) {
asc = cpu_mmu_idx_to_asc(mmu_idx);
@ -122,39 +124,58 @@ int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, int size,
if (!(env->psw.mask & PSW_MASK_64)) {
vaddr &= 0x7fffffff;
}
if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
return 1;
}
fail = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, true);
} else if (mmu_idx == MMU_REAL_IDX) {
/* 31-Bit mode */
if (!(env->psw.mask & PSW_MASK_64)) {
vaddr &= 0x7fffffff;
}
if (mmu_translate_real(env, vaddr, rw, &raddr, &prot)) {
return 1;
}
fail = mmu_translate_real(env, vaddr, access_type, &raddr, &prot);
} else {
abort();
g_assert_not_reached();
}
/* check out of RAM access */
if (!address_space_access_valid(&address_space_memory, raddr,
TARGET_PAGE_SIZE, rw,
if (!fail &&
!address_space_access_valid(&address_space_memory, raddr,
TARGET_PAGE_SIZE, access_type,
MEMTXATTRS_UNSPECIFIED)) {
qemu_log_mask(CPU_LOG_MMU,
"%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n",
__func__, (uint64_t)raddr, (uint64_t)ram_size);
trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
return 1;
fail = 1;
}
qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
__func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
if (!fail) {
qemu_log_mask(CPU_LOG_MMU,
"%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
__func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
mmu_idx, TARGET_PAGE_SIZE);
return true;
}
if (probe) {
return false;
}
tlb_set_page(cs, orig_vaddr & TARGET_PAGE_MASK, raddr, prot,
mmu_idx, TARGET_PAGE_SIZE);
cpu_restore_state(cs, retaddr, true);
return 0;
/*
* The ILC value for code accesses is undefined. The important
* thing here is to *not* leave env->int_pgm_ilen set to ILEN_AUTO,
* which would cause do_program_interrupt to attempt to read from
* env->psw.addr again. Cf. the condition in trigger_page_fault,
* which is not universally applied.
*
* ??? If we remove ILEN_AUTO, by moving the computation of ILEN
* into cpu_restore_state, then we may remove this entirely.
*/
if (access_type == MMU_INST_FETCH) {
env->int_pgm_ilen = 2;
}
cpu_loop_exit(cs);
}
static void do_program_interrupt(CPUS390XState *env)


@ -263,8 +263,9 @@ ObjectClass *s390_cpu_class_by_name(const char *name);
void s390x_cpu_debug_excp_handler(CPUState *cs);
void s390_cpu_do_interrupt(CPUState *cpu);
bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req);
int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr);


@ -33,22 +33,6 @@
/*****************************************************************************/
/* Softmmu support */
#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret = s390_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (unlikely(ret != 0)) {
cpu_loop_exit_restore(cs, retaddr);
}
}
#endif
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER


@ -229,9 +229,8 @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
cc->synchronize_from_tb = superh_cpu_synchronize_from_tb;
cc->gdb_read_register = superh_cpu_gdb_read_register;
cc->gdb_write_register = superh_cpu_gdb_write_register;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = superh_cpu_handle_mmu_fault;
#else
cc->tlb_fill = superh_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->do_unaligned_access = superh_cpu_do_unaligned_access;
cc->get_phys_page_debug = superh_cpu_get_phys_page_debug;
#endif


@ -243,8 +243,9 @@ void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
void sh4_translate_init(void);
int cpu_sh4_signal_handler(int host_signum, void *pinfo,
void *puc);
int superh_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
void sh4_cpu_list(void);
#if !defined(CONFIG_USER_ONLY)


@ -27,43 +27,6 @@
#include "hw/sh4/sh_intc.h"
#endif
#if defined(CONFIG_USER_ONLY)
void superh_cpu_do_interrupt(CPUState *cs)
{
cs->exception_index = -1;
}
int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
{
SuperHCPU *cpu = SUPERH_CPU(cs);
CPUSH4State *env = &cpu->env;
env->tea = address;
cs->exception_index = -1;
switch (rw) {
case 0:
cs->exception_index = 0x0a0;
break;
case 1:
cs->exception_index = 0x0c0;
break;
case 2:
cs->exception_index = 0x0a0;
break;
}
return 1;
}
int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
{
/* For user mode, only U0 area is cacheable. */
return !(addr & 0x80000000);
}
#else /* !CONFIG_USER_ONLY */
#define MMU_OK 0
#define MMU_ITLB_MISS (-1)
#define MMU_ITLB_MULTIPLE (-2)
@ -79,6 +42,21 @@ int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
#define MMU_DADDR_ERROR_READ (-12)
#define MMU_DADDR_ERROR_WRITE (-13)
#if defined(CONFIG_USER_ONLY)
void superh_cpu_do_interrupt(CPUState *cs)
{
cs->exception_index = -1;
}
int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr)
{
/* For user mode, only U0 area is cacheable. */
return !(addr & 0x80000000);
}
#else /* !CONFIG_USER_ONLY */
void superh_cpu_do_interrupt(CPUState *cs)
{
SuperHCPU *cpu = SUPERH_CPU(cs);
@ -458,69 +436,6 @@ static int get_physical_address(CPUSH4State * env, target_ulong * physical,
return get_mmu_address(env, physical, prot, address, rw, access_type);
}
int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
{
SuperHCPU *cpu = SUPERH_CPU(cs);
CPUSH4State *env = &cpu->env;
target_ulong physical;
int prot, ret, access_type;
access_type = ACCESS_INT;
ret =
get_physical_address(env, &physical, &prot, address, rw,
access_type);
if (ret != MMU_OK) {
env->tea = address;
if (ret != MMU_DTLB_MULTIPLE && ret != MMU_ITLB_MULTIPLE) {
env->pteh = (env->pteh & PTEH_ASID_MASK) |
(address & PTEH_VPN_MASK);
}
switch (ret) {
case MMU_ITLB_MISS:
case MMU_DTLB_MISS_READ:
cs->exception_index = 0x040;
break;
case MMU_DTLB_MULTIPLE:
case MMU_ITLB_MULTIPLE:
cs->exception_index = 0x140;
break;
case MMU_ITLB_VIOLATION:
cs->exception_index = 0x0a0;
break;
case MMU_DTLB_MISS_WRITE:
cs->exception_index = 0x060;
break;
case MMU_DTLB_INITIAL_WRITE:
cs->exception_index = 0x080;
break;
case MMU_DTLB_VIOLATION_READ:
cs->exception_index = 0x0a0;
break;
case MMU_DTLB_VIOLATION_WRITE:
cs->exception_index = 0x0c0;
break;
case MMU_IADDR_ERROR:
case MMU_DADDR_ERROR_READ:
cs->exception_index = 0x0e0;
break;
case MMU_DADDR_ERROR_WRITE:
cs->exception_index = 0x100;
break;
default:
cpu_abort(cs, "Unhandled MMU fault");
}
return 1;
}
address &= TARGET_PAGE_MASK;
physical &= TARGET_PAGE_MASK;
tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
return 0;
}
hwaddr superh_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
SuperHCPU *cpu = SUPERH_CPU(cs);
@ -745,7 +660,6 @@ void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, hwaddr addr,
if (needs_tlb_flush) {
tlb_flush_page(CPU(sh_env_get_cpu(s)), vpn << 10);
}
} else {
int index = (addr & 0x00003f00) >> 8;
tlb_t * entry = &s->utlb[index];
@ -885,3 +799,76 @@ bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
return false;
}
bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
SuperHCPU *cpu = SUPERH_CPU(cs);
CPUSH4State *env = &cpu->env;
int ret;
#ifdef CONFIG_USER_ONLY
ret = (access_type == MMU_DATA_STORE ? MMU_DTLB_VIOLATION_WRITE :
access_type == MMU_INST_FETCH ? MMU_ITLB_VIOLATION :
MMU_DTLB_VIOLATION_READ);
#else
target_ulong physical;
int prot, sh_access_type;
sh_access_type = ACCESS_INT;
ret = get_physical_address(env, &physical, &prot, address,
access_type, sh_access_type);
if (ret == MMU_OK) {
address &= TARGET_PAGE_MASK;
physical &= TARGET_PAGE_MASK;
tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
if (probe) {
return false;
}
if (ret != MMU_DTLB_MULTIPLE && ret != MMU_ITLB_MULTIPLE) {
env->pteh = (env->pteh & PTEH_ASID_MASK) | (address & PTEH_VPN_MASK);
}
#endif
env->tea = address;
switch (ret) {
case MMU_ITLB_MISS:
case MMU_DTLB_MISS_READ:
cs->exception_index = 0x040;
break;
case MMU_DTLB_MULTIPLE:
case MMU_ITLB_MULTIPLE:
cs->exception_index = 0x140;
break;
case MMU_ITLB_VIOLATION:
cs->exception_index = 0x0a0;
break;
case MMU_DTLB_MISS_WRITE:
cs->exception_index = 0x060;
break;
case MMU_DTLB_INITIAL_WRITE:
cs->exception_index = 0x080;
break;
case MMU_DTLB_VIOLATION_READ:
cs->exception_index = 0x0a0;
break;
case MMU_DTLB_VIOLATION_WRITE:
cs->exception_index = 0x0c0;
break;
case MMU_IADDR_ERROR:
case MMU_DADDR_ERROR_READ:
cs->exception_index = 0x0e0;
break;
case MMU_DADDR_ERROR_WRITE:
cs->exception_index = 0x100;
break;
default:
cpu_abort(cs, "Unhandled MMU fault");
}
cpu_loop_exit_restore(cs, retaddr);
}


@ -41,18 +41,6 @@ void superh_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
cpu_loop_exit_restore(cs, retaddr);
}
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = superh_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (ret) {
/* now we have a real cpu fault */
cpu_loop_exit_restore(cs, retaddr);
}
}
#endif
void helper_ldtlb(CPUSH4State *env)


@ -875,9 +875,8 @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
cc->synchronize_from_tb = sparc_cpu_synchronize_from_tb;
cc->gdb_read_register = sparc_cpu_gdb_read_register;
cc->gdb_write_register = sparc_cpu_gdb_write_register;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = sparc_cpu_handle_mmu_fault;
#else
cc->tlb_fill = sparc_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->do_unassigned_access = sparc_cpu_unassigned_access;
cc->do_unaligned_access = sparc_cpu_do_unaligned_access;
cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug;


@ -579,8 +579,9 @@ void cpu_raise_exception_ra(CPUSPARCState *, int, uintptr_t) QEMU_NORETURN;
void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu);
void sparc_cpu_list(void);
/* mmu_helper.c */
int sparc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev);
void dump_mmu(CPUSPARCState *env);


@ -1924,19 +1924,4 @@ void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
#endif
cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
}
/* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = sparc_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (ret) {
cpu_loop_exit_restore(cs, retaddr);
}
}
#endif


@ -27,13 +27,14 @@
#if defined(CONFIG_USER_ONLY)
int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
SPARCCPU *cpu = SPARC_CPU(cs);
CPUSPARCState *env = &cpu->env;
if (rw & 2) {
if (access_type == MMU_INST_FETCH) {
cs->exception_index = TT_TFAULT;
} else {
cs->exception_index = TT_DFAULT;
@ -43,7 +44,7 @@ int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
env->mmuregs[4] = address;
#endif
}
return 1;
cpu_loop_exit_restore(cs, retaddr);
}
#else
@ -208,8 +209,9 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
}
/* Perform address translation */
int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
SPARCCPU *cpu = SPARC_CPU(cs);
CPUSPARCState *env = &cpu->env;
@ -218,16 +220,26 @@ int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
target_ulong page_size;
int error_code = 0, prot, access_index;
/*
* TODO: If we ever need tlb_vaddr_to_host for this target,
* then we must figure out how to manipulate FSR and FAR
* when both MMU_NF and probe are set. In the meantime,
* do not support this use case.
*/
assert(!probe);
address &= TARGET_PAGE_MASK;
error_code = get_physical_address(env, &paddr, &prot, &access_index,
address, rw, mmu_idx, &page_size);
address, access_type,
mmu_idx, &page_size);
vaddr = address;
if (error_code == 0) {
if (likely(error_code == 0)) {
qemu_log_mask(CPU_LOG_MMU,
"Translate at %" VADDR_PRIx " -> " TARGET_FMT_plx ", vaddr "
TARGET_FMT_lx "\n", address, paddr, vaddr);
"Translate at %" VADDR_PRIx " -> "
TARGET_FMT_plx ", vaddr " TARGET_FMT_lx "\n",
address, paddr, vaddr);
tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
return 0;
return true;
}
if (env->mmuregs[3]) { /* Fault status register */
@ -243,14 +255,14 @@ int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
switching to normal mode. */
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
return 0;
return true;
} else {
if (rw & 2) {
if (access_type == MMU_INST_FETCH) {
cs->exception_index = TT_TFAULT;
} else {
cs->exception_index = TT_DFAULT;
}
return 1;
cpu_loop_exit_restore(cs, retaddr);
}
}
@ -713,8 +725,9 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
}
/* Perform address translation */
int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
SPARCCPU *cpu = SPARC_CPU(cs);
CPUSPARCState *env = &cpu->env;
@ -725,8 +738,9 @@ int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
address &= TARGET_PAGE_MASK;
error_code = get_physical_address(env, &paddr, &prot, &access_index,
address, rw, mmu_idx, &page_size);
if (error_code == 0) {
address, access_type,
mmu_idx, &page_size);
if (likely(error_code == 0)) {
vaddr = address;
trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl,
@ -734,10 +748,12 @@ int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
env->dmmu.mmu_secondary_context);
tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
return 0;
return true;
}
/* XXX */
return 1;
if (probe) {
return false;
}
cpu_loop_exit_restore(cs, retaddr);
}
void dump_mmu(CPUSPARCState *env)


@ -25,6 +25,7 @@
#include "hw/qdev-properties.h"
#include "linux-user/syscall_defs.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
static void tilegx_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
@ -111,8 +112,9 @@ static void tilegx_cpu_do_interrupt(CPUState *cs)
cs->exception_index = -1;
}
static int tilegx_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
static bool tilegx_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
TileGXCPU *cpu = TILEGX_CPU(cs);
@ -122,7 +124,7 @@ static int tilegx_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
cpu->env.signo = TARGET_SIGSEGV;
cpu->env.sigcode = 0;
return 1;
cpu_loop_exit_restore(cs, retaddr);
}
static bool tilegx_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
@ -152,7 +154,7 @@ static void tilegx_cpu_class_init(ObjectClass *oc, void *data)
cc->cpu_exec_interrupt = tilegx_cpu_exec_interrupt;
cc->dump_state = tilegx_cpu_dump_state;
cc->set_pc = tilegx_cpu_set_pc;
cc->handle_mmu_fault = tilegx_cpu_handle_mmu_fault;
cc->tlb_fill = tilegx_cpu_tlb_fill;
cc->gdb_num_core_regs = 0;
cc->tcg_initialize = tilegx_tcg_init;
}


@ -166,6 +166,7 @@ static void tricore_cpu_class_init(ObjectClass *c, void *data)
cc->synchronize_from_tb = tricore_cpu_synchronize_from_tb;
cc->get_phys_page_attrs_debug = tricore_cpu_get_phys_page_attrs_debug;
cc->tcg_initialize = tricore_tcg_init;
cc->tlb_fill = tricore_cpu_tlb_fill;
}
#define DEFINE_TRICORE_CPU_TYPE(cpu_model, initfn) \


@ -417,8 +417,8 @@ static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, target_ulong *pc,
#define CPU_RESOLVING_TYPE TYPE_TRICORE_CPU
/* helpers.c */
int cpu_tricore_handle_mmu_fault(CPUState *cpu, target_ulong address,
int rw, int mmu_idx);
#define cpu_handle_mmu_fault cpu_tricore_handle_mmu_fault
bool tricore_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
#endif /* TRICORE_CPU_H */


@ -50,8 +50,9 @@ static void raise_mmu_exception(CPUTriCoreState *env, target_ulong address,
{
}
int cpu_tricore_handle_mmu_fault(CPUState *cs, target_ulong address,
int rw, int mmu_idx)
bool tricore_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType rw, int mmu_idx,
bool probe, uintptr_t retaddr)
{
TriCoreCPU *cpu = TRICORE_CPU(cs);
CPUTriCoreState *env = &cpu->env;
@ -64,20 +65,24 @@ int cpu_tricore_handle_mmu_fault(CPUState *cs, target_ulong address,
access_type = ACCESS_INT;
ret = get_physical_address(env, &physical, &prot,
address, rw, access_type);
qemu_log_mask(CPU_LOG_MMU, "%s address=" TARGET_FMT_lx " ret %d physical " TARGET_FMT_plx
" prot %d\n", __func__, address, ret, physical, prot);
qemu_log_mask(CPU_LOG_MMU, "%s address=" TARGET_FMT_lx " ret %d physical "
TARGET_FMT_plx " prot %d\n",
__func__, (target_ulong)address, ret, physical, prot);
if (ret == TLBRET_MATCH) {
tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
mmu_idx, TARGET_PAGE_SIZE);
ret = 0;
} else if (ret < 0) {
return true;
} else {
assert(ret < 0);
if (probe) {
return false;
}
raise_mmu_exception(env, address, rw, ret);
ret = 1;
cpu_loop_exit_restore(cs, retaddr);
}
return ret;
}
static void tricore_cpu_list_entry(gpointer data, gpointer user_data)


@ -2793,29 +2793,3 @@ uint32_t helper_psw_read(CPUTriCoreState *env)
{
return psw_read(env);
}
static inline void QEMU_NORETURN do_raise_exception_err(CPUTriCoreState *env,
uint32_t exception,
int error_code,
uintptr_t pc)
{
CPUState *cs = CPU(tricore_env_get_cpu(env));
cs->exception_index = exception;
env->error_code = error_code;
/* now we have a real cpu fault */
cpu_loop_exit_restore(cs, pc);
}
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = cpu_tricore_handle_mmu_fault(cs, addr, access_type, mmu_idx);
if (ret) {
TriCoreCPU *cpu = TRICORE_CPU(cs);
CPUTriCoreState *env = &cpu->env;
do_raise_exception_err(env, cs->exception_index,
env->error_code, retaddr);
}
}


@ -138,11 +138,8 @@ static void uc32_cpu_class_init(ObjectClass *oc, void *data)
cc->cpu_exec_interrupt = uc32_cpu_exec_interrupt;
cc->dump_state = uc32_cpu_dump_state;
cc->set_pc = uc32_cpu_set_pc;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = uc32_cpu_handle_mmu_fault;
#else
cc->tlb_fill = uc32_cpu_tlb_fill;
cc->get_phys_page_debug = uc32_cpu_get_phys_page_debug;
#endif
cc->tcg_initialize = uc32_translate_init;
dc->vmsd = &vmstate_uc32_cpu;
}
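As the uc32 hunk above shows (riscv and ppc do the same under CONFIG_TCG), hook
registration becomes unconditional: one cc->tlb_fill assignment serves both
user-only and softmmu builds, and only the physical-page debug hooks stay
behind #ifndef CONFIG_USER_ONLY. A hedged sketch of the resulting class-init
shape, reusing the invented "foo" target from earlier:

static void foo_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    /* Replaces the old user-only-only cc->handle_mmu_fault assignment. */
    cc->tlb_fill = foo_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
    cc->get_phys_page_debug = foo_cpu_get_phys_page_debug;
#endif
}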

View File

@ -178,8 +178,9 @@ static inline void cpu_get_tb_cpu_state(CPUUniCore32State *env, target_ulong *pc
}
}
int uc32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
bool uc32_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
void uc32_translate_init(void);
void switch_mode(CPUUniCore32State *, int);


@ -215,29 +215,6 @@ void helper_cp1_putc(target_ulong x)
}
#endif
#ifdef CONFIG_USER_ONLY
void switch_mode(CPUUniCore32State *env, int mode)
{
UniCore32CPU *cpu = uc32_env_get_cpu(env);
if (mode != ASR_MODE_USER) {
cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
}
}
void uc32_cpu_do_interrupt(CPUState *cs)
{
cpu_abort(cs, "NO interrupt in user mode\n");
}
int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int access_type, int mmu_idx)
{
cpu_abort(cs, "NO mmu fault in user mode\n");
return 1;
}
#endif
bool uc32_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
if (interrupt_request & CPU_INTERRUPT_HARD) {


@ -242,17 +242,3 @@ uint32_t HELPER(ror_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i)
return ((uint32_t)x >> shift) | (x << (32 - shift));
}
}
#ifndef CONFIG_USER_ONLY
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = uc32_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (unlikely(ret)) {
/* now we have a real cpu fault */
cpu_loop_exit_restore(cs, retaddr);
}
}
#endif


@ -215,8 +215,9 @@ do_fault:
return code;
}
int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int access_type, int mmu_idx)
bool uc32_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
UniCore32CPU *cpu = UNICORE32_CPU(cs);
CPUUniCore32State *env = &cpu->env;
@ -257,7 +258,11 @@ int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
phys_addr &= TARGET_PAGE_MASK;
address &= TARGET_PAGE_MASK;
tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size);
return 0;
return true;
}
if (probe) {
return false;
}
env->cp0.c3_faultstatus = ret;
@ -267,7 +272,7 @@ int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
} else {
cs->exception_index = UC32_EXCP_DTRAP;
}
return ret;
cpu_loop_exit_restore(cs, retaddr);
}
hwaddr uc32_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)


@ -181,9 +181,8 @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_read_register = xtensa_cpu_gdb_read_register;
cc->gdb_write_register = xtensa_cpu_gdb_write_register;
cc->gdb_stop_before_watchpoint = true;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = xtensa_cpu_handle_mmu_fault;
#else
cc->tlb_fill = xtensa_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->do_unaligned_access = xtensa_cpu_do_unaligned_access;
cc->get_phys_page_debug = xtensa_cpu_get_phys_page_debug;
cc->do_transaction_failed = xtensa_cpu_do_transaction_failed;


@ -552,8 +552,9 @@ static inline XtensaCPU *xtensa_env_get_cpu(const CPUXtensaState *env)
#define ENV_OFFSET offsetof(XtensaCPU, env)
int xtensa_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx);
bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
void xtensa_cpu_do_interrupt(CPUState *cpu);
bool xtensa_cpu_exec_interrupt(CPUState *cpu, int interrupt_request);
void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,


@ -240,19 +240,21 @@ void xtensa_cpu_list(void)
#ifdef CONFIG_USER_ONLY
int xtensa_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
XtensaCPU *cpu = XTENSA_CPU(cs);
CPUXtensaState *env = &cpu->env;
qemu_log_mask(CPU_LOG_INT,
"%s: rw = %d, address = 0x%08" VADDR_PRIx ", size = %d\n",
__func__, rw, address, size);
__func__, access_type, address, size);
env->sregs[EXCVADDR] = address;
env->sregs[EXCCAUSE] = rw ? STORE_PROHIBITED_CAUSE : LOAD_PROHIBITED_CAUSE;
env->sregs[EXCCAUSE] = (access_type == MMU_DATA_STORE ?
STORE_PROHIBITED_CAUSE : LOAD_PROHIBITED_CAUSE);
cs->exception_index = EXC_USER;
return 1;
cpu_loop_exit_restore(cs, retaddr);
}
#else
@ -273,28 +275,33 @@ void xtensa_cpu_do_unaligned_access(CPUState *cs,
}
}
void tlb_fill(CPUState *cs, target_ulong vaddr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
XtensaCPU *cpu = XTENSA_CPU(cs);
CPUXtensaState *env = &cpu->env;
uint32_t paddr;
uint32_t page_size;
unsigned access;
int ret = xtensa_get_physical_addr(env, true, vaddr, access_type, mmu_idx,
&paddr, &page_size, &access);
int ret = xtensa_get_physical_addr(env, true, address, access_type,
mmu_idx, &paddr, &page_size, &access);
qemu_log_mask(CPU_LOG_MMU, "%s(%08x, %d, %d) -> %08x, ret = %d\n",
__func__, vaddr, access_type, mmu_idx, paddr, ret);
qemu_log_mask(CPU_LOG_MMU, "%s(%08" VADDR_PRIx
", %d, %d) -> %08x, ret = %d\n",
__func__, address, access_type, mmu_idx, paddr, ret);
if (ret == 0) {
tlb_set_page(cs,
vaddr & TARGET_PAGE_MASK,
address & TARGET_PAGE_MASK,
paddr & TARGET_PAGE_MASK,
access, mmu_idx, page_size);
return true;
} else if (probe) {
return false;
} else {
cpu_restore_state(cs, retaddr, true);
HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
HELPER(exception_cause_vaddr)(env, env->pc, ret, address);
}
}
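What the probe flag buys callers is the ability to ask "would this access
fault?" without ever entering the exception path, which is what the
tlb_vaddr_to_host improvement in this pull relies on. A hedged sketch of such
a caller (probe_access_ok() is invented for illustration and is not the API
this series adds):

/* Invented helper: true if the target hook can install a mapping for
 * addr, false otherwise; never longjmps because probe=true. */
static bool probe_access_ok(CPUState *cs, vaddr addr,
                            MMUAccessType access_type, int mmu_idx)
{
    CPUClass *cc = CPU_GET_CLASS(cs);

    return cc->tlb_fill(cs, addr, 0, access_type, mmu_idx,
                        true /* probe */, 0 /* retaddr unused */);
}

Note that not every target supports this yet: the sparc32 hunk above
deliberately rejects probing with assert(!probe), so a caller like this is
only valid where the target implements the probe path.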