cputlb: Pass cpu_transaction_failed() the correct physaddr
The API for cpu_transaction_failed() says that it takes the physical address for the failed transaction. However we were actually passing it the offset within the target MemoryRegion. We don't currently have any target CPU implementations of this hook that require the physical address; fix this bug so we don't get confused if we ever do add one.

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180611125633.32755-3-peter.maydell@linaro.org
commit 2d54f19401
parent ace4109011
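To make the fix easier to follow, here is an illustrative restatement (not part of the patch; the helper name iotlb_entry_to_physaddr is invented for this sketch) of how an IOTLB entry's offset within its MemoryRegion is turned back into the full physical address that cpu_transaction_failed() expects:

/*
 * Illustrative sketch only, based on the code added by this patch.
 * iotlb_entry_to_physaddr is a made-up name; the patch open-codes this
 * computation in io_readx(), io_writex() and get_page_addr_code().
 */
static hwaddr iotlb_entry_to_physaddr(CPUState *cpu, CPUIOTLBEntry *iotlbentry,
                                      target_ulong addr)
{
    MemoryRegionSection *section =
        iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);

    /* Offset of the access within the target MemoryRegion; this is the
     * value that was previously (incorrectly) passed as "physaddr". */
    hwaddr mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;

    /* Recover the guest physical address: add where the section sits in
     * the address space and subtract where it starts within its region. */
    return mr_offset +
           section->offset_within_address_space -
           section->offset_within_region;
}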
accel/tcg/cputlb.c

@@ -777,13 +777,16 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                          target_ulong addr, uintptr_t retaddr, int size)
 {
     CPUState *cpu = ENV_GET_CPU(env);
-    hwaddr physaddr = iotlbentry->addr;
-    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
+    hwaddr mr_offset;
+    MemoryRegionSection *section;
+    MemoryRegion *mr;
     uint64_t val;
     bool locked = false;
     MemTxResult r;
 
-    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
+    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
+    mr = section->mr;
+    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
     cpu->mem_io_pc = retaddr;
     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
@@ -795,9 +798,13 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
         qemu_mutex_lock_iothread();
         locked = true;
     }
-    r = memory_region_dispatch_read(mr, physaddr,
+    r = memory_region_dispatch_read(mr, mr_offset,
                                     &val, size, iotlbentry->attrs);
     if (r != MEMTX_OK) {
+        hwaddr physaddr = mr_offset +
+            section->offset_within_address_space -
+            section->offset_within_region;
+
         cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_LOAD,
                                mmu_idx, iotlbentry->attrs, r, retaddr);
     }
@@ -814,12 +821,15 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                       uintptr_t retaddr, int size)
 {
     CPUState *cpu = ENV_GET_CPU(env);
-    hwaddr physaddr = iotlbentry->addr;
-    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
+    hwaddr mr_offset;
+    MemoryRegionSection *section;
+    MemoryRegion *mr;
     bool locked = false;
     MemTxResult r;
 
-    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
+    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
+    mr = section->mr;
+    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }
@@ -830,9 +840,13 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
         qemu_mutex_lock_iothread();
         locked = true;
     }
-    r = memory_region_dispatch_write(mr, physaddr,
+    r = memory_region_dispatch_write(mr, mr_offset,
                                      val, size, iotlbentry->attrs);
     if (r != MEMTX_OK) {
+        hwaddr physaddr = mr_offset +
+            section->offset_within_address_space -
+            section->offset_within_region;
+
         cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
                                mmu_idx, iotlbentry->attrs, r, retaddr);
     }
@@ -880,12 +894,13 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
  */
 tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
 {
-    int mmu_idx, index, pd;
+    int mmu_idx, index;
     void *p;
     MemoryRegion *mr;
+    MemoryRegionSection *section;
     CPUState *cpu = ENV_GET_CPU(env);
     CPUIOTLBEntry *iotlbentry;
-    hwaddr physaddr;
+    hwaddr physaddr, mr_offset;
 
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     mmu_idx = cpu_mmu_index(env, true);
@@ -896,8 +911,8 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
         }
     }
     iotlbentry = &env->iotlb[mmu_idx][index];
-    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
-    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
+    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
+    mr = section->mr;
     if (memory_region_is_unassigned(mr)) {
         qemu_mutex_lock_iothread();
         if (memory_region_request_mmio_ptr(mr, addr)) {
@@ -918,7 +933,10 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
          * and use the MemTXResult it produced). However it is the
          * simplest place we have currently available for the check.
          */
-        physaddr = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
+        mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
+        physaddr = mr_offset +
+            section->offset_within_address_space -
+            section->offset_within_region;
         cpu_transaction_failed(cpu, physaddr, addr, 0, MMU_INST_FETCH, mmu_idx,
                                iotlbentry->attrs, MEMTX_DECODE_ERROR, 0);
 
exec.c
@@ -2897,14 +2897,15 @@ static const MemoryRegionOps readonly_mem_ops = {
     },
 };
 
-MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
+MemoryRegionSection *iotlb_to_section(CPUState *cpu,
+                                      hwaddr index, MemTxAttrs attrs)
 {
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
     CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
     AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
     MemoryRegionSection *sections = d->map.sections;
 
-    return sections[index & ~TARGET_PAGE_MASK].mr;
+    return &sections[index & ~TARGET_PAGE_MASK];
 }
 
 static void io_mem_init(void)
include/exec/exec-all.h

@@ -437,8 +437,17 @@ void tb_lock_reset(void);
 
 #if !defined(CONFIG_USER_ONLY)
 
-struct MemoryRegion *iotlb_to_region(CPUState *cpu,
-                                     hwaddr index, MemTxAttrs attrs);
+/**
+ * iotlb_to_section:
+ * @cpu: CPU performing the access
+ * @index: TCG CPU IOTLB entry
+ *
+ * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
+ * it refers to. @index will have been initially created and returned
+ * by memory_region_section_get_iotlb().
+ */
+struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
+                                             hwaddr index, MemTxAttrs attrs);
 
 void tlb_fill(CPUState *cpu, target_ulong addr, int size,
               MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
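As the commit message says, no target currently implements a transaction-failed hook that consumes the physical address. For illustration only, a hypothetical target hook that did use it might look like the sketch below; every name here (FooCPU, foo_cpu_do_transaction_failed, fault_paddr, FOO_EXCP_BUS_ERROR) is invented and not part of any real target, and the parameter list simply mirrors the arguments that cpu_transaction_failed() passes through in the hunks above:

/* Hypothetical example only: a CPUClass do_transaction_failed hook that
 * latches the (now correct) physical address of the failed transaction. */
static void foo_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                          vaddr addr, unsigned size,
                                          MMUAccessType access_type,
                                          int mmu_idx, MemTxAttrs attrs,
                                          MemTxResult response,
                                          uintptr_t retaddr)
{
    FooCPU *cpu = FOO_CPU(cs);          /* invented target type */
    CPUFooState *env = &cpu->env;

    /* Record the full guest physical address of the failed transaction
     * (before this fix it would have been a MemoryRegion offset). */
    env->fault_paddr = physaddr;
    env->fault_vaddr = addr;

    /* Raise a bus-error exception at the faulting instruction. */
    cs->exception_index = FOO_EXCP_BUS_ERROR;
    cpu_loop_exit_restore(cs, retaddr);
}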