cputlb: Change tlb_set_page() argument to CPUState

Signed-off-by: Andreas Färber <afaerber@suse.de>
Andreas Färber <afaerber@suse.de>, 2013-09-03 13:59:37 +02:00
commit 0c591eb0a9, parent 00c8cb0a36
20 changed files with 33 additions and 34 deletions
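
The change is mechanical: tlb_set_page() now takes the generic CPUState instead of the per-target CPUArchState, and every caller passes the CPU it already has. A minimal before/after sketch, using the prototype from the header hunk and a call site shaped like the *_cpu_handle_mmu_fault() hunks below (the surrounding variable names are illustrative, not part of the commit):

    /* Old prototype (removed): */
    void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                      hwaddr paddr, int prot,
                      int mmu_idx, target_ulong size);

    /* New prototype: */
    void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                      hwaddr paddr, int prot,
                      int mmu_idx, target_ulong size);

    /* Caller before: the per-target env was passed. */
    tlb_set_page(env, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);

    /* Caller after: the CPUState (already available as "cs" in the
     * *_cpu_handle_mmu_fault() functions) is passed instead. */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);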

@@ -221,10 +221,11 @@ static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
 /* Add a new TLB entry. At most one entry for a given virtual address
    is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
    supplied size is only used by tlb_flush_page. */
-void tlb_set_page(CPUArchState *env, target_ulong vaddr,
+void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                   hwaddr paddr, int prot,
                   int mmu_idx, target_ulong size)
 {
+    CPUArchState *env = cpu->env_ptr;
     MemoryRegionSection *section;
     unsigned int index;
     target_ulong address;
@@ -232,7 +233,6 @@ void tlb_set_page(CPUArchState *env, target_ulong vaddr,
     uintptr_t addend;
     CPUTLBEntry *te;
     hwaddr iotlb, xlat, sz;
-    CPUState *cpu = ENV_GET_CPU(env);

     assert(size >= TARGET_PAGE_SIZE);
     if (size != TARGET_PAGE_SIZE) {
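
Note the direction reversal inside the function above: previously the CPUState was recovered from the env pointer with ENV_GET_CPU(), whereas the per-target env is now derived from the CPUState via its env_ptr field. The two changed lines side by side, for clarity:

    /* Removed: derive the CPU from the per-target env. */
    CPUState *cpu = ENV_GET_CPU(env);

    /* Added: derive the per-target env from the CPU. */
    CPUArchState *env = cpu->env_ptr;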

@@ -100,7 +100,7 @@ void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
 /* cputlb.c */
 void tlb_flush_page(CPUState *cpu, target_ulong addr);
 void tlb_flush(CPUState *cpu, int flush_global);
-void tlb_set_page(CPUArchState *env, target_ulong vaddr,
+void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                   hwaddr paddr, int prot,
                   int mmu_idx, target_ulong size);
 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);

@@ -345,7 +345,7 @@ int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int rw,
         return 1;
     }

-    tlb_set_page(env, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
+    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                  prot, mmu_idx, TARGET_PAGE_SIZE);
     return 0;
 }

@@ -3676,7 +3676,7 @@ int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
         /* Map a single [sub]page. */
         phys_addr &= ~(hwaddr)0x3ff;
         address &= ~(uint32_t)0x3ff;
-        tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
+        tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size);
         return 0;
     }

@@ -106,7 +106,7 @@ int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
          */
         phy = res.phy & ~0x80000000;
         prot = res.prot;
-        tlb_set_page(env, address & TARGET_PAGE_MASK, phy,
+        tlb_set_page(cs, address & TARGET_PAGE_MASK, phy,
                      prot, mmu_idx, TARGET_PAGE_SIZE);
         r = 0;
     }

@@ -877,7 +877,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
     paddr = (pte & TARGET_PAGE_MASK) + page_offset;
     vaddr = virt_addr + page_offset;

-    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
+    tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
     return 0;
  do_fault_protect:
     error_code = PG_ERROR_P_MASK;

@@ -30,10 +30,10 @@ int lm32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
     address &= TARGET_PAGE_MASK;
     prot = PAGE_BITS;
     if (env->flags & LM32_FLAG_IGNORE_MSB) {
-        tlb_set_page(env, address, address & 0x7fffffff, prot, mmu_idx,
-                     TARGET_PAGE_SIZE);
+        tlb_set_page(cs, address, address & 0x7fffffff, prot, mmu_idx,
+                     TARGET_PAGE_SIZE);
     } else {
-        tlb_set_page(env, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
+        tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
     }

     return 0;

@@ -303,12 +303,11 @@ hwaddr m68k_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
 int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
                               int mmu_idx)
 {
-    M68kCPU *cpu = M68K_CPU(cs);
     int prot;

     address &= TARGET_PAGE_MASK;
     prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
-    tlb_set_page(&cpu->env, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
+    tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
     return 0;
 }

@@ -77,7 +77,7 @@ int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,

             DMMU(qemu_log("MMU map mmu=%d v=%x p=%x prot=%x\n",
                           mmu_idx, vaddr, paddr, lu.prot));
-            tlb_set_page(env, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
+            tlb_set_page(cs, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
             r = 0;
         } else {
             env->sregs[SR_EAR] = address;
@@ -108,7 +108,7 @@ int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
         /* MMU disabled or not available. */
         address &= TARGET_PAGE_MASK;
         prot = PAGE_BITS;
-        tlb_set_page(env, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
+        tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
         r = 0;
     }
     return r;

@@ -300,7 +300,7 @@ int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
              " prot %d\n",
              __func__, address, ret, physical, prot);
     if (ret == TLBRET_MATCH) {
-        tlb_set_page(env, address & TARGET_PAGE_MASK,
+        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                      physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
                      mmu_idx, TARGET_PAGE_SIZE);
         ret = 0;

@@ -148,7 +148,7 @@ int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
         phy = res.phy;
         r = 0;
     }
-    tlb_set_page(env, address, phy, prot, mmu_idx, TARGET_PAGE_SIZE);
+    tlb_set_page(cs, address, phy, prot, mmu_idx, TARGET_PAGE_SIZE);
     return r;
 }

@@ -187,7 +187,7 @@ int openrisc_cpu_handle_mmu_fault(CPUState *cs,
                                      address, rw);

     if (ret == TLBRET_MATCH) {
-        tlb_set_page(&cpu->env, address & TARGET_PAGE_MASK,
+        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                      physical & TARGET_PAGE_MASK, prot,
                      mmu_idx, TARGET_PAGE_SIZE);
         ret = 0;

@@ -400,7 +400,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
     if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
         /* Translation is off */
         raddr = eaddr;
-        tlb_set_page(env, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                      PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                      TARGET_PAGE_SIZE);
         return 0;
@@ -427,7 +427,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
                 return 1;
             }

-            tlb_set_page(env, eaddr & TARGET_PAGE_MASK,
+            tlb_set_page(cs, eaddr & TARGET_PAGE_MASK,
                          raddr & TARGET_PAGE_MASK, prot, mmu_idx,
                          TARGET_PAGE_SIZE);
             return 0;
@@ -441,7 +441,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
     if (sr & SR32_T) {
         if (ppc_hash32_direct_store(env, sr, eaddr, rwx,
                                     &raddr, &prot) == 0) {
-            tlb_set_page(env, eaddr & TARGET_PAGE_MASK,
+            tlb_set_page(cs, eaddr & TARGET_PAGE_MASK,
                          raddr & TARGET_PAGE_MASK, prot, mmu_idx,
                          TARGET_PAGE_SIZE);
             return 0;
@@ -522,7 +522,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
     raddr = ppc_hash32_pte_raddr(sr, pte, eaddr);

-    tlb_set_page(env, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                  prot, mmu_idx, TARGET_PAGE_SIZE);

     return 0;

@@ -476,7 +476,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
         /* Translation is off */
         /* In real mode the top 4 effective address bits are ignored */
         raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
-        tlb_set_page(env, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                      PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                      TARGET_PAGE_SIZE);
         return 0;
@@ -578,7 +578,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
     raddr = ppc_hash64_pte_raddr(slb, pte, eaddr);

-    tlb_set_page(env, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                  prot, mmu_idx, TARGET_PAGE_SIZE);

     return 0;

@@ -1514,7 +1514,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
     }
     ret = get_physical_address(env, &ctx, address, rw, access_type);
     if (ret == 0) {
-        tlb_set_page(env, address & TARGET_PAGE_MASK,
+        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                      ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
                      mmu_idx, TARGET_PAGE_SIZE);
         ret = 0;

@@ -417,7 +417,7 @@ int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
     DPRINTF("%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n", __func__,
             (uint64_t)vaddr, (uint64_t)raddr, prot);

-    tlb_set_page(env, orig_vaddr, raddr, prot,
+    tlb_set_page(cs, orig_vaddr, raddr, prot,
                  mmu_idx, TARGET_PAGE_SIZE);
     return 0;

@@ -512,7 +512,7 @@ int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
     address &= TARGET_PAGE_MASK;
     physical &= TARGET_PAGE_MASK;

-    tlb_set_page(env, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
+    tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
     return 0;
 }

@@ -217,7 +217,7 @@ int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
         printf("Translate at %" VADDR_PRIx " -> " TARGET_FMT_plx ", vaddr "
                TARGET_FMT_lx "\n", address, paddr, vaddr);
 #endif
-        tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
+        tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
         return 0;
     }
@@ -233,7 +233,7 @@ int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
                neverland. Fake/overridden mappings will be flushed when
                switching to normal mode. */
             prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
-            tlb_set_page(env, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
+            tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
             return 0;
         } else {
             if (rw & 2) {
@@ -729,7 +729,7 @@ int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
                                    env->dmmu.mmu_primary_context,
                                    env->dmmu.mmu_secondary_context);

-        tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
+        tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
         return 0;
     }
     /* XXX */

@@ -253,7 +253,7 @@ int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
         /* Map a single page. */
         phys_addr &= TARGET_PAGE_MASK;
         address &= TARGET_PAGE_MASK;
-        tlb_set_page(env, address, phys_addr, prot, mmu_idx, page_size);
+        tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size);
         return 0;
     }

@@ -77,10 +77,10 @@ void tlb_fill(CPUState *cs,
              vaddr, is_write, mmu_idx, paddr, ret);

     if (ret == 0) {
-        tlb_set_page(env,
-                     vaddr & TARGET_PAGE_MASK,
-                     paddr & TARGET_PAGE_MASK,
-                     access, mmu_idx, page_size);
+        tlb_set_page(cs,
+                     vaddr & TARGET_PAGE_MASK,
+                     paddr & TARGET_PAGE_MASK,
+                     access, mmu_idx, page_size);
     } else {
         cpu_restore_state(cs, retaddr);
         HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);