exec: Make stq_*_phys input an AddressSpace

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Edgar E. Iglesias 2013-11-28 00:11:44 +01:00
parent 41701aa4ee
commit f606604f1c
18 changed files with 131 additions and 91 deletions
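
The conversion is mechanical: every stq_phys()/stq_le_phys()/stq_be_phys() caller now passes the AddressSpace to write into, either the global address_space_memory or a per-CPU cs->as, as the hunks below show. For readers unfamiliar with the interface, here is a minimal standalone sketch of the new calling convention; the types and the printing body are mock stand-ins, not QEMU's implementation.

/* Standalone mock (illustration only, not QEMU code): stq_*_phys() now takes
 * the target AddressSpace as an explicit first argument instead of implicitly
 * writing to system memory. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t hwaddr;                          /* stand-in for QEMU's hwaddr */
typedef struct AddressSpace { const char *name; } AddressSpace;

static AddressSpace address_space_memory = { "memory" };

/* New-style signature as introduced by this commit; the body is a mock. */
static void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    printf("store 8 bytes 0x%016" PRIx64 " at 0x%" PRIx64 " in AS '%s'\n",
           val, addr, as->name);
}

int main(void)
{
    /* Callers now name the address space explicitly: the global system
     * memory here, or a per-CPU space such as cs->as in the hunks below. */
    stq_phys(&address_space_memory, 0x1000, 0xdeadbeefULL);
    return 0;
}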

exec.c

@@ -2683,22 +2683,22 @@ void stw_be_phys(hwaddr addr, uint32_t val)
}
/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
val = tswap64(val);
cpu_physical_memory_write(addr, &val, 8);
address_space_rw(as, addr, (void *) &val, 8, 1);
}
void stq_le_phys(hwaddr addr, uint64_t val)
void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
val = cpu_to_le64(val);
cpu_physical_memory_write(addr, &val, 8);
address_space_rw(as, addr, (void *) &val, 8, 1);
}
void stq_be_phys(hwaddr addr, uint64_t val)
void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
val = cpu_to_be64(val);
cpu_physical_memory_write(addr, &val, 8);
address_space_rw(as, addr, (void *) &val, 8, 1);
}
/* virtual memory access for debug (includes writing to ROM) */

@@ -161,8 +161,9 @@ static void clipper_init(QEMUMachineInitArgs *args)
load_image_targphys(initrd_filename, initrd_base,
ram_size - initrd_base);
stq_phys(param_offset + 0x100, initrd_base + 0xfffffc0000000000ULL);
stq_phys(param_offset + 0x108, initrd_size);
stq_phys(&address_space_memory,
param_offset + 0x100, initrd_base + 0xfffffc0000000000ULL);
stq_phys(&address_space_memory, param_offset + 0x108, initrd_size);
}
}
}

@@ -119,7 +119,7 @@ static inline void
vmw_shmem_st64(hwaddr addr, uint64_t value)
{
VMW_SHPRN("SHMEM store64: %" PRIx64 " (value %" PRIx64 ")", addr, value);
stq_le_phys(addr, value);
stq_le_phys(&address_space_memory, addr, value);
}
/* Macros for simplification of operations on array-style registers */

@@ -559,6 +559,8 @@ static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPREnvironment *spapr,
static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPREnvironment *spapr,
target_ulong opcode, target_ulong *args)
{
CPUState *cs = CPU(cpu);
target_ulong size = args[0];
target_ulong addr = args[1];
target_ulong val = args[2];
@@ -574,7 +576,7 @@ static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPREnvironment *spapr,
stl_phys(addr, val);
return H_SUCCESS;
case 8:
stq_phys(addr, val);
stq_phys(cs->as, addr, val);
return H_SUCCESS;
}
return H_PARAMETER;
@@ -639,7 +641,7 @@ static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPREnvironment *spapr,
stl_phys(dst, tmp);
break;
case 3:
stq_phys(dst, tmp);
stq_phys(cs->as, dst, tmp);
break;
}
dst = dst + step;

@@ -378,7 +378,8 @@ void s390_virtio_device_sync(VirtIOS390Device *dev)
vring = s390_virtio_next_ring(bus);
virtio_queue_set_addr(dev->vdev, i, vring);
virtio_queue_set_vector(dev->vdev, i, i);
stq_be_phys(vq + VIRTIO_VQCONFIG_OFFS_ADDRESS, vring);
stq_be_phys(&address_space_memory,
vq + VIRTIO_VQCONFIG_OFFS_ADDRESS, vring);
stw_be_phys(vq + VIRTIO_VQCONFIG_OFFS_NUM, virtio_queue_get_num(dev->vdev, i));
}

@@ -873,7 +873,7 @@ static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
}
indicators = ldq_phys(&address_space_memory, dev->indicators);
indicators |= 1ULL << vector;
stq_phys(dev->indicators, indicators);
stq_phys(&address_space_memory, dev->indicators, indicators);
} else {
if (!dev->indicators2) {
return;
@@ -881,7 +881,7 @@ static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
vector = 0;
indicators = ldq_phys(&address_space_memory, dev->indicators2);
indicators |= 1ULL << vector;
stq_phys(dev->indicators2, indicators);
stq_phys(&address_space_memory, dev->indicators2, indicators);
}
css_conditional_io_interrupt(sch);

@@ -517,7 +517,8 @@ static void megasas_complete_frame(MegasasState *s, uint64_t context)
tail = s->reply_queue_head;
if (megasas_use_queue64(s)) {
queue_offset = tail * sizeof(uint64_t);
stq_le_phys(s->reply_queue_pa + queue_offset, context);
stq_le_phys(&address_space_memory,
s->reply_queue_pa + queue_offset, context);
} else {
queue_offset = tail * sizeof(uint32_t);
stl_le_phys(s->reply_queue_pa + queue_offset, context);

@@ -95,8 +95,8 @@ void stw_le_phys(hwaddr addr, uint32_t val);
void stw_be_phys(hwaddr addr, uint32_t val);
void stl_le_phys(hwaddr addr, uint32_t val);
void stl_be_phys(hwaddr addr, uint32_t val);
void stq_le_phys(hwaddr addr, uint64_t val);
void stq_be_phys(hwaddr addr, uint64_t val);
void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);
#ifdef NEED_CPU_H
uint32_t lduw_phys(AddressSpace *as, hwaddr addr);
@@ -105,7 +105,7 @@ uint64_t ldq_phys(AddressSpace *as, hwaddr addr);
void stl_phys_notdirty(hwaddr addr, uint32_t val);
void stw_phys(hwaddr addr, uint32_t val);
void stl_phys(hwaddr addr, uint32_t val);
void stq_phys(hwaddr addr, uint64_t val);
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val);
#endif
void cpu_physical_memory_write_rom(hwaddr addr,

@@ -106,7 +106,7 @@ DEF_HELPER_2(ldq_phys, i64, env, i64)
DEF_HELPER_2(ldl_l_phys, i64, env, i64)
DEF_HELPER_2(ldq_l_phys, i64, env, i64)
DEF_HELPER_2(stl_phys, void, i64, i64)
DEF_HELPER_2(stq_phys, void, i64, i64)
DEF_HELPER_3(stq_phys, void, env, i64, i64)
DEF_HELPER_3(stl_c_phys, i64, env, i64, i64)
DEF_HELPER_3(stq_c_phys, i64, env, i64, i64)

@@ -55,9 +55,10 @@ void helper_stl_phys(uint64_t p, uint64_t v)
stl_phys(p, v);
}
void helper_stq_phys(uint64_t p, uint64_t v)
void helper_stq_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{
stq_phys(p, v);
CPUState *cs = ENV_GET_CPU(env);
stq_phys(cs->as, p, v);
}
uint64_t helper_stl_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
@@ -85,7 +86,7 @@ uint64_t helper_stq_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
if (p == env->lock_addr) {
uint64_t old = ldq_phys(cs->as, p);
if (old == env->lock_value) {
stq_phys(p, v);
stq_phys(cs->as, p, v);
ret = 1;
}
}

@@ -3229,7 +3229,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x1:
/* Quadword physical access */
gen_helper_stq_phys(addr, val);
gen_helper_stq_phys(cpu_env, addr, val);
break;
case 0x2:
/* Longword physical access with lock */

@@ -881,7 +881,8 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
error_code |= PG_ERROR_I_D_MASK;
if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
/* cr2 is not modified in case of exceptions */
stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
stq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
addr);
} else {
env->cr[2] = addr;

@@ -43,6 +43,7 @@ void helper_rsm(CPUX86State *env)
void do_smm_enter(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
CPUState *cs = CPU(cpu);
target_ulong sm_state;
SegmentCache *dt;
int i, offset;
@@ -62,39 +63,39 @@ void do_smm_enter(X86CPU *cpu)
stw_phys(sm_state + offset, dt->selector);
stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
stl_phys(sm_state + offset + 4, dt->limit);
stq_phys(sm_state + offset + 8, dt->base);
stq_phys(cs->as, sm_state + offset + 8, dt->base);
}
stq_phys(sm_state + 0x7e68, env->gdt.base);
stq_phys(cs->as, sm_state + 0x7e68, env->gdt.base);
stl_phys(sm_state + 0x7e64, env->gdt.limit);
stw_phys(sm_state + 0x7e70, env->ldt.selector);
stq_phys(sm_state + 0x7e78, env->ldt.base);
stq_phys(cs->as, sm_state + 0x7e78, env->ldt.base);
stl_phys(sm_state + 0x7e74, env->ldt.limit);
stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
stq_phys(sm_state + 0x7e88, env->idt.base);
stq_phys(cs->as, sm_state + 0x7e88, env->idt.base);
stl_phys(sm_state + 0x7e84, env->idt.limit);
stw_phys(sm_state + 0x7e90, env->tr.selector);
stq_phys(sm_state + 0x7e98, env->tr.base);
stq_phys(cs->as, sm_state + 0x7e98, env->tr.base);
stl_phys(sm_state + 0x7e94, env->tr.limit);
stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
stq_phys(sm_state + 0x7ed0, env->efer);
stq_phys(cs->as, sm_state + 0x7ed0, env->efer);
stq_phys(sm_state + 0x7ff8, env->regs[R_EAX]);
stq_phys(sm_state + 0x7ff0, env->regs[R_ECX]);
stq_phys(sm_state + 0x7fe8, env->regs[R_EDX]);
stq_phys(sm_state + 0x7fe0, env->regs[R_EBX]);
stq_phys(sm_state + 0x7fd8, env->regs[R_ESP]);
stq_phys(sm_state + 0x7fd0, env->regs[R_EBP]);
stq_phys(sm_state + 0x7fc8, env->regs[R_ESI]);
stq_phys(sm_state + 0x7fc0, env->regs[R_EDI]);
stq_phys(cs->as, sm_state + 0x7ff8, env->regs[R_EAX]);
stq_phys(cs->as, sm_state + 0x7ff0, env->regs[R_ECX]);
stq_phys(cs->as, sm_state + 0x7fe8, env->regs[R_EDX]);
stq_phys(cs->as, sm_state + 0x7fe0, env->regs[R_EBX]);
stq_phys(cs->as, sm_state + 0x7fd8, env->regs[R_ESP]);
stq_phys(cs->as, sm_state + 0x7fd0, env->regs[R_EBP]);
stq_phys(cs->as, sm_state + 0x7fc8, env->regs[R_ESI]);
stq_phys(cs->as, sm_state + 0x7fc0, env->regs[R_EDI]);
for (i = 8; i < 16; i++) {
stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
stq_phys(cs->as, sm_state + 0x7ff8 - i * 8, env->regs[i]);
}
stq_phys(sm_state + 0x7f78, env->eip);
stq_phys(cs->as, sm_state + 0x7f78, env->eip);
stl_phys(sm_state + 0x7f70, cpu_compute_eflags(env));
stl_phys(sm_state + 0x7f68, env->dr[6]);
stl_phys(sm_state + 0x7f60, env->dr[7]);

@@ -88,9 +88,10 @@ void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
const SegmentCache *sc)
{
CPUState *cs = ENV_GET_CPU(env);
stw_phys(addr + offsetof(struct vmcb_seg, selector),
sc->selector);
stq_phys(addr + offsetof(struct vmcb_seg, base),
stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base),
sc->base);
stl_phys(addr + offsetof(struct vmcb_seg, limit),
sc->limit);
@@ -142,25 +143,33 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
env->vm_vmcb = addr;
/* save the current CPU state in the hsave page */
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
env->gdt.base);
stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
env->gdt.limit);
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
env->idt.base);
stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
env->idt.limit);
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags),
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.rflags),
cpu_compute_eflags(env));
svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
@@ -172,10 +181,12 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
&env->segs[R_DS]);
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rip),
env->eip + next_eip_addend);
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
/* load the interception bitmaps so we do not need to access the
vmcb in svm mode */
@@ -215,7 +226,8 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
save.idtr.limit));
/* clear exit_info_2 so we behave like the real hardware */
stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
stq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
cpu_x86_update_cr0(env, ldq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb,
@@ -420,17 +432,18 @@ void helper_vmsave(CPUX86State *env, int aflag)
&env->ldt);
#ifdef TARGET_X86_64
stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base),
stq_phys(cs->as, addr + offsetof(struct vmcb, save.kernel_gs_base),
env->kernelgsbase);
stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
stq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar), env->lstar);
stq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar), env->cstar);
stq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp),
stq_phys(cs->as, addr + offsetof(struct vmcb, save.star), env->star);
stq_phys(cs->as,
addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_esp),
env->sysenter_esp);
stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip),
stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_eip),
env->sysenter_eip);
}
@@ -564,7 +577,8 @@ void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
if (lduw_phys(cs->as, addr + port / 8) & (mask << (port & 7))) {
/* next env->eip */
stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
stq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
env->eip + next_eip_addend);
helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
}
@@ -602,21 +616,26 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
&env->segs[R_DS]);
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
env->gdt.base);
stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
env->gdt.limit);
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
env->idt.base);
stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
env->idt.limit);
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
stq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
stq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
stq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
stq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
stq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
int_ctl = ldl_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
@@ -627,14 +646,18 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
}
stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags),
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
cpu_compute_eflags(env));
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip),
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rip),
env->eip);
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
stq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
stq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
stq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
stq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
env->hflags & HF_CPL_MASK);
@@ -700,9 +723,9 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
/* other setups */
cpu_x86_set_cpl(env, 0);
stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
exit_code);
stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
exit_info_1);
stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),

@@ -101,20 +101,23 @@ static inline target_ulong ppc_hash64_load_hpte1(CPUPPCState *env,
static inline void ppc_hash64_store_hpte0(CPUPPCState *env,
hwaddr pte_offset, target_ulong pte0)
{
CPUState *cs = ENV_GET_CPU(env);
if (env->external_htab) {
stq_p(env->external_htab + pte_offset, pte0);
} else {
stq_phys(env->htab_base + pte_offset, pte0);
stq_phys(cs->as, env->htab_base + pte_offset, pte0);
}
}
static inline void ppc_hash64_store_hpte1(CPUPPCState *env,
hwaddr pte_offset, target_ulong pte1)
{
CPUState *cs = ENV_GET_CPU(env);
if (env->external_htab) {
stq_p(env->external_htab + pte_offset + HASH_PTE_SIZE_64/2, pte1);
} else {
stq_phys(env->htab_base + pte_offset + HASH_PTE_SIZE_64/2, pte1);
stq_phys(cs->as,
env->htab_base + pte_offset + HASH_PTE_SIZE_64/2, pte1);
}
}

@@ -138,18 +138,21 @@ static int trans_bits(CPUS390XState *env, uint64_t mode)
static void trigger_prot_fault(CPUS390XState *env, target_ulong vaddr,
uint64_t mode)
{
CPUState *cs = ENV_GET_CPU(env);
int ilen = ILEN_LATER_INC;
int bits = trans_bits(env, mode) | 4;
DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __func__, vaddr, bits);
stq_phys(env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
stq_phys(cs->as,
env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
trigger_pgm_exception(env, PGM_PROTECTION, ilen);
}
static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
uint32_t type, uint64_t asc, int rw)
{
CPUState *cs = ENV_GET_CPU(env);
int ilen = ILEN_LATER;
int bits = trans_bits(env, asc);
@@ -160,7 +163,8 @@ static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __func__, vaddr, bits);
stq_phys(env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
stq_phys(cs->as,
env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
trigger_pgm_exception(env, type, ilen);
}

@@ -1010,6 +1010,7 @@ uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
/* invalidate pte */
void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
{
CPUState *cs = ENV_GET_CPU(env);
uint64_t page = vaddr & TARGET_PAGE_MASK;
uint64_t pte = 0;
@@ -1019,7 +1020,7 @@ void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
According to spec we'd have to find it out ourselves */
/* XXX Linux is fine with overwriting the pte, the spec requires
us to only set the invalid bit */
stq_phys(pte_addr, pte | _PAGE_INVALID);
stq_phys(cs->as, pte_addr, pte | _PAGE_INVALID);
/* XXX we exploit the fact that Linux passes the exact virtual
address here - it's not obliged to! */

@@ -794,13 +794,13 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi,
"%08x: unimplemented access size: %d\n", addr,
size);
}
stq_phys((env->mxccregs[1] & 0xffffffffULL) + 0,
stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 0,
env->mxccdata[0]);
stq_phys((env->mxccregs[1] & 0xffffffffULL) + 8,
stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 8,
env->mxccdata[1]);
stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 16,
env->mxccdata[2]);
stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 24,
env->mxccdata[3]);
break;
case 0x01c00a00: /* MXCC control register */
@@ -1022,7 +1022,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi,
stl_phys(addr, val);
break;
case 8:
stq_phys(addr, val);
stq_phys(cs->as, addr, val);
break;
}
}
@@ -1044,7 +1044,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi,
| ((hwaddr)(asi & 0xf) << 32), val);
break;
case 8:
stq_phys((hwaddr)addr
stq_phys(cs->as, (hwaddr)addr
| ((hwaddr)(asi & 0xf) << 32), val);
break;
}
@@ -1660,6 +1660,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
int asi, int size)
{
CPUState *cs = ENV_GET_CPU(env);
#ifdef DEBUG_ASI
dump_asi("write", addr, asi, size, val);
#endif
@@ -1820,7 +1821,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
break;
case 8:
default:
stq_phys(addr, val);
stq_phys(cs->as, addr, val);
break;
}
}