diff --git a/hw/s390x/s390-pci-vfio.c b/hw/s390x/s390-pci-vfio.c
index ead4f222d5..2a153fa8c9 100644
--- a/hw/s390x/s390-pci-vfio.c
+++ b/hw/s390x/s390-pci-vfio.c
@@ -29,14 +29,11 @@
  */
 bool s390_pci_update_dma_avail(int fd, unsigned int *avail)
 {
-    g_autofree struct vfio_iommu_type1_info *info;
-    uint32_t argsz;
+    uint32_t argsz = sizeof(struct vfio_iommu_type1_info);
+    g_autofree struct vfio_iommu_type1_info *info = g_malloc0(argsz);
 
     assert(avail);
 
-    argsz = sizeof(struct vfio_iommu_type1_info);
-    info = g_malloc0(argsz);
-
     /*
      * If the specified argsz is not large enough to contain all capabilities
      * it will be updated upon return from the ioctl. Retry until we have
@@ -230,7 +227,7 @@ static void s390_pci_read_pfip(S390PCIBusDevice *pbdev,
  */
 void s390_pci_get_clp_info(S390PCIBusDevice *pbdev)
 {
-    g_autofree struct vfio_device_info *info;
+    g_autofree struct vfio_device_info *info = NULL;
     VFIOPCIDevice *vfio_pci;
     uint32_t argsz;
     int fd;
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index 687c598be9..a1ab1ee12d 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -249,10 +249,6 @@ int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap);
 int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
 int kvm_on_sigbus(int code, void *addr);
 
-/* interface with exec.c */
-
-void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align, bool shared));
-
 /* internal API */
 
 int kvm_ioctl(KVMState *s, int type, ...);
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 7e8b0fab89..9e5ef4828e 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -1144,19 +1144,6 @@ static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
 static subpage_t *subpage_init(FlatView *fv, hwaddr base);
 
-static void *(*phys_mem_alloc)(size_t size, uint64_t *align, bool shared) =
-    qemu_anon_ram_alloc;
-
-/*
- * Set a custom physical guest memory alloator.
- * Accelerators with unusual needs may need this. Hopefully, we can
- * get rid of it eventually.
- */
-void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align, bool shared))
-{
-    phys_mem_alloc = alloc;
-}
-
 static uint16_t phys_section_add(PhysPageMap *map,
                                  MemoryRegionSection *section)
 {
@@ -1962,8 +1949,9 @@ static void ram_block_add(RAMBlock *new_block, Error **errp, bool shared)
             return;
         }
     } else {
-        new_block->host = phys_mem_alloc(new_block->max_length,
-                                         &new_block->mr->align, shared);
+        new_block->host = qemu_anon_ram_alloc(new_block->max_length,
+                                              &new_block->mr->align,
+                                              shared);
         if (!new_block->host) {
             error_setg_errno(errp, errno,
                              "cannot set up guest memory '%s'",
@@ -2047,17 +2035,6 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
         return NULL;
     }
 
-    if (phys_mem_alloc != qemu_anon_ram_alloc) {
-        /*
-         * file_ram_alloc() needs to allocate just like
-         * phys_mem_alloc, but we haven't bothered to provide
-         * a hook there.
-         */
-        error_setg(errp,
-                   "-mem-path not supported with this accelerator");
-        return NULL;
-    }
-
     size = HOST_PAGE_ALIGN(size);
     file_size = get_file_size(fd);
     if (file_size > 0 && file_size < size) {
@@ -2247,13 +2224,6 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
                 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                             flags, block->fd, offset);
             } else {
-                /*
-                 * Remap needs to match alloc. Accelerators that
-                 * set phys_mem_alloc never remap. If they did,
-                 * we'd need a remap hook here.
-                 */
-                assert(phys_mem_alloc == qemu_anon_ram_alloc);
-
                 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                             flags, -1, 0);
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index 60d434d5ed..468b4430f3 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -114,6 +114,11 @@ struct CPUS390XState {
 
     uint64_t diag318_info;
 
+#if !defined(CONFIG_USER_ONLY)
+    uint64_t tlb_fill_tec;   /* translation exception code during tlb_fill */
+    int tlb_fill_exc;        /* exception number seen during tlb_fill */
+#endif
+
     /* Fields up to this point are cleared by a CPU reset */
     struct {} end_reset_fields;
diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c
index dd474c5e9a..050dcf2d42 100644
--- a/target/s390x/cpu_models.c
+++ b/target/s390x/cpu_models.c
@@ -86,8 +86,8 @@ static S390CPUDef s390_cpu_defs[] = {
     CPUDEF_INIT(0x3906, 14, 1, 47, 0x08000000U, "z14", "IBM z14 GA1"),
     CPUDEF_INIT(0x3906, 14, 2, 47, 0x08000000U, "z14.2", "IBM z14 GA2"),
     CPUDEF_INIT(0x3907, 14, 1, 47, 0x08000000U, "z14ZR1", "IBM z14 Model ZR1 GA1"),
-    CPUDEF_INIT(0x8561, 15, 1, 47, 0x08000000U, "gen15a", "IBM z15 GA1"),
-    CPUDEF_INIT(0x8562, 15, 1, 47, 0x08000000U, "gen15b", "IBM 8562 GA1"),
+    CPUDEF_INIT(0x8561, 15, 1, 47, 0x08000000U, "gen15a", "IBM z15 T01 GA1"),
+    CPUDEF_INIT(0x8562, 15, 1, 47, 0x08000000U, "gen15b", "IBM z15 T02 GA1"),
 };
 
 #define QEMU_MAX_CPU_TYPE 0x2964
diff --git a/target/s390x/excp_helper.c b/target/s390x/excp_helper.c
index ce16af394b..c48cd6b46f 100644
--- a/target/s390x/excp_helper.c
+++ b/target/s390x/excp_helper.c
@@ -164,6 +164,9 @@ bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
         tec = 0; /* unused */
     }
 
+    env->tlb_fill_exc = excp;
+    env->tlb_fill_tec = tec;
+
     if (!excp) {
         qemu_log_mask(CPU_LOG_MMU,
                       "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
diff --git a/target/s390x/helper.h b/target/s390x/helper.h
index 55bd1551e6..d4e4f3388f 100644
--- a/target/s390x/helper.h
+++ b/target/s390x/helper.h
@@ -18,7 +18,7 @@ DEF_HELPER_3(srstu, void, env, i32, i32)
 DEF_HELPER_4(clst, i64, env, i64, i64, i64)
 DEF_HELPER_FLAGS_4(mvn, TCG_CALL_NO_WG, void, env, i32, i64, i64)
 DEF_HELPER_FLAGS_4(mvo, TCG_CALL_NO_WG, void, env, i32, i64, i64)
-DEF_HELPER_FLAGS_4(mvpg, TCG_CALL_NO_WG, i32, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(mvpg, TCG_CALL_NO_WG, i32, env, i64, i32, i32)
 DEF_HELPER_FLAGS_4(mvz, TCG_CALL_NO_WG, void, env, i32, i64, i64)
 DEF_HELPER_3(mvst, i32, env, i32, i32)
 DEF_HELPER_4(ex, void, env, i32, i64, i64)
diff --git a/target/s390x/insn-data.def b/target/s390x/insn-data.def
index e5b6efabf3..0bb1886a2e 100644
--- a/target/s390x/insn-data.def
+++ b/target/s390x/insn-data.def
@@ -641,7 +641,7 @@
 /* MOVE NUMERICS */
     C(0xd100, MVN, SS_a, Z, la1, a2, 0, 0, mvn, 0)
 /* MOVE PAGE */
-    C(0xb254, MVPG, RRE, Z, r1_o, r2_o, 0, 0, mvpg, 0)
+    C(0xb254, MVPG, RRE, Z, 0, 0, 0, 0, mvpg, 0)
 /* MOVE STRING */
     C(0xb255, MVST, RRE, Z, 0, 0, 0, 0, mvst, 0)
 /* MOVE WITH OPTIONAL SPECIFICATION */
diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c
index 73f816a722..4fb3bbfef5 100644
--- a/target/s390x/kvm.c
+++ b/target/s390x/kvm.c
@@ -161,8 +161,6 @@ static int cap_protected;
 
 static int active_cmma;
 
-static void *legacy_s390_alloc(size_t size, uint64_t *align, bool shared);
-
 static int kvm_s390_query_mem_limit(uint64_t *memory_limit)
 {
     struct kvm_device_attr attr = {
@@ -349,6 +347,11 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
                      "please use kernel 3.15 or newer");
         return -1;
     }
+    if (!kvm_check_extension(s, KVM_CAP_S390_COW)) {
+        error_report("KVM is missing capability KVM_CAP_S390_COW - "
+                     "unsupported environment");
+        return -1;
+    }
 
     cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
     cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
@@ -357,11 +360,6 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
     cap_vcpu_resets = kvm_check_extension(s, KVM_CAP_S390_VCPU_RESETS);
     cap_protected = kvm_check_extension(s, KVM_CAP_S390_PROTECTED);
 
-    if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
-        || !kvm_check_extension(s, KVM_CAP_S390_COW)) {
-        phys_mem_set_alloc(legacy_s390_alloc);
-    }
-
     kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
     kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0);
     kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
@@ -889,37 +887,6 @@ int kvm_s390_mem_op_pv(S390CPU *cpu, uint64_t offset, void *hostbuf,
     return ret;
 }
 
-/*
- * Legacy layout for s390:
- * Older S390 KVM requires the topmost vma of the RAM to be
- * smaller than an system defined value, which is at least 256GB.
- * Larger systems have larger values. We put the guest between
- * the end of data segment (system break) and this value. We
- * use 32GB as a base to have enough room for the system break
- * to grow. We also have to use MAP parameters that avoid
- * read-only mapping of guest pages.
- */
-static void *legacy_s390_alloc(size_t size, uint64_t *align, bool shared)
-{
-    static void *mem;
-
-    if (mem) {
-        /* we only support one allocation, which is enough for initial ram */
-        return NULL;
-    }
-
-    mem = mmap((void *) 0x800000000ULL, size,
-               PROT_EXEC|PROT_READ|PROT_WRITE,
-               MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
-    if (mem == MAP_FAILED) {
-        mem = NULL;
-    }
-    if (mem && align) {
-        *align = QEMU_VMALLOC_ALIGN;
-    }
-    return mem;
-}
-
 static uint8_t const *sw_bp_inst;
 static uint8_t sw_bp_ilen;
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index 25cfede806..12e84a4285 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -130,28 +130,103 @@ typedef struct S390Access {
     int mmu_idx;
 } S390Access;
 
+/*
+ * With nonfault=1, return the PGM_ exception that would have been injected
+ * into the guest; return 0 if no exception was detected.
+ *
+ * For !CONFIG_USER_ONLY, the TEC is stored to env->tlb_fill_tec.
+ * For CONFIG_USER_ONLY, the faulting address is stored to env->__excp_addr.
+ */
+static int s390_probe_access(CPUArchState *env, target_ulong addr, int size,
+                             MMUAccessType access_type, int mmu_idx,
+                             bool nonfault, void **phost, uintptr_t ra)
+{
+    int flags;
+
+#if defined(CONFIG_USER_ONLY)
+    flags = page_get_flags(addr);
+    if (!(flags & (access_type == MMU_DATA_LOAD ? PAGE_READ : PAGE_WRITE))) {
+        env->__excp_addr = addr;
+        flags = (flags & PAGE_VALID) ? PGM_PROTECTION : PGM_ADDRESSING;
+        if (nonfault) {
+            return flags;
+        }
+        tcg_s390_program_interrupt(env, flags, ra);
+    }
+    *phost = g2h(env_cpu(env), addr);
+#else
+    /*
+     * For !CONFIG_USER_ONLY, we cannot rely on TLB_INVALID_MASK or haddr==NULL
+     * to detect if there was an exception during tlb_fill().
+     */
+    env->tlb_fill_exc = 0;
+    flags = probe_access_flags(env, addr, access_type, mmu_idx, nonfault, phost,
+                               ra);
+    if (env->tlb_fill_exc) {
+        return env->tlb_fill_exc;
+    }
+
+    if (unlikely(flags & TLB_WATCHPOINT)) {
+        /* S390 does not presently use transaction attributes. */
+        cpu_check_watchpoint(env_cpu(env), addr, size,
+                             MEMTXATTRS_UNSPECIFIED,
+                             (access_type == MMU_DATA_STORE
+                              ? BP_MEM_WRITE : BP_MEM_READ), ra);
+    }
+#endif
+    return 0;
+}
+
+static int access_prepare_nf(S390Access *access, CPUS390XState *env,
+                             bool nonfault, vaddr vaddr1, int size,
+                             MMUAccessType access_type,
+                             int mmu_idx, uintptr_t ra)
+{
+    void *haddr1, *haddr2 = NULL;
+    int size1, size2, exc;
+    vaddr vaddr2 = 0;
+
+    assert(size > 0 && size <= 4096);
+
+    size1 = MIN(size, -(vaddr1 | TARGET_PAGE_MASK)),
+    size2 = size - size1;
+
+    exc = s390_probe_access(env, vaddr1, size1, access_type, mmu_idx, nonfault,
+                            &haddr1, ra);
+    if (exc) {
+        return exc;
+    }
+    if (unlikely(size2)) {
+        /* The access crosses page boundaries. */
+        vaddr2 = wrap_address(env, vaddr1 + size1);
+        exc = s390_probe_access(env, vaddr2, size2, access_type, mmu_idx,
+                                nonfault, &haddr2, ra);
+        if (exc) {
+            return exc;
+        }
+    }
+
+    *access = (S390Access) {
+        .vaddr1 = vaddr1,
+        .vaddr2 = vaddr2,
+        .haddr1 = haddr1,
+        .haddr2 = haddr2,
+        .size1 = size1,
+        .size2 = size2,
+        .mmu_idx = mmu_idx
+    };
+    return 0;
+}
+
 static S390Access access_prepare(CPUS390XState *env, vaddr vaddr, int size,
                                  MMUAccessType access_type, int mmu_idx,
                                  uintptr_t ra)
 {
-    S390Access access = {
-        .vaddr1 = vaddr,
-        .size1 = MIN(size, -(vaddr | TARGET_PAGE_MASK)),
-        .mmu_idx = mmu_idx,
-    };
-
-    g_assert(size > 0 && size <= 4096);
-    access.haddr1 = probe_access(env, access.vaddr1, access.size1, access_type,
-                                 mmu_idx, ra);
-
-    if (unlikely(access.size1 != size)) {
-        /* The access crosses page boundaries. */
-        access.vaddr2 = wrap_address(env, vaddr + access.size1);
-        access.size2 = size - access.size1;
-        access.haddr2 = probe_access(env, access.vaddr2, access.size2,
-                                     access_type, mmu_idx, ra);
-    }
-    return access;
+    S390Access ret;
+    int exc = access_prepare_nf(&ret, env, false, vaddr, size,
+                                access_type, mmu_idx, ra);
+    assert(!exc);
+    return ret;
 }
 
 /* Helper to handle memset on a single page. */
@@ -840,33 +915,58 @@ uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
 }
 
 /* move page */
-uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
+uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint32_t r1, uint32_t r2)
 {
+    const uint64_t src = get_address(env, r2) & TARGET_PAGE_MASK;
+    const uint64_t dst = get_address(env, r1) & TARGET_PAGE_MASK;
     const int mmu_idx = cpu_mmu_index(env, false);
     const bool f = extract64(r0, 11, 1);
    const bool s = extract64(r0, 10, 1);
+    const bool cco = extract64(r0, 8, 1);
     uintptr_t ra = GETPC();
     S390Access srca, desta;
+    int exc;
 
     if ((f && s) || extract64(r0, 12, 4)) {
         tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
     }
 
-    r1 = wrap_address(env, r1 & TARGET_PAGE_MASK);
-    r2 = wrap_address(env, r2 & TARGET_PAGE_MASK);
-
     /*
-     * TODO:
-     * - Access key handling
-     * - CC-option with surpression of page-translation exceptions
-     * - Store r1/r2 register identifiers at real location 162
+     * We always manually handle exceptions such that we can properly store
+     * r1/r2 to the lowcore on page-translation exceptions.
+     *
+     * TODO: Access key handling
      */
-    srca = access_prepare(env, r2, TARGET_PAGE_SIZE, MMU_DATA_LOAD, mmu_idx,
-                          ra);
-    desta = access_prepare(env, r1, TARGET_PAGE_SIZE, MMU_DATA_STORE, mmu_idx,
-                           ra);
+    exc = access_prepare_nf(&srca, env, true, src, TARGET_PAGE_SIZE,
+                            MMU_DATA_LOAD, mmu_idx, ra);
+    if (exc) {
+        if (cco) {
+            return 2;
+        }
+        goto inject_exc;
+    }
+    exc = access_prepare_nf(&desta, env, true, dst, TARGET_PAGE_SIZE,
+                            MMU_DATA_STORE, mmu_idx, ra);
+    if (exc) {
+        if (cco && exc != PGM_PROTECTION) {
+            return 1;
+        }
+        goto inject_exc;
+    }
     access_memmove(env, &desta, &srca, ra);
     return 0; /* data moved */
+inject_exc:
+#if !defined(CONFIG_USER_ONLY)
+    if (exc != PGM_ADDRESSING) {
+        stq_phys(env_cpu(env)->as, env->psa + offsetof(LowCore, trans_exc_code),
+                 env->tlb_fill_tec);
+    }
+    if (exc == PGM_PAGE_TRANS) {
+        stb_phys(env_cpu(env)->as, env->psa + offsetof(LowCore, op_access_id),
+                 r1 << 4 | r2);
+    }
+#endif
+    tcg_s390_program_interrupt(env, exc, ra);
 }
 
 /* string copy */
diff --git a/target/s390x/translate.c b/target/s390x/translate.c
index 61dd0341e4..4f953ddfba 100644
--- a/target/s390x/translate.c
+++ b/target/s390x/translate.c
@@ -3513,7 +3513,12 @@ static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
 
 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
 {
-    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
+    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
+    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
+
+    gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
+    tcg_temp_free_i32(t1);
+    tcg_temp_free_i32(t2);
     set_cc_static(s);
     return DISAS_NEXT;
 }
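
Reviewer note (not part of the patch): the condition-code handling introduced for MVPG above can be summarized with a small standalone C sketch. The enum values and the name resolve_mvpg_cc() below are invented for illustration only; what mirrors the new HELPER(mvpg) is the returned condition codes (2 for a source-page fault, 1 for a destination fault other than protection, 0 when data moved) and the rule that protection exceptions are always injected. The helper itself additionally stores the TEC and, for page-translation faults, the r1/r2 operand identifiers to the lowcore before raising the program interrupt.

/*
 * Illustrative sketch, compiles standalone; names and enum values are
 * placeholders, not the architected s390x program-interruption codes.
 */
#include <stdbool.h>
#include <stdio.h>

enum pgm_exc {
    PGM_EXC_NONE = 0,
    PGM_EXC_PROTECTION,
    PGM_EXC_ADDRESSING,
    PGM_EXC_PAGE_TRANS,
};

/*
 * Return the condition code MVPG would set, or -1 when the exception must
 * be injected into the guest instead (as done at the inject_exc label in
 * the helper above).
 */
static int resolve_mvpg_cc(bool cco, enum pgm_exc src_exc, enum pgm_exc dst_exc)
{
    if (src_exc != PGM_EXC_NONE) {
        return cco ? 2 : -1;        /* source page unavailable */
    }
    if (dst_exc != PGM_EXC_NONE) {
        if (cco && dst_exc != PGM_EXC_PROTECTION) {
            return 1;               /* destination page unavailable */
        }
        return -1;                  /* protection faults are always injected */
    }
    return 0;                       /* data moved */
}

int main(void)
{
    printf("%d\n", resolve_mvpg_cc(true, PGM_EXC_PAGE_TRANS, PGM_EXC_NONE));  /* 2 */
    printf("%d\n", resolve_mvpg_cc(true, PGM_EXC_NONE, PGM_EXC_PROTECTION));  /* -1 */
    printf("%d\n", resolve_mvpg_cc(false, PGM_EXC_NONE, PGM_EXC_NONE));       /* 0 */
    return 0;
}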