s390/sthyi: reorganize sthyi implementation

As we need to support the sthyi instruction on LPAR too, move the common code
into the kernel and the KVM-related code into intercept.c for better reuse.

Signed-off-by: QingFeng Hao <haoqf@linux.vnet.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
QingFeng Hao 2017-09-29 12:41:50 +02:00 committed by Martin Schwidefsky
parent 2bd6bf03f4
commit b7c92f1a4e
6 changed files with 98 additions and 73 deletions
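
To make the effect of the reorganization concrete: sthyi_fill() now lives in
arch/s390/kernel/sthyi.c and is declared in asm/sysinfo.h, so a non-KVM caller
(for example, future LPAR support) can request the STHYI data without going
through KVM. The sketch below is illustrative only and not part of this commit;
the function example_read_sthyi() is hypothetical.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/printk.h>
#include <asm/sysinfo.h>

/* Hypothetical caller of the newly exported interface (not part of this commit). */
static int example_read_sthyi(void)
{
	u64 rc;
	int cc;
	/* STHYI expects a zeroed 4K response buffer. */
	void *page = (void *)get_zeroed_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;

	cc = sthyi_fill(page, &rc);	/* cc: condition code, rc: return code from R2 + 1 */
	if (cc || rc)
		pr_info("STHYI data not available: cc=%d rc=%llu\n", cc, rc);

	free_page((unsigned long)page);
	return cc ? -EOPNOTSUPP : 0;
}

Returning the condition code while passing the R2 + 1 return code through *rc keeps
the exported helper's results identical to what the instruction itself reports, so the
KVM intercept handler further down can forward both values to the guest unchanged.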

arch/s390/include/asm/sysinfo.h

@@ -198,4 +198,5 @@ struct service_level {
 int register_service_level(struct service_level *);
 int unregister_service_level(struct service_level *);
+int sthyi_fill(void *dst, u64 *rc);
 
 #endif /* __ASM_S390_SYSINFO_H */

arch/s390/kernel/Makefile

@@ -55,7 +55,7 @@ obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
 obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
 obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o
 obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
-obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o
+obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
 obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o
 
 extra-y += head.o head64.o vmlinux.lds

arch/s390/{kvm => kernel}/sthyi.c

@@ -8,22 +8,17 @@
  * Copyright IBM Corp. 2016
  * Author(s): Janosch Frank <frankja@linux.vnet.ibm.com>
  */
-#include <linux/kvm_host.h>
 #include <linux/errno.h>
 #include <linux/pagemap.h>
 #include <linux/vmalloc.h>
 #include <linux/ratelimit.h>
 
-#include <asm/kvm_host.h>
 #include <asm/asm-offsets.h>
 #include <asm/sclp.h>
 #include <asm/diag.h>
 #include <asm/sysinfo.h>
 #include <asm/ebcdic.h>
+#include <asm/facility.h>
 
-#include "kvm-s390.h"
-#include "gaccess.h"
-#include "trace.h"
-
 #define DED_WEIGHT 0xffff
 /*
@@ -382,88 +377,52 @@ out:
 	vfree(diag204_buf);
 }
 
-static int sthyi(u64 vaddr)
+static int sthyi(u64 vaddr, u64 *rc)
 {
 	register u64 code asm("0") = 0;
 	register u64 addr asm("2") = vaddr;
+	register u64 rcode asm("3");
 	int cc;
 
 	asm volatile(
 		".insn rre,0xB2560000,%[code],%[addr]\n"
 		"ipm %[cc]\n"
 		"srl %[cc],28\n"
-		: [cc] "=d" (cc)
+		: [cc] "=d" (cc), "=d" (rcode)
 		: [code] "d" (code), [addr] "a" (addr)
-		: "3", "memory", "cc");
+		: "memory", "cc");
+	*rc = rcode;
 	return cc;
 }
 
-int handle_sthyi(struct kvm_vcpu *vcpu)
+/*
+ * sthyi_fill - Fill page with data returned by the STHYI instruction
+ *
+ * @dst: Pointer to zeroed page
+ * @rc:  Pointer for storing the return code of the instruction
+ *
+ * Fills the destination with system information returned by the STHYI
+ * instruction. The data is generated by emulation or execution of STHYI,
+ * if available. The return value is the condition code that would be
+ * returned, the rc parameter is the return code which is passed in
+ * register R2 + 1.
+ */
+int sthyi_fill(void *dst, u64 *rc)
 {
-	int reg1, reg2, r = 0;
-	u64 code, addr, cc = 0;
-	struct sthyi_sctns *sctns = NULL;
-
-	if (!test_kvm_facility(vcpu->kvm, 74))
-		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+	struct sthyi_sctns *sctns = (struct sthyi_sctns *)dst;
 
 	/*
-	 * STHYI requires extensive locking in the higher hypervisors
-	 * and is very computational/memory expensive. Therefore we
-	 * ratelimit the executions per VM.
+	 * If the facility is on, we don't want to emulate the instruction.
+	 * We ask the hypervisor to provide the data.
 	 */
-	if (!__ratelimit(&vcpu->kvm->arch.sthyi_limit)) {
-		kvm_s390_retry_instr(vcpu);
-		return 0;
-	}
-
-	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
-	code = vcpu->run->s.regs.gprs[reg1];
-	addr = vcpu->run->s.regs.gprs[reg2];
-
-	vcpu->stat.instruction_sthyi++;
-	VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
-	trace_kvm_s390_handle_sthyi(vcpu, code, addr);
-
-	if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
-		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-
-	if (code & 0xffff) {
-		cc = 3;
-		goto out;
-	}
-
-	if (addr & ~PAGE_MASK)
-		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-
-	sctns = (void *)get_zeroed_page(GFP_KERNEL);
-	if (!sctns)
-		return -ENOMEM;
-
-	/*
-	 * If we are a guest, we don't want to emulate an emulated
-	 * instruction. We ask the hypervisor to provide the data.
-	 */
-	if (test_facility(74)) {
-		cc = sthyi((u64)sctns);
-		goto out;
-	}
+	if (test_facility(74))
+		return sthyi((u64)dst, rc);
 
 	fill_hdr(sctns);
 	fill_stsi(sctns);
 	fill_diag(sctns);
 
-out:
-	if (!cc) {
-		r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
-		if (r) {
-			free_page((unsigned long)sctns);
-			return kvm_s390_inject_prog_cond(vcpu, r);
-		}
-	}
-
-	free_page((unsigned long)sctns);
-	vcpu->run->s.regs.gprs[reg2 + 1] = cc ? 4 : 0;
-	kvm_s390_set_psw_cc(vcpu, cc);
-	return r;
+	*rc = 0;
+	return 0;
 }
+EXPORT_SYMBOL_GPL(sthyi_fill);
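
As the comment above documents, sthyi_fill() returns the STHYI condition code and
stores the R2 + 1 return code in *rc. A caller that only needs an errno-style result
could translate the pair roughly as follows; only the cc 0 / rc 0 success case and the
cc 3 / rc 4 "unsupported function code" case (used by the KVM handler below) come
from this commit, the rest of the mapping is an assumption.

#include <linux/errno.h>

/* Hypothetical helper: map the (cc, rc) pair from sthyi_fill() to an errno. */
static int sthyi_result_to_errno(int cc, u64 rc)
{
	if (cc == 0 && rc == 0)
		return 0;		/* response buffer holds valid data */
	if (cc == 3 && rc == 4)
		return -EOPNOTSUPP;	/* unsupported function code */
	return -EIO;			/* assumed policy for anything else */
}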

arch/s390/kvm/Makefile

@@ -12,6 +12,6 @@ common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqch
 ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
 
 kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o
-kvm-objs += diag.o gaccess.o guestdbg.o sthyi.o vsie.o
+kvm-objs += diag.o gaccess.o guestdbg.o vsie.o
 
 obj-$(CONFIG_KVM) += kvm.o

arch/s390/kvm/intercept.c

@@ -18,6 +18,7 @@
 #include <asm/kvm_host.h>
 #include <asm/asm-offsets.h>
 #include <asm/irq.h>
+#include <asm/sysinfo.h>
 
 #include "kvm-s390.h"
 #include "gaccess.h"
@@ -360,6 +361,71 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
 	return -EOPNOTSUPP;
 }
 
+/*
+ * Handle the sthyi instruction that provides the guest with system
+ * information, like current CPU resources available at each level of
+ * the machine.
+ */
+int handle_sthyi(struct kvm_vcpu *vcpu)
+{
+	int reg1, reg2, r = 0;
+	u64 code, addr, cc = 0, rc = 0;
+	struct sthyi_sctns *sctns = NULL;
+
+	if (!test_kvm_facility(vcpu->kvm, 74))
+		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+
+	/*
+	 * STHYI requires extensive locking in the higher hypervisors
+	 * and is very computational/memory expensive. Therefore we
+	 * ratelimit the executions per VM.
+	 */
+	if (!__ratelimit(&vcpu->kvm->arch.sthyi_limit)) {
+		kvm_s390_retry_instr(vcpu);
+		return 0;
+	}
+
+	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
+	code = vcpu->run->s.regs.gprs[reg1];
+	addr = vcpu->run->s.regs.gprs[reg2];
+
+	vcpu->stat.instruction_sthyi++;
+	VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
+	trace_kvm_s390_handle_sthyi(vcpu, code, addr);
+
+	if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+	if (code & 0xffff) {
+		cc = 3;
+		rc = 4;
+		goto out;
+	}
+
+	if (addr & ~PAGE_MASK)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+	sctns = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!sctns)
+		return -ENOMEM;
+
+	cc = sthyi_fill(sctns, &rc);
+
+out:
+	if (!cc) {
+		r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
+		if (r) {
+			free_page((unsigned long)sctns);
+			return kvm_s390_inject_prog_cond(vcpu, r);
+		}
+	}
+
+	free_page((unsigned long)sctns);
+	vcpu->run->s.regs.gprs[reg2 + 1] = rc;
+	kvm_s390_set_psw_cc(vcpu, cc);
+	return r;
+}
+
 static int handle_operexc(struct kvm_vcpu *vcpu)
 {
 	psw_t oldpsw, newpsw;

arch/s390/kvm/kvm-s390.h

@@ -242,6 +242,8 @@ static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
 	kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
 }
 
+int handle_sthyi(struct kvm_vcpu *vcpu);
+
 /* implemented in priv.c */
 int is_valid_psw(psw_t *psw);
 int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
@@ -268,9 +270,6 @@ void kvm_s390_vsie_destroy(struct kvm *kvm);
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
 
-/* implemented in sthyi.c */
-int handle_sthyi(struct kvm_vcpu *vcpu);
-
 /* implemented in kvm-s390.c */
 void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
 				const struct kvm_s390_vm_tod_clock *gtod);