KVM: s390: Handle MVPG partial execution interception

When the guest executes the MVPG instruction with DAT disabled,
and the source or destination page is not mapped in the host,
the so-called partial execution interception occurs. We need to
handle this event by setting up a mapping for the corresponding
user pages.

Signed-off-by: Thomas Huth <thuth@linux.vnet.ibm.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Authored by Thomas Huth on 2014-02-03 10:42:30 +01:00; committed by Christian Borntraeger
parent 198c74f43f
commit 9a558ee3cc
1 changed file with 54 additions and 1 deletion


@@ -1,7 +1,7 @@
/*
 * in-kernel handling for sie intercepts
 *
- * Copyright IBM Corp. 2008, 2009
+ * Copyright IBM Corp. 2008, 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
@@ -234,6 +234,58 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
        return rc2;
}

/**
 * Handle MOVE PAGE partial execution interception.
 *
 * This interception can only happen for guests with DAT disabled and
 * addresses that are currently not mapped in the host. Thus we try to
 * set up the mappings for the corresponding user pages here (or throw
 * addressing exceptions in case of illegal guest addresses).
 */
static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
{
        unsigned long hostaddr, srcaddr, dstaddr;
        psw_t *psw = &vcpu->arch.sie_block->gpsw;
        struct mm_struct *mm = current->mm;
        int reg1, reg2, rc;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
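        /* MVPG is an RRE-format instruction: the register designated by the
         * R1 field holds the destination (real) address, R2 the source. */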
        srcaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg2]);
        dstaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg1]);

        /* Make sure that the source is paged-in */
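        /* gmap_fault() translates the guest address through the guest address
         * space (gmap) to the backing userspace address, or an error value. */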
        hostaddr = gmap_fault(srcaddr, vcpu->arch.gmap);
        if (IS_ERR_VALUE(hostaddr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        down_read(&mm->mmap_sem);
        rc = get_user_pages(current, mm, hostaddr, 1, 0, 0, NULL, NULL);
        up_read(&mm->mmap_sem);
        if (rc < 0)
                return rc;

        /* Make sure that the destination is paged-in */
        hostaddr = gmap_fault(dstaddr, vcpu->arch.gmap);
        if (IS_ERR_VALUE(hostaddr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        down_read(&mm->mmap_sem);
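        /* Unlike the source, the destination page is faulted in writable:
         * the write argument of this get_user_pages() call is 1, not 0. */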
        rc = get_user_pages(current, mm, hostaddr, 1, 1, 0, NULL, NULL);
        up_read(&mm->mmap_sem);
        if (rc < 0)
                return rc;
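
        /* Both operand pages are mapped now; rewind the PSW by the length of
         * MVPG (4 bytes) so that the guest re-executes the instruction. */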
        psw->addr = __rewind_psw(*psw, 4);

        return 0;
}

static int handle_partial_execution(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.sie_block->ipa == 0xb254)        /* MVPG */
                return handle_mvpg_pei(vcpu);

        return -EOPNOTSUPP;
}

static const intercept_handler_t intercept_funcs[] = {
        [0x00 >> 2] = handle_noop,
        [0x04 >> 2] = handle_instruction,
@@ -245,6 +297,7 @@ static const intercept_handler_t intercept_funcs[] = {
        [0x1C >> 2] = kvm_s390_handle_wait,
        [0x20 >> 2] = handle_validity,
        [0x28 >> 2] = handle_stop,
        [0x38 >> 2] = handle_partial_execution,
};

int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
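
The hunk is truncated at the dispatcher's signature. For orientation, here is a minimal sketch of how a table-driven dispatch over intercept_funcs typically looks; it is an illustration inferred from the table layout above, not the verbatim body of kvm_handle_sie_intercept():

int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
        intercept_handler_t func;
        u8 code = vcpu->arch.sie_block->icptcode;

        /* Intercept codes are multiples of 4, hence the ">> 2" table index. */
        if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
                return -EOPNOTSUPP;
        func = intercept_funcs[code >> 2];
        return func ? func(vcpu) : -EOPNOTSUPP;
}

With the new [0x38 >> 2] entry, a partial-execution interception (code 0x38) selects slot 14 and reaches handle_partial_execution(), which in turn dispatches on the instruction opcode 0xb254 to handle_mvpg_pei().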