KVM: s390: Features and fixes for 5.2

- VSIE crypto fixes
 - new guest features for gen15
 - disable halt polling for nested virtualization with overcommit
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.22 (GNU/Linux)
 
 iQIcBAABAgAGBQJcxrJmAAoJEBF7vIC1phx8EsEP/2mIUbtY9OmVCZNHX43ds5Jr
 WR51UA/cXQGzP1cqLrqIchjJ40J7KGYBqS+9MeOyUxX85HUvb5dGgUiIfDOmh8R7
 YIHe3nkM0dcIRbeuSp48sA8rl817TNGSBg7GnUN+eaEvJ/U+WbLb1sry/0uZN6Tm
 2iFkff+XgSeEfBmrlxiPVl5PGUxi6FtKQWDwhn+MRkvs4sdQBh1SBITMIrzMgDmQ
 GMd5olfLp3AZZV2yniFvZM9TSWvKobCCH6IVF0/mBchxkqmdjQaKdSCRO6a1pLDh
 8PVBN7i+yipLURUMBuDCMxGDBINJgvvXkThB8N9K6+CanUc8KCc7l0EimS93s3DB
 FsutI/2mSFy/xJ4nk98VVp8WCbVftQLtyKUSytBiqCTSpg1gtFMMntCPAqlON4TV
 xHOaAnJjF4Lhvfm0QrxQ22bAmuju6WIh5WKG8D+s7yqcn7GZeDUYdeftWiGNteaf
 sJwX1Vq8H6iUac1mfp7UbfT+60UuiCkj/d9sY9eRBNlPPIX6V4UgZU4Xh8/rSMf3
 qnN4RCBGIQqndUzRzaw7ZtAfNy5jBE1BABems49fy07kuPCzrg9tQqXlWxf/60Ad
 QKqZ3Q/hb4ixYQJ7TAqQZmq1D3NL8w+V9MthcILmEGfMYF4BZKJV39ZigbttRIcN
 ZuiS+8IfOWN1IXZ2zXL0
 =mZyZ
 -----END PGP SIGNATURE-----

Merge tag 'kvm-s390-next-5.2-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: Features and fixes for 5.2

- VSIE crypto fixes
- new guest features for gen15
- disable halt polling for nested virtualization with overcommit
Paolo Bonzini 2019-04-30 21:29:14 +02:00
commit da8f0d97b2
13 changed files with 163 additions and 11 deletions

@@ -141,7 +141,8 @@ struct kvm_s390_vm_cpu_subfunc {
         u8 pcc[16];           # valid with Message-Security-Assist-Extension 4
         u8 ppno[16];          # valid with Message-Security-Assist-Extension 5
         u8 kma[16];           # valid with Message-Security-Assist-Extension 8
-        u8 reserved[1808];    # reserved for future instructions
+        u8 kdsa[16];          # valid with Message-Security-Assist-Extension 9
+        u8 reserved[1792];    # reserved for future instructions
 };

 Parameters: address of a buffer to load the subfunction blocks from.
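Since the whole subfunction block is transferred as one buffer, userspace only needs the updated layout to pick up the new kdsa field. Below is a minimal sketch of reading the host ("machine") subfunction blocks through the VM device-attribute interface and dumping the KDSA mask; it assumes an already opened VM fd and uapi headers that contain the new layout, and it omits error handling beyond the ioctl itself.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int dump_host_kdsa(int vm_fd)
{
        struct kvm_s390_vm_cpu_subfunc sub;
        struct kvm_device_attr attr = {
                .group = KVM_S390_VM_CPU_MODEL,
                .attr  = KVM_S390_VM_CPU_MACHINE_SUBFUNC,
                .addr  = (__u64)(unsigned long)&sub,
        };
        int i;

        if (ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr))
                return -1;
        /* kdsa[] is the 16-byte KDSA query mask added by this series */
        for (i = 0; i < 16; i++)
                printf("%02x", sub.kdsa[i]);
        printf("\n");
        return 0;
}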

@@ -28,6 +28,7 @@
 #define CPACF_KMCTR             0xb92d          /* MSA4 */
 #define CPACF_PRNO              0xb93c          /* MSA5 */
 #define CPACF_KMA               0xb929          /* MSA8 */
+#define CPACF_KDSA              0xb93a          /* MSA9 */

 /*
  * En/decryption modifier bits

@@ -278,6 +278,7 @@ struct kvm_s390_sie_block {
 #define ECD_HOSTREGMGMT 0x20000000
 #define ECD_MEF         0x08000000
 #define ECD_ETOKENF     0x02000000
+#define ECD_ECC         0x00200000
         __u32   ecd;                    /* 0x01c8 */
         __u8    reserved1cc[18];        /* 0x01cc */
         __u64   pp;                     /* 0x01de */
@@ -312,6 +313,7 @@ struct kvm_vcpu_stat {
         u64 halt_successful_poll;
         u64 halt_attempted_poll;
         u64 halt_poll_invalid;
+        u64 halt_no_poll_steal;
         u64 halt_wakeup;
         u64 instruction_lctl;
         u64 instruction_lctlg;

@@ -152,7 +152,10 @@ struct kvm_s390_vm_cpu_subfunc {
         __u8 pcc[16];           /* with MSA4 */
         __u8 ppno[16];          /* with MSA5 */
         __u8 kma[16];           /* with MSA8 */
-        __u8 reserved[1808];
+        __u8 kdsa[16];          /* with MSA9 */
+        __u8 sortl[32];         /* with STFLE.150 */
+        __u8 dfltcc[32];        /* with STFLE.151 */
+        __u8 reserved[1728];
 };

 /* kvm attributes for crypto */

@@ -31,6 +31,7 @@ config KVM
         select HAVE_KVM_IRQFD
         select HAVE_KVM_IRQ_ROUTING
         select HAVE_KVM_INVALID_WAKEUPS
+        select HAVE_KVM_NO_POLL
         select SRCU
         select KVM_VFIO
         ---help---

@@ -14,6 +14,7 @@
 #include <linux/kvm_host.h>
 #include <linux/hrtimer.h>
 #include <linux/mmu_context.h>
+#include <linux/nospec.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
 #include <linux/bitmap.h>
@@ -2307,6 +2308,7 @@ static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
 {
         if (id >= MAX_S390_IO_ADAPTERS)
                 return NULL;
+        id = array_index_nospec(id, MAX_S390_IO_ADAPTERS);
         return kvm->arch.adapters[id];
 }

@@ -2320,8 +2322,13 @@ static int register_io_adapter(struct kvm_device *dev,
                            (void __user *)attr->addr, sizeof(adapter_info)))
                 return -EFAULT;

-        if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
-            (dev->kvm->arch.adapters[adapter_info.id] != NULL))
+        if (adapter_info.id >= MAX_S390_IO_ADAPTERS)
+                return -EINVAL;
+
+        adapter_info.id = array_index_nospec(adapter_info.id,
+                                             MAX_S390_IO_ADAPTERS);
+
+        if (dev->kvm->arch.adapters[adapter_info.id] != NULL)
                 return -EINVAL;

         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
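Both hunks apply the usual Spectre v1 hardening pattern: the bounds check alone does not prevent a mispredicted branch from speculatively indexing the adapter array, so the index is additionally clamped with array_index_nospec() after the check. A generic, self-contained sketch of the pattern (not code from this file):

#include <linux/errno.h>
#include <linux/nospec.h>

/* Validate first, then clamp the index so that even under branch
 * misprediction the array access cannot go out of bounds. */
static int table_lookup(const int *table, unsigned int size, unsigned int idx)
{
        if (idx >= size)
                return -EINVAL;
        idx = array_index_nospec(idx, size);
        return table[idx];
}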

@@ -75,6 +75,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
         { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
         { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
         { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
+        { "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
         { "halt_wakeup", VCPU_STAT(halt_wakeup) },
         { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
         { "instruction_lctl", VCPU_STAT(instruction_lctl) },
@@ -177,6 +178,11 @@ static int hpage;
 module_param(hpage, int, 0444);
 MODULE_PARM_DESC(hpage, "1m huge page backing support");

+/* maximum percentage of steal time for polling.  >100 is treated like 100 */
+static u8 halt_poll_max_steal = 10;
+module_param(halt_poll_max_steal, byte, 0644);
+MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
+
 /*
  * For now we handle at most 16 double words as this is what the s390 base
  * kernel handles and stores in the prefix page. If we ever need to go beyond
@@ -321,6 +327,22 @@ static inline int plo_test_bit(unsigned char nr)
         return cc == 0;
 }

+static inline void __insn32_query(unsigned int opcode, u8 query[32])
+{
+        register unsigned long r0 asm("0") = 0;        /* query function */
+        register unsigned long r1 asm("1") = (unsigned long) query;
+
+        asm volatile(
+                /* Parameter regs are ignored */
+                "       .insn   rrf,%[opc] << 16,2,4,6,0\n"
+                : "=m" (*query)
+                : "d" (r0), "a" (r1), [opc] "i" (opcode)
+                : "cc");
+}
+
+#define INSN_SORTL 0xb938
+#define INSN_DFLTCC 0xb939
+
 static void kvm_s390_cpu_feat_init(void)
 {
         int i;
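SORTL and DFLTCC are general instructions rather than CPACF crypto functions, so their query forms cannot be reached through __cpacf_query(); __insn32_query() instead emits the raw opcode via .insn with r0 = 0 selecting the query function and r1 pointing at a 32-byte result block. Bits in that block can then be tested like other s390 function masks; the helper below is illustrative only and assumes the usual left-to-right (MSB-first) bit numbering:

/* Test function code 'fc' in a 32-byte query block filled by
 * __insn32_query(); not part of the patch, shown for clarity. */
static inline int insn32_query_func(const u8 query[32], unsigned int fc)
{
        return (query[fc / 8] >> (7 - (fc % 8))) & 1;
}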
@@ -368,6 +390,16 @@ static void kvm_s390_cpu_feat_init(void)
                 __cpacf_query(CPACF_KMA, (cpacf_mask_t *)
                               kvm_s390_available_subfunc.kma);

+        if (test_facility(155)) /* MSA9 */
+                __cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
+                              kvm_s390_available_subfunc.kdsa);
+
+        if (test_facility(150)) /* SORTL */
+                __insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);
+
+        if (test_facility(151)) /* DFLTCC */
+                __insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);
+
         if (MACHINE_HAS_ESOP)
                 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
         /*
@@ -654,6 +686,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
                         set_kvm_facility(kvm->arch.model.fac_mask, 135);
                         set_kvm_facility(kvm->arch.model.fac_list, 135);
                 }
+                if (test_facility(148)) {
+                        set_kvm_facility(kvm->arch.model.fac_mask, 148);
+                        set_kvm_facility(kvm->arch.model.fac_list, 148);
+                }
+                if (test_facility(152)) {
+                        set_kvm_facility(kvm->arch.model.fac_mask, 152);
+                        set_kvm_facility(kvm->arch.model.fac_list, 152);
+                }
                 r = 0;
         } else
                 r = -EINVAL;
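Facilities 148 and 152 (vector enhancements facility 2 and the vector packed-decimal enhancement facility, going by the STFLE bit assignments) are only advertised once userspace enables vector registers for the VM, which is the branch being extended here. A userspace sketch of that opt-in, assuming a VM fd and that the surrounding case, not visible in this hunk, is KVM_CAP_S390_VECTOR_REGISTERS:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Enabling the vector-registers capability on the VM reaches the code
 * above; with this change the guest model then also gets facilities 148
 * and 152 when the host has them. */
static int enable_vector_regs(int vm_fd)
{
        struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_VECTOR_REGISTERS };

        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}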
@@ -1320,6 +1360,19 @@ static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
         VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
                  ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
                  ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
+        VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
+                 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
+                 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
+        VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+                 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
+                 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
+                 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
+                 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
+        VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+                 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
+                 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
+                 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
+                 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

         return 0;
 }
@@ -1488,6 +1541,19 @@ static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
         VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
                  ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
                  ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
+        VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
+                 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
+                 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
+        VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+                 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
+                 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
+                 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
+                 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
+        VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+                 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
+                 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
+                 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
+                 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

         return 0;
 }
@@ -1543,6 +1609,19 @@ static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
         VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
                  ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
                  ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
+        VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
+                 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
+                 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
+        VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+                 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
+                 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
+                 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
+                 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
+        VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+                 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
+                 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
+                 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
+                 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);

         return 0;
 }
@@ -2814,6 +2893,25 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
         vcpu->arch.enabled_gmap = vcpu->arch.gmap;
 }

+static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
+{
+        if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
+            test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
+                return true;
+        return false;
+}
+
+static bool kvm_has_pckmo_ecc(struct kvm *kvm)
+{
+        /* At least one ECC subfunction must be present */
+        return kvm_has_pckmo_subfunc(kvm, 32) ||
+               kvm_has_pckmo_subfunc(kvm, 33) ||
+               kvm_has_pckmo_subfunc(kvm, 34) ||
+               kvm_has_pckmo_subfunc(kvm, 40) ||
+               kvm_has_pckmo_subfunc(kvm, 41);
+}
+
 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
 {
         /*
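kvm_has_pckmo_ecc() reports true only when at least one of the PCKMO ECC key-wrapping function codes added with MSA9 (32-34 and 40-41) is present in both the guest CPU model and the host query mask. An equivalent, self-contained formulation over the raw 16-byte masks, illustrative only, using the same inverted (MSB-first) bit numbering as test_bit_inv():

#include <linux/kernel.h>
#include <linux/types.h>

static bool has_any_pckmo_ecc(const u8 *guest_pckmo, const u8 *host_pckmo)
{
        static const unsigned int ecc_fc[] = { 32, 33, 34, 40, 41 };
        unsigned int i, fc;

        for (i = 0; i < ARRAY_SIZE(ecc_fc); i++) {
                fc = ecc_fc[i];
                /* bit 0 is the most significant bit of byte 0 */
                if (guest_pckmo[fc / 8] & host_pckmo[fc / 8] & (0x80 >> (fc % 8)))
                        return true;
        }
        return false;
}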
@@ -2826,13 +2924,19 @@ static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
         vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
         vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
         vcpu->arch.sie_block->eca &= ~ECA_APIE;
+        vcpu->arch.sie_block->ecd &= ~ECD_ECC;

         if (vcpu->kvm->arch.crypto.apie)
                 vcpu->arch.sie_block->eca |= ECA_APIE;

         /* Set up protected key support */
-        if (vcpu->kvm->arch.crypto.aes_kw)
+        if (vcpu->kvm->arch.crypto.aes_kw) {
                 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
+                /* ecc is also wrapped with AES key */
+                if (kvm_has_pckmo_ecc(vcpu->kvm))
+                        vcpu->arch.sie_block->ecd |= ECD_ECC;
+        }
+
         if (vcpu->kvm->arch.crypto.dea_kw)
                 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
 }
@@ -3065,6 +3169,17 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
         }
 }

+bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
+{
+        /* do not poll with more than halt_poll_max_steal percent of steal time */
+        if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
+            halt_poll_max_steal) {
+                vcpu->stat.halt_no_poll_steal++;
+                return true;
+        }
+        return false;
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
         /* kvm common code refers to this, but never calls it */
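avg_steal_timer is kept by the s390 vtime accounting in CPU-timer units of 1/4096 microsecond, so one timer tick corresponds to TICK_USEC << 12 such units and the expression above yields the steal percentage of the last accounting interval; once it reaches halt_poll_max_steal (default 10, writable at runtime through the new module parameter) polling is skipped and halt_no_poll_steal is incremented. A standalone sketch of just that arithmetic, with the TICK_USEC value assuming HZ=100:

#include <stdbool.h>
#include <stdint.h>

#define TICK_USEC 10000U        /* microseconds per tick, assuming HZ=100 */

/* avg_steal is in CPU-timer units (1/4096 us); one tick is
 * TICK_USEC << 12 such units, so this computes percent of steal. */
static bool skip_halt_polling(uint64_t avg_steal, unsigned int max_steal_pct)
{
        return avg_steal * 100 / ((uint64_t)TICK_USEC << 12) >= max_steal_pct;
}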

@@ -288,7 +288,9 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
         const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
         unsigned long *b1, *b2;
         u8 ecb3_flags;
+        u32 ecd_flags;
         int apie_h;
+        int apie_s;
         int key_msk = test_kvm_facility(vcpu->kvm, 76);
         int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
         int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
@@ -297,7 +299,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
         scb_s->crycbd = 0;

         apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
-        if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0))
+        apie_s = apie_h & scb_o->eca;
+        if (!apie_s && (!key_msk || (fmt_o == CRYCB_FORMAT0)))
                 return 0;

         if (!crycb_addr)
@@ -308,7 +311,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
             ((crycb_addr + 128) & PAGE_MASK))
                 return set_validity_icpt(scb_s, 0x003CU);

-        if (apie_h && (scb_o->eca & ECA_APIE)) {
+        if (apie_s) {
                 ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
                                  vcpu->kvm->arch.crypto.crycb,
                                  fmt_o, fmt_h);
@@ -320,7 +323,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
         /* we may only allow it if enabled for guest 2 */
         ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
                      (ECB3_AES | ECB3_DEA);
-        if (!ecb3_flags)
+        ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & ECD_ECC;
+        if (!ecb3_flags && !ecd_flags)
                 goto end;

         /* copy only the wrapping keys */
@@ -329,6 +333,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
                 return set_validity_icpt(scb_s, 0x0035U);

         scb_s->ecb3 |= ecb3_flags;
+        scb_s->ecd |= ecd_flags;

         /* xor both blocks in one run */
         b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
@@ -339,7 +344,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 end:
         switch (ret) {
         case -EINVAL:
-                return set_validity_icpt(scb_s, 0x0020U);
+                return set_validity_icpt(scb_s, 0x0022U);
         case -EFAULT:
                 return set_validity_icpt(scb_s, 0x0035U);
         case -EACCES:
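The shadowing rule for the new bit matches the existing AES/DEA handling: ECD_ECC reaches the nested (guest 3) control block only if guest 2 both requested it in its control block (scb_o) and was itself granted it by the host (vcpu->arch.sie_block), and AP interpretation now uses the same dual condition through apie_s. A minimal sketch of that intersection rule, with hypothetical names:

/* Hypothetical helper mirroring the rule used for ECD_ECC and ECA_APIE:
 * a control bit is forwarded to the nested guest only if guest 2 asked
 * for it and the host had already granted it to guest 2. */
static inline u32 shadow_ctrl(u32 g2_requested, u32 host_granted, u32 mask)
{
        return g2_requested & host_granted & mask;
}

/* e.g. ecd_flags = shadow_ctrl(scb_o->ecd, vcpu->arch.sie_block->ecd, ECD_ECC); */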

@@ -93,6 +93,9 @@ static struct facility_def facility_defs[] = {
                         131, /* enhanced-SOP 2 and side-effect */
                         139, /* multiple epoch facility */
                         146, /* msa extension 8 */
+                        150, /* enhanced sort */
+                        151, /* deflate conversion */
+                        155, /* msa extension 9 */
                         -1  /* END */
                 }
         },

@@ -1307,6 +1307,16 @@ static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
 }
 #endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */

+#ifdef CONFIG_HAVE_KVM_NO_POLL
+/* Callback that tells if we must not poll */
+bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
+#else
+static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
+{
+        return false;
+}
+#endif /* CONFIG_HAVE_KVM_NO_POLL */
+
 #ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
 long kvm_arch_vcpu_async_ioctl(struct file *filp,
                                unsigned int ioctl, unsigned long arg);

@@ -152,7 +152,8 @@ struct kvm_s390_vm_cpu_subfunc {
         __u8 pcc[16];           /* with MSA4 */
         __u8 ppno[16];          /* with MSA5 */
         __u8 kma[16];           /* with MSA8 */
-        __u8 reserved[1808];
+        __u8 kdsa[16];          /* with MSA9 */
+        __u8 reserved[1792];
 };

 /* kvm attributes for crypto */

@@ -57,3 +57,6 @@ config HAVE_KVM_VCPU_ASYNC_IOCTL

 config HAVE_KVM_VCPU_RUN_PID_CHANGE
        bool
+
+config HAVE_KVM_NO_POLL
+       bool

@@ -2253,7 +2253,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
         u64 block_ns;

         start = cur = ktime_get();
-        if (vcpu->halt_poll_ns) {
+        if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
                 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);

                 ++vcpu->stat.halt_attempted_poll;
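With the hook wired up, kvm_vcpu_block() only busy-polls when adaptive polling is enabled for the vCPU and the architecture does not veto it; on s390 the veto triggers once steal time exceeds halt_poll_max_steal, so heavily overcommitted nested setups go straight to sleep instead of burning host CPU. A simplified sketch of the resulting control flow (pseudostructure, not the literal function):

/* Simplified view of kvm_vcpu_block() after this change (sketch only). */
static void vcpu_block_sketch(struct kvm_vcpu *vcpu)
{
        if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
                /* busy-poll for up to halt_poll_ns hoping for a wakeup,
                 * updating halt_attempted_poll/halt_successful_poll */
        }
        /* no poll, or poll did not succeed: wait on the vCPU's wait
         * queue and schedule() until a wakeup arrives */
}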