Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "KVM/ARM fixes:
   - Fix per-vcpu vgic bitmap allocation
   - Do not copy random memory on MMIO read
   - Fix GICv3 APR register restore order

  KVM/x86 fixes:
   - Fix ubsan warning
   - Fix hardware breakpoints in a guest vs. preempt notifiers
   - Fix Hurd

  Generic:
   - use __GFP_NOWARN together with GFP_NOWAIT"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: MMU: fix ubsan index-out-of-range warning
  arm64: KVM: vgic-v3: Restore ICH_AP0Rn_EL2 before ICH_AP1Rn_EL2
  KVM: async_pf: do not warn on page allocation failures
  KVM: x86: fix conversion of addresses to linear in 32-bit protected mode
  KVM: x86: fix missed hardware breakpoints
  arm/arm64: KVM: Feed initialized memory to MMIO accesses
  KVM: arm/arm64: vgic: Ensure bitmaps are long enough
Linus Torvalds 2016-02-25 19:53:54 -08:00
commit 73056bbc68
7 changed files with 19 additions and 17 deletions

arch/arm/kvm/mmio.c

@@ -206,7 +206,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	run->mmio.is_write = is_write;
 	run->mmio.phys_addr = fault_ipa;
 	run->mmio.len = len;
-	memcpy(run->mmio.data, data_buf, len);
+	if (is_write)
+		memcpy(run->mmio.data, data_buf, len);
 
 	if (!ret) {
 		/* We handled the access successfully in the kernel. */
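
The guard matters because run->mmio.data is visible to userspace for both directions, while data_buf is only filled in on writes; on a read the old code copied uninitialized stack memory out. A minimal user-space sketch of the same guard, with all names invented for illustration:

    /* Buggy form: copy out unconditionally, leaking stack contents on
     * reads. Fixed form mirrors the patch: only writes carry payload. */
    #include <stdio.h>
    #include <string.h>

    struct fake_run { unsigned char data[8]; };

    static void complete_access(struct fake_run *run,
                                const unsigned char *scratch,
                                size_t len, int is_write)
    {
            /* On reads, scratch has not been filled by the device model
             * yet, so it must not reach the userspace-visible buffer. */
            if (is_write)
                    memcpy(run->data, scratch, len);
    }

    int main(void)
    {
            struct fake_run run = { { 0 } };
            unsigned char scratch[8];       /* deliberately uninitialized */

            complete_access(&run, scratch, sizeof(scratch), 0 /* read */);
            printf("read leaves run.data zeroed: %d\n", run.data[0] == 0);
            return 0;
    }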

arch/arm64/kvm/hyp/vgic-v3-sr.c

@@ -147,16 +147,6 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	max_lr_idx = vtr_to_max_lr_idx(val);
 	nr_pri_bits = vtr_to_nr_pri_bits(val);
 
-	switch (nr_pri_bits) {
-	case 7:
-		write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
-		write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
-	case 6:
-		write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
-	default:
-		write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
-	}
-
 	switch (nr_pri_bits) {
 	case 7:
 		write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
@@ -167,6 +157,16 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 		write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
 	}
 
+	switch (nr_pri_bits) {
+	case 7:
+		write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
+		write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
+	case 6:
+		write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
+	default:
+		write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
+	}
+
 	switch (max_lr_idx) {
 	case 15:
 		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2);
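
The change only moves the Group 1 block after the Group 0 block, per the commit's required order (AP0Rn before AP1Rn); the descending switches rely on deliberate fall-through so that nr_pri_bits selects how many active-priority registers get written. A standalone sketch of that pattern, where write_reg() and the arrays are invented stand-ins for write_gicreg() and the real ICH_AP{0,1}R<n>_EL2 registers:

    #include <stdio.h>

    static unsigned int ap0r[4], ap1r[4];

    static void write_reg(unsigned int *bank, int n, unsigned int val)
    {
            bank[n] = val;
            printf("AP%cR%d <= %u\n", bank == ap0r ? '0' : '1', n, val);
    }

    static void restore_aprs(int nr_pri_bits,
                             const unsigned int *s0, const unsigned int *s1)
    {
            switch (nr_pri_bits) {          /* Group 0 first, as in the fix */
            case 7:
                    write_reg(ap0r, 3, s0[3]);
                    write_reg(ap0r, 2, s0[2]);
                    /* fall through */
            case 6:
                    write_reg(ap0r, 1, s0[1]);
                    /* fall through */
            default:
                    write_reg(ap0r, 0, s0[0]);
            }
            switch (nr_pri_bits) {          /* then Group 1 */
            case 7:
                    write_reg(ap1r, 3, s1[3]);
                    write_reg(ap1r, 2, s1[2]);
                    /* fall through */
            case 6:
                    write_reg(ap1r, 1, s1[1]);
                    /* fall through */
            default:
                    write_reg(ap1r, 0, s1[0]);
            }
    }

    int main(void)
    {
            const unsigned int s0[4] = {1, 2, 3, 4}, s1[4] = {5, 6, 7, 8};

            restore_aprs(7, s0, s1);
            return 0;
    }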

arch/x86/kvm/emulate.c

@@ -650,10 +650,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 	u16 sel;
 
 	la = seg_base(ctxt, addr.seg) + addr.ea;
-	*linear = la;
 	*max_size = 0;
 	switch (mode) {
 	case X86EMUL_MODE_PROT64:
+		*linear = la;
 		if (is_noncanonical_address(la))
 			goto bad;
 
@@ -662,6 +662,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 			goto bad;
 		break;
 	default:
+		*linear = la = (u32)la;
 		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
 						addr.seg);
 		if (!usable)
@@ -689,7 +690,6 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 			if (size > *max_size)
 				goto bad;
 		}
-		la &= (u32)-1;
 		break;
 	}
 	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
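
The bug was ordering: *linear was assigned before the 32-bit truncation at the bottom of the non-64-bit path, so callers saw the raw base+offset sum; the fix assigns *linear per mode, truncating first (this is the "Fix Hurd" item, since the Hurd relies on segment wraparound). A self-contained demo of why the wrap matters, names invented:

    #include <stdint.h>
    #include <stdio.h>

    /* Outside 64-bit mode the linear address space wraps at 4 GiB, so
     * base + offset must be reduced modulo 2^32 before being returned
     * or range-checked. */
    static uint64_t linearize32(uint32_t seg_base, uint32_t ea)
    {
            uint64_t la = (uint64_t)seg_base + ea;  /* can exceed 32 bits */

            return (uint32_t)la;                    /* wrap, as the fix does */
    }

    int main(void)
    {
            /* A base near the top of the 32-bit space plus a small offset
             * wraps to a low linear address, not 0x100001000. */
            printf("linear = %#llx\n",
                   (unsigned long long)linearize32(0xfffff000u, 0x2000u));
            return 0;
    }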

arch/x86/kvm/paging_tmpl.h

@@ -249,7 +249,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
 			return ret;
 
 		kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
-		walker->ptes[level] = pte;
+		walker->ptes[level - 1] = pte;
 	}
 	return 0;
 }
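
walker->ptes[] is indexed by zero-based slot elsewhere in the walker, so indexing it with the one-based level number stepped one element past the end at the top level; that out-of-range index is what ubsan reported. A minimal model of the off-by-one, all names invented:

    #include <stdio.h>

    #define MAX_LEVELS 4

    static unsigned long ptes[MAX_LEVELS];

    static void store_pte(int level, unsigned long pte)
    {
            /* Levels are numbered from 1, slots from 0; "ptes[level]" was
             * the buggy form, and -fsanitize=bounds flags it when
             * level == MAX_LEVELS. */
            ptes[level - 1] = pte;
    }

    int main(void)
    {
            for (int level = MAX_LEVELS; level >= 1; level--)
                    store_pte(level, 0x1000UL * level);
            printf("top-level pte: %#lx\n", ptes[MAX_LEVELS - 1]);
            return 0;
    }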

arch/x86/kvm/x86.c

@@ -2752,6 +2752,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 
 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+	vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
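
The one-liner works because kvm_arch_vcpu_load() runs every time the vCPU is rescheduled, and whatever the host did with the hardware debug registers in the meantime is invisible to KVM; marking them stale forces the entry path to rewrite them before the guest runs. A user-space sketch of that reload-flag pattern, all names invented:

    #include <stdio.h>

    #define DEBUGREG_RELOAD (1u << 0)

    struct vcpu {
            unsigned int switch_db_regs;    /* dirty flags */
            unsigned long db[4];            /* guest breakpoint values */
    };

    static unsigned long hw_db[4];          /* models physical DR0-DR3 */

    static void vcpu_load(struct vcpu *v)
    {
            /* The host may have clobbered the registers while we were
             * scheduled out, so unconditionally mark them stale (the
             * one-line fix above). */
            v->switch_db_regs |= DEBUGREG_RELOAD;
    }

    static void vcpu_enter(struct vcpu *v)
    {
            if (v->switch_db_regs & DEBUGREG_RELOAD) {
                    for (int i = 0; i < 4; i++)
                            hw_db[i] = v->db[i];
                    v->switch_db_regs &= ~DEBUGREG_RELOAD;
            }
    }

    int main(void)
    {
            struct vcpu v = { 0, { 0x401000, 0, 0, 0 } };

            vcpu_load(&v);
            vcpu_enter(&v);
            printf("hw DR0 = %#lx\n", hw_db[0]);
            return 0;
    }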

virt/kvm/arm/vgic.c

@@ -1875,8 +1875,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
+	int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
+	int sz = nr_longs * sizeof(unsigned long);
 
 	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
 	vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
 	vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
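
The old size divided the bit count by 8 and rounded down to bytes, but bitmap helpers read and write whole unsigned longs, so the allocation must round up to a long boundary. A standalone check of the two computations (988 shared IRQs is just an example figure):

    #include <stdio.h>

    #define BITS_PER_LONG    (8 * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    int main(void)
    {
            unsigned long nbits = 988;      /* e.g. 1020 IRQs - 32 private */
            unsigned long old_sz = nbits / 8;
            unsigned long new_sz = BITS_TO_LONGS(nbits) * sizeof(unsigned long);

            /* On 64-bit: old = 123 bytes, new = 16 longs = 128 bytes; the
             * final long of the bitmap was partially unbacked before. */
            printf("old: %lu bytes, new: %lu bytes\n", old_sz, new_sz);
            return 0;
    }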

virt/kvm/async_pf.c

@@ -172,7 +172,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
 	/*
 	 * do alloc nowait since if we are going to sleep anyway we
 	 * may as well sleep faulting in page
 	 */
-	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
+	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
 	if (!work)
 		return 0;
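
The flag combination is the point: a GFP_NOWAIT allocation is expected to fail under memory pressure, and this caller already recovers by faulting the page in synchronously, so __GFP_NOWARN stops the allocator from logging a spurious failure splat. A user-space analogue of the try-cheap-then-fall-back shape, names invented:

    #include <stdio.h>
    #include <stdlib.h>

    struct work { int payload; };

    static void handle_fault(void)
    {
            /* Stands in for kmem_cache_zalloc(cache, GFP_NOWAIT | __GFP_NOWARN):
             * an opportunistic allocation whose failure is handled, not
             * reported. */
            struct work *w = calloc(1, sizeof(*w));

            if (!w) {
                    puts("no memory: fault the page in synchronously instead");
                    return;
            }
            w->payload = 42;
            puts("queued async page-fault work");
            free(w);
    }

    int main(void)
    {
            handle_fault();
            return 0;
    }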