KVM: arm: vgic: Only use the virtual state when userspace accesses enable bits
There is no point in accessing the HW when writing to any of the ISENABLER/ICENABLER registers from userspace, as only the guest should be allowed to change the HW state. Introduce new userspace-specific accessors that deal solely with the virtual state.

Reported-by: James Morse <james.morse@arm.com>
Tested-by: James Morse <james.morse@arm.com>
Reviewed-by: James Morse <james.morse@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
commit 41ee52ecbc
parent 9a50ebbffa
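For reference, the enable bits this commit is concerned with reach the kernel through the KVM device-attribute interface rather than through a trapped guest MMIO access. The sketch below shows how a VMM might set an enable bit on a GICv2 distributor via the KVM_DEV_ARM_VGIC_GRP_DIST_REGS group; the helper name, the gic_fd parameter and the chosen offset values are illustrative assumptions, not part of this commit.

/*
 * Illustrative sketch (not part of this commit): writing a GICv2
 * distributor GICD_ISENABLERn register from userspace through the
 * KVM_DEV_ARM_VGIC_GRP_DIST_REGS device attribute group. This is the
 * kind of access that ends up in the new uaccess handlers.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* gic_fd is assumed to be a KVM_DEV_TYPE_ARM_VGIC_V2 device fd */
static int enable_intid_from_userspace(int gic_fd, uint32_t vcpu_idx,
				       uint32_t intid)
{
	uint32_t offset = 0x100 + (intid / 32) * 4;	/* GICD_ISENABLERn */
	uint32_t val = 1u << (intid % 32);		/* set-enable bit  */
	struct kvm_device_attr attr = {
		.group	= KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
		.attr	= ((uint64_t)vcpu_idx << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
			  (offset & KVM_DEV_ARM_VGIC_OFFSET_MASK),
		.addr	= (uint64_t)(unsigned long)&val,
	};

	return ioctl(gic_fd, KVM_SET_DEVICE_ATTR, &attr);
}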
@@ -409,10 +409,12 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
 		NULL, vgic_mmio_uaccess_write_v2_group, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
-		vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
+		vgic_mmio_read_enable, vgic_mmio_write_senable,
+		NULL, vgic_uaccess_write_senable, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
-		vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
+		vgic_mmio_read_enable, vgic_mmio_write_cenable,
+		NULL, vgic_uaccess_write_cenable, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
 		vgic_mmio_read_pending, vgic_mmio_write_spending, NULL, NULL, 1,
@@ -538,10 +538,12 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = {
 		vgic_mmio_read_group, vgic_mmio_write_group, NULL, NULL, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
-		vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
+		vgic_mmio_read_enable, vgic_mmio_write_senable,
+		NULL, vgic_uaccess_write_senable, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
-		vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
+		vgic_mmio_read_enable, vgic_mmio_write_cenable,
+		NULL, vgic_uaccess_write_cenable, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
 		vgic_mmio_read_pending, vgic_mmio_write_spending,
@@ -609,11 +611,13 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
 	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGROUPR0,
 		vgic_mmio_read_group, vgic_mmio_write_group, 4,
 		VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ISENABLER0,
-		vgic_mmio_read_enable, vgic_mmio_write_senable, 4,
+	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISENABLER0,
+		vgic_mmio_read_enable, vgic_mmio_write_senable,
+		NULL, vgic_uaccess_write_senable, 4,
 		VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ICENABLER0,
-		vgic_mmio_read_enable, vgic_mmio_write_cenable, 4,
+	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICENABLER0,
+		vgic_mmio_read_enable, vgic_mmio_write_cenable,
+		NULL, vgic_uaccess_write_cenable, 4,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISPENDR0,
 		vgic_mmio_read_pending, vgic_mmio_write_spending,
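The two callbacks added to each table entry above fill the uaccess read/write slots of the vgic register description: when a region leaves them NULL, userspace accesses fall back to the guest MMIO handlers. A simplified sketch of that dispatch follows; it is an assumption about the shape of the code, not the kernel source itself.

/*
 * Simplified sketch of the vgic userspace-write dispatch: if a region
 * provides a dedicated uaccess_write handler it is used, otherwise the
 * handler meant for trapped guest MMIO is reused. Names follow the
 * vgic_register_region layout; this is a sketch only.
 */
static int vgic_uaccess_write_sketch(struct kvm_vcpu *vcpu,
				     const struct vgic_register_region *region,
				     gpa_t addr, u32 val)
{
	if (region->uaccess_write)
		return region->uaccess_write(vcpu, addr, sizeof(val), val);

	/* Fallback: reuse the guest MMIO write handler. */
	region->write(vcpu, addr, sizeof(val), val);
	return 0;
}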
@@ -184,6 +184,48 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
 	}
 }
 
+int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
+			       gpa_t addr, unsigned int len,
+			       unsigned long val)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	int i;
+	unsigned long flags;
+
+	for_each_set_bit(i, &val, len * 8) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
+		irq->enabled = true;
+		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+
+		vgic_put_irq(vcpu->kvm, irq);
+	}
+
+	return 0;
+}
+
+int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
+			       gpa_t addr, unsigned int len,
+			       unsigned long val)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	int i;
+	unsigned long flags;
+
+	for_each_set_bit(i, &val, len * 8) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
+		irq->enabled = false;
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+		vgic_put_irq(vcpu->kvm, irq);
+	}
+
+	return 0;
+}
+
 unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
 				     gpa_t addr, unsigned int len)
 {
@@ -138,6 +138,14 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
 			     gpa_t addr, unsigned int len,
 			     unsigned long val);
 
+int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
+			       gpa_t addr, unsigned int len,
+			       unsigned long val);
+
+int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
+			       gpa_t addr, unsigned int len,
+			       unsigned long val);
+
 unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
 				     gpa_t addr, unsigned int len);
 