diff --git a/cpus.c b/cpus.c
index 3de2e27f41..e476a3cd5e 100644
--- a/cpus.c
+++ b/cpus.c
@@ -70,7 +70,8 @@ static bool cpu_thread_is_idle(CPUArchState *env)
     if (env->stopped || !runstate_is_running()) {
         return true;
     }
-    if (!env->halted || qemu_cpu_has_work(env) || kvm_irqchip_in_kernel()) {
+    if (!env->halted || qemu_cpu_has_work(env) ||
+        kvm_async_interrupts_enabled()) {
         return false;
     }
     return true;
diff --git a/kvm-all.c b/kvm-all.c
index bf647614e8..bdb5550293 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -100,6 +100,7 @@ struct KVMState
 
 KVMState *kvm_state;
 bool kvm_kernel_irqchip;
+bool kvm_async_interrupts_allowed;
 
 static const KVMCapabilityInfo kvm_required_capabilites[] = {
     KVM_CAP_INFO(USER_MEMORY),
@@ -857,7 +858,7 @@ int kvm_irqchip_set_irq(KVMState *s, int irq, int level)
     struct kvm_irq_level event;
     int ret;
 
-    assert(kvm_irqchip_in_kernel());
+    assert(kvm_async_interrupts_enabled());
 
     event.level = level;
     event.irq = irq;
@@ -1201,6 +1202,10 @@ static int kvm_irqchip_create(KVMState *s)
         s->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
     }
     kvm_kernel_irqchip = true;
+    /* If we have an in-kernel IRQ chip then we must have asynchronous
+     * interrupt delivery (though the reverse is not necessarily true)
+     */
+    kvm_async_interrupts_allowed = true;
 
     kvm_init_irq_routing(s);
 
diff --git a/kvm-stub.c b/kvm-stub.c
index d23b11c020..a7a03e14ac 100644
--- a/kvm-stub.c
+++ b/kvm-stub.c
@@ -19,6 +19,7 @@
 
 KVMState *kvm_state;
 bool kvm_kernel_irqchip;
+bool kvm_async_interrupts_allowed;
 
 int kvm_init_vcpu(CPUArchState *env)
 {
diff --git a/kvm.h b/kvm.h
index 2617dd5acd..09818f38f1 100644
--- a/kvm.h
+++ b/kvm.h
@@ -24,13 +24,26 @@
 
 extern int kvm_allowed;
 extern bool kvm_kernel_irqchip;
+extern bool kvm_async_interrupts_allowed;
 
 #if defined CONFIG_KVM || !defined NEED_CPU_H
 #define kvm_enabled() (kvm_allowed)
 #define kvm_irqchip_in_kernel() (kvm_kernel_irqchip)
+
+/**
+ * kvm_async_interrupts_enabled:
+ *
+ * Returns: true if we can deliver interrupts to KVM
+ * asynchronously (ie by ioctl from any thread at any time)
+ * rather than having to do interrupt delivery synchronously
+ * (where the vcpu must be stopped at a suitable point first).
+ */
+#define kvm_async_interrupts_enabled() (kvm_async_interrupts_allowed)
+
 #else
 #define kvm_enabled() (0)
 #define kvm_irqchip_in_kernel() (false)
+#define kvm_async_interrupts_enabled() (false)
#endif
 
 struct kvm_run;
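
Usage note: a minimal sketch of how calling code might consume the new predicate, under stated assumptions. The device callback, its state struct, and the synchronous fallback below are hypothetical and not part of this patch; only kvm_async_interrupts_enabled(), kvm_irqchip_set_irq() and kvm_state come from the diff above. When asynchronous delivery is available, a line change can be pushed to the kernel immediately; otherwise the caller has to latch the level and leave injection to the vcpu thread at a safe point.

/* Illustrative sketch only -- not part of this patch. "MyDevState" and
 * "mydev_set_irq" are made-up names; the real synchronous fallback is
 * whatever per-target injection mechanism the board already uses.
 */
#include "kvm.h"

typedef struct MyDevState {
    int pending_level[16];      /* hypothetical latched line state */
} MyDevState;

static void mydev_set_irq(void *opaque, int irq, int level)
{
    MyDevState *dev = opaque;

    if (kvm_async_interrupts_enabled()) {
        /* Asynchronous delivery is available: the injection ioctl may
         * be issued from any thread at any time.
         */
        kvm_irqchip_set_irq(kvm_state, irq, level);
    } else {
        /* Only synchronous delivery: remember the level and let the
         * vcpu thread inject it once the guest is at a suitable point.
         */
        dev->pending_level[irq] = level;
    }
}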