diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 8e8514efb124..ee62776e5433 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -452,9 +452,100 @@ static inline u32 kvmppc_get_xics_latch(void)
 	return xirr;
 }
 
-static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+/*
+ * To avoid the need to unnecessarily exit fully to the host kernel, an IPI to
+ * a CPU thread that's running/napping inside of a guest is by default regarded
+ * as a request to wake the CPU (if needed) and continue execution within the
+ * guest, potentially to process new state like externally-generated
+ * interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI).
+ *
+ * To force an exit to the host kernel, kvmppc_set_host_ipi() must be called
+ * prior to issuing the IPI to set the corresponding 'host_ipi' flag in the
+ * target CPU's PACA. To avoid unnecessary exits to the host, this flag should
+ * be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on
+ * the receiving side prior to processing the IPI work.
+ *
+ * NOTE:
+ *
+ * We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi().
+ * This is to guard against sequences such as the following:
+ *
+ *      CPU
+ *        X: smp_muxed_ipi_set_message():
+ *        X:   smp_mb()
+ *        X:   message[RESCHEDULE] = 1
+ *        X: doorbell_global_ipi(42):
+ *        X:   kvmppc_set_host_ipi(42)
+ *        X:   ppc_msgsnd_sync()/smp_mb()
+ *        X:   ppc_msgsnd() -> 42
+ *       42: doorbell_exception(): // from CPU X
+ *       42:   ppc_msgsync()
+ *      105: smp_muxed_ipi_set_message():
+ *      105:   smp_mb()
+ *            // STORE DEFERRED DUE TO RE-ORDERING
+ *    --105:   message[CALL_FUNCTION] = 1
+ *    | 105: doorbell_global_ipi(42):
+ *    | 105:   kvmppc_set_host_ipi(42)
+ *    |  42:   kvmppc_clear_host_ipi(42)
+ *    |  42: smp_ipi_demux_relaxed()
+ *    |  42: // returns to executing guest
+ *    |       // RE-ORDERED STORE COMPLETES
+ *    ->105:   message[CALL_FUNCTION] = 1
+ *      105: ppc_msgsnd_sync()/smp_mb()
+ *      105: ppc_msgsnd() -> 42
+ *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
+ *      105: // hangs waiting on 42 to process messages/call_single_queue
+ *
+ * We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). This is
+ * to guard against sequences such as the following (as well as to create
+ * a read-side pairing with the barrier in kvmppc_set_host_ipi()):
+ *
+ *      CPU
+ *        X: smp_muxed_ipi_set_message():
+ *        X:   smp_mb()
+ *        X:   message[RESCHEDULE] = 1
+ *        X: doorbell_global_ipi(42):
+ *        X:   kvmppc_set_host_ipi(42)
+ *        X:   ppc_msgsnd_sync()/smp_mb()
+ *        X:   ppc_msgsnd() -> 42
+ *       42: doorbell_exception(): // from CPU X
+ *       42:   ppc_msgsync()
+ *            // STORE DEFERRED DUE TO RE-ORDERING
+ *    -- 42:   kvmppc_clear_host_ipi(42)
+ *    |  42: smp_ipi_demux_relaxed()
+ *    | 105: smp_muxed_ipi_set_message():
+ *    | 105:   smp_mb()
+ *    | 105:   message[CALL_FUNCTION] = 1
+ *    | 105: doorbell_global_ipi(42):
+ *    | 105:   kvmppc_set_host_ipi(42)
+ *    |       // RE-ORDERED STORE COMPLETES
+ *    ->  42:   kvmppc_clear_host_ipi(42)
+ *       42: // returns to executing guest
+ *      105: ppc_msgsnd_sync()/smp_mb()
+ *      105: ppc_msgsnd() -> 42
+ *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
+ *      105: // hangs waiting on 42 to process messages/call_single_queue
+ */
+static inline void kvmppc_set_host_ipi(int cpu)
 {
-	paca_ptrs[cpu]->kvm_hstate.host_ipi = host_ipi;
+	/*
+	 * order stores of IPI messages vs. setting of host_ipi flag
+	 *
+	 * pairs with the barrier in kvmppc_clear_host_ipi()
+	 */
+	smp_mb();
+	paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
+}
+
+static inline void kvmppc_clear_host_ipi(int cpu)
+{
+	paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;
+	/*
+	 * order clearing of host_ipi flag vs. processing of IPI messages
+	 *
+	 * pairs with the barrier in kvmppc_set_host_ipi()
+	 */
+	smp_mb();
 }
 
 static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
@@ -486,7 +577,10 @@ static inline u32 kvmppc_get_xics_latch(void)
 	return 0;
 }
 
-static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+static inline void kvmppc_set_host_ipi(int cpu)
+{}
+
+static inline void kvmppc_clear_host_ipi(int cpu)
 {}
 
 static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 804b1a6196fa..f17ff1200eaa 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -33,7 +33,7 @@ void doorbell_global_ipi(int cpu)
 {
 	u32 tag = get_hard_smp_processor_id(cpu);
 
-	kvmppc_set_host_ipi(cpu, 1);
+	kvmppc_set_host_ipi(cpu);
 	/* Order previous accesses vs. msgsnd, which is treated as a store */
 	ppc_msgsnd_sync();
 	ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag);
@@ -48,7 +48,7 @@ void doorbell_core_ipi(int cpu)
 {
 	u32 tag = cpu_thread_in_core(cpu);
 
-	kvmppc_set_host_ipi(cpu, 1);
+	kvmppc_set_host_ipi(cpu);
 	/* Order previous accesses vs. msgsnd, which is treated as a store */
 	ppc_msgsnd_sync();
 	ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag);
@@ -84,7 +84,7 @@ void doorbell_exception(struct pt_regs *regs)
 
 	may_hard_irq_enable();
 
-	kvmppc_set_host_ipi(smp_processor_id(), 0);
+	kvmppc_clear_host_ipi(smp_processor_id());
 	__this_cpu_inc(irq_stat.doorbell_irqs);
 
 	smp_ipi_demux_relaxed(); /* already performed the barrier */
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 4d2ec77d806c..287d5911df0f 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -58,7 +58,7 @@ static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
 	hcpu = hcore << threads_shift;
 	kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
 	smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
-	kvmppc_set_host_ipi(hcpu, 1);
+	kvmppc_set_host_ipi(hcpu);
 	smp_mb();
 	kvmhv_rm_send_ipi(hcpu);
 }
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 94cd96b9b7bb..fbd6e6b7bbf2 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -193,7 +193,7 @@ static void pnv_smp_cpu_kill_self(void)
 		 * for coming online, which are handled via
 		 * generic_check_cpu_restart() calls.
 		 */
-		kvmppc_set_host_ipi(cpu, 0);
+		kvmppc_clear_host_ipi(cpu);
 
 		srr1 = pnv_cpu_offline(cpu);
 
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 485569ff7ef1..7d13d2ef5a90 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -140,7 +140,7 @@ static unsigned int icp_native_get_irq(void)
 
 static void icp_native_cause_ipi(int cpu)
 {
-	kvmppc_set_host_ipi(cpu, 1);
+	kvmppc_set_host_ipi(cpu);
 	icp_native_set_qirr(cpu, IPI_PRIORITY);
 }
 
@@ -179,7 +179,7 @@ void icp_native_flush_interrupt(void)
 	if (vec == XICS_IPI) {
 		/* Clear pending IPI */
 		int cpu = smp_processor_id();
-		kvmppc_set_host_ipi(cpu, 0);
+		kvmppc_clear_host_ipi(cpu);
 		icp_native_set_qirr(cpu, 0xff);
 	} else {
 		pr_err("XICS: hw interrupt 0x%x to offline cpu, disabling\n",
@@ -200,7 +200,7 @@ static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
 {
 	int cpu = smp_processor_id();
 
-	kvmppc_set_host_ipi(cpu, 0);
+	kvmppc_clear_host_ipi(cpu);
 	icp_native_set_qirr(cpu, 0xff);
 
 	return smp_ipi_demux();
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index 8bb8dd7dd6ad..68fd2540b093 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -126,7 +126,7 @@ static void icp_opal_cause_ipi(int cpu)
 {
 	int hw_cpu = get_hard_smp_processor_id(cpu);
 
-	kvmppc_set_host_ipi(cpu, 1);
+	kvmppc_set_host_ipi(cpu);
 	opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
 }
 
@@ -134,7 +134,7 @@ static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
 {
 	int cpu = smp_processor_id();
 
-	kvmppc_set_host_ipi(cpu, 0);
+	kvmppc_clear_host_ipi(cpu);
 	opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
 
 	return smp_ipi_demux();
@@ -157,7 +157,7 @@ void icp_opal_flush_interrupt(void)
 		if (vec == XICS_IPI) {
 			/* Clear pending IPI */
 			int cpu = smp_processor_id();
-			kvmppc_set_host_ipi(cpu, 0);
+			kvmppc_clear_host_ipi(cpu);
			opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
 		} else {
 			pr_err("XICS: hw interrupt 0x%x to offline cpu, "
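
---

Side note for reviewers: the write/write vs. write/read pairing documented in the
kvm_ppc.h comment above can be modeled outside the kernel. Below is a minimal
userspace sketch using C11 atomics, not kernel code: 'message' stands in for the
smp_muxed_ipi message word, 'host_ipi' for paca->kvm_hstate.host_ipi, and the two
seq_cst fences play the role of the smp_mb() calls added by this patch. All names
in the sketch are illustrative.

/*
 * Illustrative userspace model (not kernel code) of the barrier pairing
 * between kvmppc_set_host_ipi() and kvmppc_clear_host_ipi().
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int message;  /* models message[CALL_FUNCTION] */
static atomic_int host_ipi; /* models paca->kvm_hstate.host_ipi */

/* Sender: smp_muxed_ipi_set_message() followed by doorbell_global_ipi() */
static void *sender(void *unused)
{
	(void)unused;
	atomic_store_explicit(&message, 1, memory_order_relaxed);
	/*
	 * Models the smp_mb() at the start of kvmppc_set_host_ipi():
	 * the message store must become visible no later than the flag store.
	 */
	atomic_thread_fence(memory_order_seq_cst);
	atomic_store_explicit(&host_ipi, 1, memory_order_relaxed);
	return NULL;
}

/* Receiver: doorbell_exception() clearing the flag, then demuxing messages */
static void *receiver(void *unused)
{
	(void)unused;
	atomic_store_explicit(&host_ipi, 0, memory_order_relaxed);
	/*
	 * Models the smp_mb() at the end of kvmppc_clear_host_ipi(): the
	 * flag clear is ordered before the message load. If the load below
	 * misses the sender's message store, the paired fences force the
	 * sender's host_ipi = 1 to land after our clear in modification
	 * order, so the flag stays set and the IPI is re-taken, not lost.
	 */
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load_explicit(&message, memory_order_relaxed))
		puts("IPI message observed");
	return NULL;
}

int main(void)
{
	pthread_t s, r;

	pthread_create(&s, NULL, sender, NULL);
	pthread_create(&r, NULL, receiver, NULL);
	pthread_join(s, NULL);
	pthread_join(r, NULL);
	return 0;
}

Build with e.g. "cc -pthread sketch.c". Without the two fences the relaxed
stores may be reordered exactly as in the interleavings documented above, and
the message can be consumed with host_ipi already clear; with them, any
receiver that misses the message leaves host_ipi set, matching the intent of
the kernel-side smp_mb() pairing.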