diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 7330150bfe34..4e14d2304d5f 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -126,4 +126,7 @@ extern int __ucmpdi2(u64, u64);
 void _mcount(void);
 unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
 
+void pnv_power9_force_smt4_catch(void);
+void pnv_power9_force_smt4_release(void);
+
 #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index b62c31037cad..4803cc1b011b 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -32,6 +32,7 @@
 #include <asm/accounting.h>
 #include <asm/hmi.h>
 #include <asm/cpuidle.h>
+#include <asm/atomic.h>
 
 register struct paca_struct *local_paca asm("r13");
 
@@ -177,6 +178,8 @@ struct paca_struct {
 	u8 thread_mask;
 	/* Mask to denote subcore sibling threads */
 	u8 subcore_sibling_mask;
+	/* Flag to request this thread not to stop */
+	atomic_t dont_stop;
 	/*
 	 * Pointer to an array which contains pointer
 	 * to the sibling threads' paca.
diff --git a/arch/powerpc/include/asm/powernv.h b/arch/powerpc/include/asm/powernv.h
index dc5f6a5d4575..d1c2d2e658cf 100644
--- a/arch/powerpc/include/asm/powernv.h
+++ b/arch/powerpc/include/asm/powernv.h
@@ -40,6 +40,7 @@ static inline int pnv_npu2_handle_fault(struct npu_context *context,
 }
 
 static inline void pnv_tm_init(void) { }
+static inline void pnv_power9_force_smt4(void) { }
 #endif
 
 #endif /* _ASM_POWERNV_H */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index ea5eb91b836e..dbefe30d4daa 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -759,6 +759,7 @@ int main(void)
 	OFFSET(PACA_SUBCORE_SIBLING_MASK, paca_struct, subcore_sibling_mask);
 	OFFSET(PACA_SIBLING_PACA_PTRS, paca_struct, thread_sibling_pacas);
 	OFFSET(PACA_REQ_PSSCR, paca_struct, requested_psscr);
+	OFFSET(PACA_DONT_STOP, paca_struct, dont_stop);
 #define STOP_SPR(x, f)	OFFSET(x, paca_struct, stop_sprs.f)
 	STOP_SPR(STOP_PID, pid);
 	STOP_SPR(STOP_LDBAR, ldbar);
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 01e1c1997893..89157cf452e3 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -339,6 +339,7 @@ power_enter_stop:
 	bne	.Lhandle_esl_ec_set
 	PPC_STOP
 	li	r3,0  /* Since we didn't lose state, return 0 */
+	std	r3, PACA_REQ_PSSCR(r13)
 
 	/*
 	 * pnv_wakeup_noloss() expects r12 to contain the SRR1 value so
@@ -429,11 +430,29 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
  * r3 contains desired PSSCR register value.
  */
 _GLOBAL(power9_idle_stop)
+BEGIN_FTR_SECTION
+	lwz	r5, PACA_DONT_STOP(r13)
+	cmpwi	r5, 0
+	bne	1f
 	std	r3, PACA_REQ_PSSCR(r13)
+	sync
+	lwz	r5, PACA_DONT_STOP(r13)
+	cmpwi	r5, 0
+	bne	1f
+END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
 	mtspr	SPRN_PSSCR,r3
 	LOAD_REG_ADDR(r4,power_enter_stop)
 	b	pnv_powersave_common
 	/* No return */
+1:
+	/*
+	 * We get here when TM / thread reconfiguration bug workaround
+	 * code wants to get the CPU into SMT4 mode, and therefore
+	 * we are being asked not to stop.
+	 */
+	li	r3, 0
+	std	r3, PACA_REQ_PSSCR(r13)
+	blr		/* return 0 for wakeup cause / SRR1 value */
 
 /*
  * On waking up from stop 0,1,2 with ESL=1 on POWER9 DD1,
@@ -584,6 +603,8 @@ FTR_SECTION_ELSE_NESTED(71)
 	mfspr	r5, SPRN_PSSCR
 	rldicl	r5,r5,4,60
 ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_POWER9_DD1, 71)
+	li	r0, 0		/* clear requested_psscr to say we're awake */
+	std	r0, PACA_REQ_PSSCR(r13)
 	cmpd	cr4,r5,r4
 	bge	cr4,pnv_wakeup_tb_loss /* returns to caller */
 
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 443d5ca71995..99a760eae964 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -24,6 +24,7 @@
 #include <asm/code-patching.h>
 #include <asm/smp.h>
 #include <asm/runlatch.h>
+#include <asm/dbell.h>
 
 #include "powernv.h"
 #include "subcore.h"
@@ -387,6 +388,86 @@ void power9_idle(void)
 	power9_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
 }
 
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+/*
+ * This is used in working around bugs in thread reconfiguration
+ * on POWER9 (at least up to Nimbus DD2.2) relating to transactional
+ * memory and the way that XER[SO] is checkpointed.
+ * This function forces the core into SMT4 by asking
+ * all other threads not to stop, and sending a message to any
+ * that are in a stop state.
+ * Must be called with preemption disabled.
+ *
+ * DO NOT call this unless cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG) is
+ * true; otherwise this function will hang the system, due to the
+ * optimization in power9_idle_stop.
+ */
+void pnv_power9_force_smt4_catch(void)
+{
+	int cpu, cpu0, thr;
+	struct paca_struct *tpaca;
+	int awake_threads = 1;		/* this thread is awake */
+	int poke_threads = 0;
+	int need_awake = threads_per_core;
+
+	cpu = smp_processor_id();
+	cpu0 = cpu & ~(threads_per_core - 1);
+	tpaca = &paca[cpu0];
+	for (thr = 0; thr < threads_per_core; ++thr) {
+		if (cpu != cpu0 + thr)
+			atomic_inc(&tpaca[thr].dont_stop);
+	}
+	/* order setting dont_stop vs testing requested_psscr */
+	mb();
+	for (thr = 0; thr < threads_per_core; ++thr) {
+		if (!tpaca[thr].requested_psscr)
+			++awake_threads;
+		else
+			poke_threads |= (1 << thr);
+	}
+
+	/* If at least 3 threads are awake, the core is in SMT4 already */
+	if (awake_threads < need_awake) {
+		/* We have to wake some threads; we'll use msgsnd */
+		for (thr = 0; thr < threads_per_core; ++thr) {
+			if (poke_threads & (1 << thr)) {
+				ppc_msgsnd_sync();
+				ppc_msgsnd(PPC_DBELL_MSGTYPE, 0,
+					   tpaca[thr].hw_cpu_id);
+			}
+		}
+		/* now spin until at least 3 threads are awake */
+		do {
+			for (thr = 0; thr < threads_per_core; ++thr) {
+				if ((poke_threads & (1 << thr)) &&
+				    !tpaca[thr].requested_psscr) {
+					++awake_threads;
+					poke_threads &= ~(1 << thr);
+				}
+			}
+		} while (awake_threads < need_awake);
+	}
+}
+EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_catch);
+
+void pnv_power9_force_smt4_release(void)
+{
+	int cpu, cpu0, thr;
+	struct paca_struct *tpaca;
+
+	cpu = smp_processor_id();
+	cpu0 = cpu & ~(threads_per_core - 1);
+	tpaca = &paca[cpu0];
+
+	/* clear all the dont_stop flags */
+	for (thr = 0; thr < threads_per_core; ++thr) {
+		if (cpu != cpu0 + thr)
+			atomic_dec(&tpaca[thr].dont_stop);
+	}
+}
+EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+
 #ifdef CONFIG_HOTPLUG_CPU
 static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
 {
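
Usage note (editorial, not part of the patch): a minimal sketch of how a caller might bracket an operation that needs the core held in SMT4 with the new catch/release helpers. The function do_tm_sensitive_work() and the wrapper example_run_in_smt4() are hypothetical placeholders, not kernel symbols; the feature check assumes CPU_FTR_P9_TM_XER_SO_BUG is the same feature bit used in the hunks above.

#include <linux/preempt.h>
#include <asm/cpu_has_feature.h>
#include <asm/cputable.h>
#include <asm/asm-prototypes.h>	/* pnv_power9_force_smt4_catch/release() */

/* Hypothetical stand-in for the TM-related work that must run in SMT4. */
static void do_tm_sensitive_work(void) { }

static void example_run_in_smt4(void)
{
	/*
	 * power9_idle_stop() only honours dont_stop when
	 * CPU_FTR_P9_TM_XER_SO_BUG is set, so the catch/release pair
	 * must only be used on affected parts (see the warning in the
	 * idle.c comment above).
	 */
	if (!cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG)) {
		do_tm_sensitive_work();
		return;
	}

	preempt_disable();			/* catch() requires preemption off */
	pnv_power9_force_smt4_catch();		/* wake siblings; core is now in SMT4 */

	do_tm_sensitive_work();

	pnv_power9_force_smt4_release();	/* drop dont_stop; siblings may stop again */
	preempt_enable();
}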