diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 952eba6701f4..fbbd3f6f0064 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -385,15 +385,15 @@ BEGIN_FTR_SECTION
 	oris	r0,r6,(SLB_ESID_V)@h
 	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
 
-	/* Update the last bolted SLB */
+	/* Update the last bolted SLB.  No write barriers are needed
+	 * here, provided we only update the current CPU's SLB shadow
+	 * buffer.
+	 */
 	ld	r9,PACA_SLBSHADOWPTR(r13)
 	li	r12,0
 	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
-	eieio
 	std	r7,SLBSHADOW_STACKVSID(r9)	/* Save VSID */
-	eieio
 	std	r0,SLBSHADOW_STACKESID(r9)	/* Save ESID */
-	eieio
 
 	slbie	r6
 	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index ff1811ac6c81..4bee1cfa9dea 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -59,14 +59,12 @@ static inline void slb_shadow_update(unsigned long ea,
 {
 	/*
 	 * Clear the ESID first so the entry is not valid while we are
-	 * updating it.
+	 * updating it.  No write barriers are needed here, provided
+	 * we only update the current CPU's SLB shadow buffer.
 	 */
 	get_slb_shadow()->save_area[entry].esid = 0;
-	smp_wmb();
 	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
-	smp_wmb();
 	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
-	smp_wmb();
 }
 
 static inline void slb_shadow_clear(unsigned long entry)
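
For reference, a sketch of slb_shadow_update() as it reads after the slb.c hunk
is applied. The body is taken directly from the hunk above; the trailing
parameters (flags, entry) are truncated in the hunk header and are assumed here
from how they are used in the body.

static inline void slb_shadow_update(unsigned long ea,
				      unsigned long flags,  /* assumed: truncated in hunk header */
				      unsigned long entry)  /* assumed: truncated in hunk header */
{
	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it.  No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */
	get_slb_shadow()->save_area[entry].esid = 0;
	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
}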