powerpc: Introduce local (non-broadcast) forms of tlb invalidates
Introduced a new set of low-level TLB invalidate functions that do not broadcast invalidates on the bus:

_tlbil_all - invalidate all
_tlbil_pid - invalidate based on process id (or mm context)
_tlbil_va  - invalidate based on virtual address (ea + pid)

On non-SMP configs _tlbil_all should be functionally equivalent to _tlbia, and _tlbil_va should be functionally equivalent to _tlbie.

The intent of this change is to handle SMP-based invalidates via IPIs instead of broadcasts, since that mechanism scales better to larger numbers of cores.

On e500 (FSL BookE MMU) based cores, move to using MMUCSR0 for invalidate-all and tlbsx/tlbwe for invalidating a virtual address.

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
parent 1afb7f809b
commit 0ba3418b8b
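The commit message states the longer-term intent: on SMP, run these local invalidates on each core via an IPI instead of relying on a bus broadcast. As a hedged sketch only (no such code is in this patch), and assuming the kernel's generic on_each_cpu() helper, that could look roughly like the following; the wrapper and callback names are hypothetical:

    #include <linux/smp.h>
    #include <asm/tlbflush.h>

    struct tlb_flush_param {
    	unsigned long addr;
    	unsigned int pid;
    };

    /* IPI callback: invalidates the entry only in this CPU's TLB. */
    static void do_flush_tlb_va_ipi(void *param)
    {
    	struct tlb_flush_param *p = param;

    	_tlbil_va(p->addr, p->pid);
    }

    /* Hypothetical SMP wrapper: invalidate one EA + PID on every online CPU. */
    static void smp_flush_tlb_va(unsigned long addr, unsigned int pid)
    {
    	struct tlb_flush_param p = { .addr = addr, .pid = pid };

    	on_each_cpu(do_flush_tlb_va_ipi, &p, 1);	/* 1 = wait for all CPUs */
    }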
@@ -109,6 +109,7 @@
 #define SPRN_EVPR	0x3D6	/* Exception Vector Prefix Register */
 #define SPRN_L1CSR0	0x3F2	/* L1 Cache Control and Status Register 0 */
 #define SPRN_L1CSR1	0x3F3	/* L1 Cache Control and Status Register 1 */
+#define SPRN_MMUCSR0	0x3F4	/* MMU Control and Status Register 0 */
 #define SPRN_PIT	0x3DB	/* Programmable Interval Timer */
 #define SPRN_BUCSR	0x3F5	/* Branch Unit Control and Status */
 #define SPRN_L2CSR0	0x3F9	/* L2 Data Cache Control and Status Register 0 */
@@ -410,6 +411,12 @@
 #define L2CSR0_L2LOA	0x00000080	/* L2 Cache Lock Overflow Allocate */
 #define L2CSR0_L2LO	0x00000020	/* L2 Cache Lock Overflow */
 
+/* Bit definitions for MMUCSR0 */
+#define MMUCSR0_TLB1FI	0x00000002	/* TLB1 Flash invalidate */
+#define MMUCSR0_TLB0FI	0x00000004	/* TLB0 Flash invalidate */
+#define MMUCSR0_TLB2FI	0x00000040	/* TLB2 Flash invalidate */
+#define MMUCSR0_TLB3FI	0x00000020	/* TLB3 Flash invalidate */
+
 /* Bit definitions for SGR. */
 #define SGR_NORMAL	0		/* Speculative fetching allowed. */
 #define SGR_GUARDED	1		/* Speculative fetching disallowed. */
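These MMUCSR0 bits are consumed by the new FSL BookE assembly later in this patch: software sets the per-array flash-invalidate bits and then polls the register until hardware clears them. A hedged C-level illustration of that protocol, with a hypothetical helper name and assumed include locations:

    #include <asm/processor.h>	/* cpu_relax() */
    #include <asm/reg.h>	/* mtspr()/mfspr(); assumed to pull in the BookE SPR definitions */

    /* Hypothetical helper: flash-invalidate all local TLB arrays via MMUCSR0. */
    static inline void mmucsr0_flash_invalidate(void)
    {
    	unsigned long bits = MMUCSR0_TLB0FI | MMUCSR0_TLB1FI |
    			     MMUCSR0_TLB2FI | MMUCSR0_TLB3FI;

    	mtspr(SPRN_MMUCSR0, bits);		/* request the invalidates */
    	while (mfspr(SPRN_MMUCSR0) & bits)	/* hardware clears the bits */
    		cpu_relax();			/* when it is done */
    }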
@@ -29,6 +29,9 @@
 #include <linux/mm.h>
 
 extern void _tlbie(unsigned long address, unsigned int pid);
+extern void _tlbil_all(void);
+extern void _tlbil_pid(unsigned int pid);
+extern void _tlbil_va(unsigned long address, unsigned int pid);
 
 #if defined(CONFIG_40x) || defined(CONFIG_8xx)
 #define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
@@ -38,31 +41,31 @@ extern void _tlbia(void);
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-	_tlbia();
+	_tlbil_pid(mm->context.id);
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 				  unsigned long vmaddr)
 {
-	_tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
+	_tlbil_va(vmaddr, vma ? vma->vm_mm->context.id : 0);
 }
 
 static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
 					 unsigned long vmaddr)
 {
-	_tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
+	flush_tlb_page(vma, vmaddr);
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
-	_tlbia();
+	_tlbil_pid(vma->vm_mm->context.id);
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start,
 					  unsigned long end)
 {
-	_tlbia();
+	_tlbil_pid(0);
 }
 
 #elif defined(CONFIG_PPC32)
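With the hunk above applied, the generic flush helpers on these embedded-MMU configs now bottom out in the local-only primitives instead of the broadcasting _tlbia/_tlbie. A hedged usage sketch (the caller shown is illustrative, not from this patch):

    #include <linux/mm.h>
    #include <asm/tlbflush.h>

    /* Illustrative caller: drop one page translation, then the whole context. */
    static void example_teardown(struct vm_area_struct *vma, unsigned long addr)
    {
    	flush_tlb_page(vma, addr);	/* now reaches _tlbil_va(addr, pid) */
    	flush_tlb_mm(vma->vm_mm);	/* now reaches _tlbil_pid(context id) */
    }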
@@ -274,6 +274,10 @@ _GLOBAL(real_writeb)
 /*
  * Flush MMU TLB
  */
+#ifndef CONFIG_FSL_BOOKE
+_GLOBAL(_tlbil_all)
+_GLOBAL(_tlbil_pid)
+#endif
 _GLOBAL(_tlbia)
 #if defined(CONFIG_40x)
 	sync			/* Flush to memory before changing mapping */
@@ -344,6 +348,9 @@ _GLOBAL(_tlbia)
 /*
  * Flush MMU TLB for a particular address
  */
+#ifndef CONFIG_FSL_BOOKE
+_GLOBAL(_tlbil_va)
+#endif
 _GLOBAL(_tlbie)
 #if defined(CONFIG_40x)
 	/* We run the search with interrupts disabled because we have to change
@@ -436,6 +443,53 @@ _GLOBAL(_tlbie)
 #endif /* ! CONFIG_40x */
 	blr
 
+#if defined(CONFIG_FSL_BOOKE)
+/*
+ * Flush MMU TLB, but only on the local processor (no broadcast)
+ */
+_GLOBAL(_tlbil_all)
+#define MMUCSR0_TLBFI	(MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
+			 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
+	li	r3,(MMUCSR0_TLBFI)@l
+	mtspr	SPRN_MMUCSR0, r3
+1:
+	mfspr	r3,SPRN_MMUCSR0
+	andi.	r3,r3,MMUCSR0_TLBFI@l
+	bne	1b
+	blr
+
+/*
+ * Flush MMU TLB for a particular process id, but only on the local processor
+ * (no broadcast)
+ */
+_GLOBAL(_tlbil_pid)
+/* we currently do an invalidate all since we don't have per pid invalidate */
+	li	r3,(MMUCSR0_TLBFI)@l
+	mtspr	SPRN_MMUCSR0, r3
+1:
+	mfspr	r3,SPRN_MMUCSR0
+	andi.	r3,r3,MMUCSR0_TLBFI@l
+	bne	1b
+	blr
+
+/*
+ * Flush MMU TLB for a particular address, but only on the local processor
+ * (no broadcast)
+ */
+_GLOBAL(_tlbil_va)
+	slwi	r4,r4,16
+	mtspr	SPRN_MAS6,r4		/* assume AS=0 for now */
+	tlbsx	0,r3
+	mfspr	r4,SPRN_MAS1		/* check valid */
+	andis.	r3,r4,MAS1_VALID@h
+	beqlr
+	rlwinm	r4,r4,0,1,31
+	mtspr	SPRN_MAS1,r4
+	tlbwe
+	blr
+#endif /* CONFIG_FSL_BOOKE */
+
+
 /*
  * Flush instruction cache.
  * This is a no-op on the 601.
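For readers less used to BookE assembly, here is a hedged C-level rendering of what the _tlbil_va sequence above does with the MAS registers. It is illustrative only: the helper name is hypothetical, the header locations are assumptions, and the real code stays in assembly for a reason.

    #include <asm/mmu.h>	/* assumed home of SPRN_MAS1/MAS6, MAS1_VALID */
    #include <asm/reg.h>	/* mtspr()/mfspr() */

    /* Hypothetical C rendering of the _tlbil_va assembly above. */
    static void tlbil_va_sketch(unsigned long ea, unsigned int pid)
    {
    	unsigned long mas1;

    	mtspr(SPRN_MAS6, pid << 16);		/* search on this PID, AS = 0 */
    	asm volatile("tlbsx 0,%0" : : "r" (ea));/* look the EA up in the TLB */
    	mas1 = mfspr(SPRN_MAS1);
    	if (!(mas1 & MAS1_VALID))		/* no matching entry: nothing to do */
    		return;
    	mtspr(SPRN_MAS1, mas1 & ~MAS1_VALID);	/* clear the valid bit...      */
    	asm volatile("tlbwe");			/* ...and write the entry back */
    }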
@@ -119,6 +119,9 @@ EXPORT_SYMBOL(flush_instruction_cache);
 EXPORT_SYMBOL(flush_tlb_kernel_range);
 EXPORT_SYMBOL(flush_tlb_page);
 EXPORT_SYMBOL(_tlbie);
+#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
+EXPORT_SYMBOL(_tlbil_va);
+#endif
 #endif
 EXPORT_SYMBOL(__flush_icache_range);
 EXPORT_SYMBOL(flush_dcache_range);
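A hedged illustration of why _tlbil_va picks up an export here: the flush helpers in tlbflush.h are static inlines on these platforms, so a module that calls flush_tlb_page() ends up referencing _tlbil_va() directly. The demo module below is purely hypothetical:

    #include <linux/module.h>
    #include <linux/mm.h>
    #include <asm/tlbflush.h>

    static int __init tlb_demo_init(void)
    {
    	/* On 4xx/8xx/FSL BookE this inline expands to _tlbil_va(vmaddr, 0),
    	 * so loading the module needs that symbol exported. */
    	flush_tlb_page(NULL, PAGE_OFFSET);
    	return 0;
    }

    static void __exit tlb_demo_exit(void)
    {
    }

    module_init(tlb_demo_init);
    module_exit(tlb_demo_exit);
    MODULE_LICENSE("GPL");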