sh: update_mmu_cache() consolidation.

This splits out a separate __update_cache()/__update_tlb() for
update_mmu_cache() to wrap in to. This lets us share the common
__update_cache() bits while keeping special __update_tlb() handling
broken out.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
This commit is contained in:
Paul Mundt 2009-07-29 00:12:17 +09:00
parent 0dfae7d5a2
commit 9cef749269
7 changed files with 67 additions and 74 deletions

View File

@@ -134,8 +134,19 @@ typedef pte_t *pte_addr_t;
#define pgtable_cache_init() do { } while (0)

struct vm_area_struct;
-extern void update_mmu_cache(struct vm_area_struct * vma,
-			     unsigned long address, pte_t pte);
+extern void __update_cache(struct vm_area_struct *vma,
+			   unsigned long address, pte_t pte);
extern void __update_tlb(struct vm_area_struct *vma,
unsigned long address, pte_t pte);
static inline void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
__update_cache(vma, address, pte);
__update_tlb(vma, address, pte);
}
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
extern void page_table_range_init(unsigned long start, unsigned long end,

View File

@@ -134,3 +134,24 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);
void __update_cache(struct vm_area_struct *vma,
unsigned long address, pte_t pte)
{
struct page *page;
unsigned long pfn = pte_pfn(pte);
if (!boot_cpu_data.dcache.n_aliases)
return;
page = pfn_to_page(pfn);
if (pfn_valid(pfn) && page_mapping(page)) {
int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
if (dirty) {
unsigned long addr = (unsigned long)page_address(page);
if (pages_do_alias(addr, address & PAGE_MASK))
__flush_wback_region((void *)addr, PAGE_SIZE);
}
}
}

View File

@@ -46,10 +46,13 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
	BUG();
}

-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+}
+
+void __update_cache(struct vm_area_struct *vma,
+		    unsigned long address, pte_t pte)
{
-	BUG();
}

void __init page_table_range_init(unsigned long start, unsigned long end,

View File

@@ -16,15 +16,14 @@
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
-	unsigned long flags;
-	unsigned long pteval;
-	unsigned long vpn;
+	unsigned long flags, pteval, vpn;

-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
+	/*
+	 * Handle debugger faulting in for debugee.
+	 */
+	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

View File

@@ -27,32 +27,16 @@
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
-	unsigned long flags;
-	unsigned long pteval;
-	unsigned long vpn;
-	unsigned long pfn = pte_pfn(pte);
-	struct page *page;
+	unsigned long flags, pteval, vpn;

-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
+	/*
+	 * Handle debugger faulting in for debugee.
+	 */
+	if (current->active_mm != vma->vm_mm)
		return;

-	page = pfn_to_page(pfn);
-	if (pfn_valid(pfn) && page_mapping(page)) {
-#if defined(CONFIG_SH7705_CACHE_32KB)
-		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-		if (dirty) {
-			unsigned long addr = (unsigned long)page_address(page);
-			if (pages_do_alias(addr, address & PAGE_MASK))
-				__flush_wback_region((void *)addr, PAGE_SIZE);
-		}
-#endif
-	}
-
	local_irq_save(flags);

	/* Set PTEH register */
@@ -93,4 +77,3 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
	for (i = 0; i < ways; i++)
		ctrl_outl(data, addr + (i << 8));
}

View File

@@ -15,33 +15,16 @@
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
-	unsigned long flags;
-	unsigned long pteval;
-	unsigned long vpn;
-	unsigned long pfn = pte_pfn(pte);
-	struct page *page;
+	unsigned long flags, pteval, vpn;

-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
+	/*
+	 * Handle debugger faulting in for debugee.
+	 */
+	if (current->active_mm != vma->vm_mm)
		return;

-	page = pfn_to_page(pfn);
-	if (pfn_valid(pfn) && page_mapping(page)) {
-#ifndef CONFIG_SMP
-		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-		if (dirty) {
-			unsigned long addr = (unsigned long)page_address(page);
-			if (pages_do_alias(addr, address & PAGE_MASK))
-				__flush_wback_region((void *)addr, PAGE_SIZE);
-		}
-#endif
-	}
-
	local_irq_save(flags);

	/* Set PTEH register */

View File

@@ -329,22 +329,6 @@ do_sigbus:
		goto no_context;
}
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
-{
-	/*
-	 * This appears to get called once for every pte entry that gets
-	 * established => I don't think it's efficient to try refilling the
-	 * TLBs with the pages - some may not get accessed even. Also, for
-	 * executable pages, it is impossible to determine reliably here which
-	 * TLB they should be mapped into (or both even).
-	 *
-	 * So, just do nothing here and handle faults on demand. In the
-	 * TLBMISS handling case, the refill is now done anyway after the pte
-	 * has been fixed up, so that deals with most useful cases.
-	 */
-}
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
	unsigned long long match, pteh=0, lpage;
@@ -482,3 +466,12 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
	/* FIXME: Optimize this later.. */
	flush_tlb_all();
}
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
}
void __update_cache(struct vm_area_struct *vma,
unsigned long address, pte_t pte)
{
}