1edc17832d
David S. Miller wrote:
"The way we do that now is overkill. We only needed to use the MMU
cache ops when we had sun4c around, because sun4c lacked support for
the "flush" instruction. But all sun4m and later chips have it, so we
can use it unconditionally.

So in the per_cpu_patch() code, get rid of the cache ops invocation,
and instead execute a "flush %reg" after each of the instruction patch
assignments, where %reg is set to the address of the instruction that
was stored into.

Perhaps take the flushi() definition from asm/cacheflush_64.h and
place it into asm/cacheflush.h, then you can simply use that."

Implemented as per the suggestion. Also moved run-time patching before
the call to paging_init(), so helper methods in paging_init() may
utilise run-time patching too.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
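For illustration, a minimal sketch of the suggested approach. flushi()
is the definition this change moves into asm/cacheflush.h; the
patch-entry layout and the patch_insns() helper are hypothetical
stand-ins for the actual per_cpu_patch() code, not copied from it.

#include <linux/init.h>

/* flushi(addr) - flush the I-cache line holding one instruction word,
 * to allow use of self-modifying code (from asm/cacheflush.h). */
#define flushi(addr)	__asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")

/* Hypothetical patch-entry layout: where to patch, and what to write. */
struct insn_patch_entry {
	unsigned int	*addr;	/* instruction to overwrite */
	unsigned int	insn;	/* replacement instruction word */
};

static void __init patch_insns(struct insn_patch_entry *p,
			       struct insn_patch_entry *end)
{
	for (; p < end; p++) {
		/* Store the new instruction, then execute "flush %reg"
		 * with %reg set to the address just stored into, instead
		 * of invoking the MMU cache ops. */
		*p->addr = p->insn;
		flushi(p->addr);
	}
}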
#ifndef _SPARC64_CACHEFLUSH_H
#define _SPARC64_CACHEFLUSH_H

#include <asm/page.h>

#ifndef __ASSEMBLY__

#include <linux/mm.h>

/* Cache flush operations. */
#define flushw_all()	__asm__ __volatile__("flushw")

extern void __flushw_user(void);
#define flushw_user() __flushw_user()

#define flush_user_windows flushw_user
#define flush_register_windows flushw_all

/* These are the same regardless of whether this is an SMP kernel or not. */
#define flush_cache_mm(__mm) \
	do { if ((__mm) == current->mm) flushw_user(); } while(0)
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
#define flush_cache_range(vma, start, end) \
	flush_cache_mm((vma)->vm_mm)
#define flush_cache_page(vma, page, pfn) \
	flush_cache_mm((vma)->vm_mm)

/*
 * On spitfire, the icache doesn't snoop local stores and we don't
 * use block commit stores (which invalidate icache lines) during
 * module load, so we need this.
 */
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_icache_page(unsigned long);

extern void __flush_dcache_page(void *addr, int flush_icache);
extern void flush_dcache_page_impl(struct page *page);
#ifdef CONFIG_SMP
extern void smp_flush_dcache_page_impl(struct page *page, int cpu);
extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
#else
#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
#endif

extern void __flush_dcache_range(unsigned long start, unsigned long end);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);

#define flush_icache_page(vma, pg)	do { } while(0)
#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)

extern void flush_ptrace_access(struct vm_area_struct *, struct page *,
				unsigned long uaddr, void *kaddr,
				unsigned long len, int write);

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		flush_cache_page(vma, vaddr, page_to_pfn(page));	\
		memcpy(dst, src, len);					\
		flush_ptrace_access(vma, page, vaddr, src, len, 0);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		flush_cache_page(vma, vaddr, page_to_pfn(page));	\
		memcpy(dst, src, len);					\
		flush_ptrace_access(vma, page, vaddr, dst, len, 1);	\
	} while (0)

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#ifdef CONFIG_DEBUG_PAGEALLOC
/* internal debugging function */
void kernel_map_pages(struct page *page, int numpages, int enable);
#endif

#endif /* !__ASSEMBLY__ */

#endif /* _SPARC64_CACHEFLUSH_H */
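As a usage note, the spitfire comment in the header above is why
callers that write instructions into kernel memory must follow the
stores with flush_icache_range(). A hedged sketch of that pattern;
install_code() and its parameters are hypothetical, not kernel code:

#include <linux/string.h>
#include <asm/cacheflush.h>

/* Hypothetical helper: copy 'len' bytes of instructions into 'dst',
 * then make them visible to instruction fetch, as the module loader
 * must on spitfire where the I-cache does not snoop local stores. */
static void install_code(void *dst, const void *insns, unsigned long len)
{
	memcpy(dst, insns, len);
	flush_icache_range((unsigned long) dst,
			   (unsigned long) dst + len);
}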