arm: Enable highmem for rt

Fix up the ARM highmem implementation so it works with PREEMPT_RT.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
Thomas Gleixner 2013-02-13 11:03:11 +01:00 committed by Alibek Omarov
parent d63ae71591
commit cd57734517
3 changed files with 57 additions and 8 deletions

View File

@@ -4,6 +4,13 @@
#include <linux/thread_info.h>
#if defined CONFIG_PREEMPT_RT && defined CONFIG_HIGHMEM
/*
 * On PREEMPT_RT with highmem, kmap_atomic slots are tracked per task
 * (current->kmap_pte[] / current->kmap_idx in this patch) and must be
 * torn down / re-established when switching tasks; the out-of-line
 * implementation lives in the arch highmem code changed by this patch.
 */
void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
#else
/* !RT or !HIGHMEM: no per-task kmap state to transfer — empty stub. */
static inline void
switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
#endif
/*
* For v7 SMP cores running a preemptible kernel we may be pre-empted
* during a TLB maintenance operation, so execute an inner-shareable dsb
@@ -26,6 +33,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
#define switch_to(prev,next,last) \
do { \
__complete_pending_tlbi(); \
switch_kmaps(prev, next); \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)

View File

@@ -31,6 +31,11 @@ static inline pte_t get_fixmap_pte(unsigned long vaddr)
return *ptep;
}
/*
 * fixmap_idx - map a kmap_atomic slot @type to this CPU's fixmap index.
 *
 * Each CPU owns KM_TYPE_NR consecutive fixmap slots starting at
 * FIX_KMAP_BEGIN; @type selects the slot within this CPU's window.
 * Factored out so the same index computation is shared by
 * kmap_atomic(), __kunmap_atomic(), kmap_atomic_pfn() and
 * switch_kmaps() in this patch.
 */
static unsigned int fixmap_idx(int type)
{
return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
}
void *kmap(struct page *page)
{
might_sleep();
@@ -51,12 +56,13 @@ EXPORT_SYMBOL(kunmap);
void *kmap_atomic(struct page *page)
{
pte_t pte = mk_pte(page, kmap_prot);
unsigned int idx;
unsigned long vaddr;
void *kmap;
int type;
preempt_disable();
preempt_disable_nort();
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
@@ -76,7 +82,7 @@ void *kmap_atomic(struct page *page)
type = kmap_atomic_idx_push();
idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
idx = fixmap_idx(type);
vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
/*
@@ -90,7 +96,10 @@ void *kmap_atomic(struct page *page)
* in place, so the contained TLB flush ensures the TLB is updated
* with the new mapping.
*/
set_fixmap_pte(idx, mk_pte(page, kmap_prot));
#ifdef CONFIG_PREEMPT_RT
current->kmap_pte[type] = pte;
#endif
set_fixmap_pte(idx, pte);
return (void *)vaddr;
}
@@ -103,10 +112,13 @@ void __kunmap_atomic(void *kvaddr)
if (kvaddr >= (void *)FIXADDR_START) {
type = kmap_atomic_idx();
idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
idx = fixmap_idx(type);
if (cache_is_vivt())
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_PREEMPT_RT
current->kmap_pte[type] = __pte(0);
#endif
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(idx));
#else
@@ -119,28 +131,56 @@ void __kunmap_atomic(void *kvaddr)
kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
}
pagefault_enable();
preempt_enable();
preempt_enable_nort();
}
EXPORT_SYMBOL(__kunmap_atomic);
/*
 * kmap_atomic_pfn - atomically map the page at @pfn into a per-CPU
 * fixmap slot and return its kernel virtual address.
 *
 * NOTE(review): this span is a unified diff rendered with the +/-
 * markers stripped — several adjacent near-duplicate lines below are
 * the removed (old) statement immediately followed by its added (new)
 * replacement. Read only one of each pair as the live code; do not
 * treat the pairs as sequential execution.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
pte_t pte = pfn_pte(pfn, kmap_prot);
unsigned long vaddr;
int idx, type;
struct page *page = pfn_to_page(pfn);
/* old line (removed): unconditional preempt_disable() */
preempt_disable();
/* new line (added): RT keeps preemption enabled here; the _nort
 * variant disables preemption only on !RT kernels */
preempt_disable_nort();
pagefault_disable();
/* Lowmem pages already have a permanent kernel mapping. */
if (!PageHighMem(page))
return page_address(page);
type = kmap_atomic_idx_push();
/* old line (removed): open-coded index computation */
idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
/* new line (added): use the fixmap_idx() helper introduced above */
idx = fixmap_idx(type);
vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
#endif
/* old line (removed): PTE built inline at the set_fixmap_pte() call */
set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
#ifdef CONFIG_PREEMPT_RT
/* Record the mapping per task so switch_kmaps() can restore it
 * after a context switch. */
current->kmap_pte[type] = pte;
#endif
/* new line (added): reuse the pte computed at function entry */
set_fixmap_pte(idx, pte);
return (void *)vaddr;
}
#if defined CONFIG_PREEMPT_RT
/*
 * switch_kmaps - transfer per-task kmap_atomic fixmap mappings across a
 * context switch (called from the switch_to() path on RT, where a task
 * may be preempted while holding kmap_atomic mappings).
 *
 * Order matters: @prev_p's slots are cleared before @next_p's are
 * restored, since both tasks index the same per-CPU fixmap window
 * (fixmap_idx()).
 */
void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
{
int i;
/*
 * Clear @prev's kmap_atomic mappings
 */
for (i = 0; i < prev_p->kmap_idx; i++) {
int idx = fixmap_idx(i);
set_fixmap_pte(idx, __pte(0));
}
/*
 * Restore @next_p's kmap_atomic mappings
 */
for (i = 0; i < next_p->kmap_idx; i++) {
int idx = fixmap_idx(i);
/* Slots already released store __pte(0) (see __kunmap_atomic);
 * only re-establish mappings that are still live. */
if (!pte_none(next_p->kmap_pte[i]))
set_fixmap_pte(idx, next_p->kmap_pte[i]);
}
}
#endif

View File

@@ -8,6 +8,7 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <asm/cacheflush.h>