arm-enable-highmem-for-rt.patch

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Author:    Thomas Gleixner <tglx@linutronix.de>  2013-02-13 11:03:11 +01:00
Committer: Alibek Omarov
commit 4fa1901f89 (parent 6f156f654f)
4 changed files with 49 additions and 3 deletions

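This patch drops the !PREEMPT_RT_FULL restriction that an earlier RT patch
placed on ARM's HIGHMEM option, and makes atomic kmaps survive preemption by
saving and restoring them across context switches. It relies on per-task kmap
state introduced elsewhere in the RT series ("mm, rt: kmap_atomic
scheduling"); roughly, task_struct is assumed to carry the following RT-only
fields (a sketch of that prerequisite, not part of this diff):

	#ifdef CONFIG_PREEMPT_RT_FULL
		int	kmap_idx;		/* live atomic kmaps of this task */
		pte_t	kmap_pte[KM_TYPE_NR];	/* their ptes, indexed by slot */
	#endif

switch_kmaps(), added below, tears down the outgoing task's fixmap slots and
replays the incoming task's slots at every context switch.
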
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1790,7 +1790,7 @@ config HAVE_ARCH_PFN_VALID
 
 config HIGHMEM
 	bool "High Memory Support"
-	depends on MMU && !PREEMPT_RT_FULL
+	depends on MMU
 	help
 	  The address space of ARM processors is only 4 Gigabytes large
 	  and it has to accommodate user address space, kernel address

--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -3,6 +3,13 @@
 
 #include <linux/thread_info.h>
 
+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
+#else
+static inline void
+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
+#endif
+
 /*
  * For v7 SMP cores running a preemptible kernel we may be pre-empted
  * during a TLB maintenance operation, so execute an inner-shareable dsb
@@ -22,6 +29,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
 #define switch_to(prev,next,last)					\
 do {									\
+	switch_kmaps(prev, next);					\
 	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
 } while (0)
 
 #endif /* __ASM_ARM_SWITCH_TO_H */

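The switch_to() hook above is what makes it safe for a task to be preempted
while it holds an atomic kmap on RT. A hypothetical caller, for illustration
only (not part of this diff):

	void *vaddr = kmap_atomic(page);	/* pte also recorded in current->kmap_pte[] */
	memcpy(vaddr, src, PAGE_SIZE);		/* on RT the task may be preempted here */
	kunmap_atomic(vaddr);			/* slot torn down, kmap_pte[] entry cleared */

If a switch happens between map and unmap, switch_kmaps() clears the slots on
the outgoing side and re-establishes them on whichever CPU the task resumes on.
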
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -38,6 +38,7 @@ EXPORT_SYMBOL(kunmap);
 
 void *kmap_atomic(struct page *page)
 {
+	pte_t pte = mk_pte(page, kmap_prot);
 	unsigned int idx;
 	unsigned long vaddr;
 	void *kmap;
@@ -76,7 +77,10 @@ void *kmap_atomic(struct page *page)
 	 * in place, so the contained TLB flush ensures the TLB is updated
 	 * with the new mapping.
 	 */
-	set_top_pte(vaddr, mk_pte(page, kmap_prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = pte;
+#endif
+	set_top_pte(vaddr, pte);
 
 	return (void *)vaddr;
 }
@@ -93,6 +97,9 @@ void __kunmap_atomic(void *kvaddr)
 
 		if (cache_is_vivt())
 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+#ifdef CONFIG_PREEMPT_RT_FULL
+		current->kmap_pte[type] = __pte(0);
+#endif
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 #else
@@ -110,6 +117,7 @@ EXPORT_SYMBOL(__kunmap_atomic);
 
 void *kmap_atomic_pfn(unsigned long pfn)
 {
+	pte_t pte = pfn_pte(pfn, kmap_prot);
 	unsigned long vaddr;
 	int idx, type;
@@ -121,7 +129,10 @@ void *kmap_atomic_pfn(unsigned long pfn)
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(get_top_pte(vaddr)));
 #endif
-	set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = pte;
+#endif
+	set_top_pte(vaddr, pte);
 
 	return (void *)vaddr;
 }
@@ -135,3 +146,29 @@ struct page *kmap_atomic_to_page(const void *ptr)
 
 	return pte_page(get_top_pte(vaddr));
 }
+
+#if defined CONFIG_PREEMPT_RT_FULL
+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
+{
+	int i;
+
+	/*
+	 * Clear @prev's kmap_atomic mappings
+	 */
+	for (i = 0; i < prev_p->kmap_idx; i++) {
+		int idx = i + KM_TYPE_NR * smp_processor_id();
+
+		set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx), __pte(0));
+	}
+	/*
+	 * Restore @next_p's kmap_atomic mappings
+	 */
+	for (i = 0; i < next_p->kmap_idx; i++) {
+		int idx = i + KM_TYPE_NR * smp_processor_id();
+
+		if (!pte_none(next_p->kmap_pte[i]))
+			set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx),
+				    next_p->kmap_pte[i]);
+	}
+}
+#endif

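The slot arithmetic in switch_kmaps() mirrors what kmap_atomic() itself uses:
each CPU owns a private window of KM_TYPE_NR fixmap slots. A worked example,
assuming KM_TYPE_NR is 16 as on ARM: for kmap slot i == 2 of a task running
on CPU 1, idx = 2 + 16 * 1 = 18, so the mapping is replayed at
__fix_to_virt(FIX_KMAP_BEGIN + 18).
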
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -7,6 +7,7 @@
 #include <linux/mm.h>
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
+#include <linux/sched.h>
 
 #include <asm/cacheflush.h>
 
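The new <linux/sched.h> include is needed because on RT the kmap type index
is tracked per task rather than per CPU, so highmem.h now dereferences
current. A rough sketch of that companion change from the RT series (the
exact code in highmem.h may differ):

	static inline int kmap_atomic_idx_push(void)
	{
	#ifdef CONFIG_PREEMPT_RT_FULL
		current->kmap_idx++;			/* per task: survives preemption */
		BUG_ON(current->kmap_idx > KM_TYPE_NR);
		return current->kmap_idx - 1;
	#else
		return __this_cpu_inc_return(__kmap_atomic_idx) - 1;
	#endif
	}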