s390/tlb: Convert to generic mmu_gather

No change in behavior intended.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: aneesh.kumar@linux.vnet.ibm.com
Cc: heiko.carstens@de.ibm.com
Cc: linux@armlinux.org.uk
Cc: npiggin@gmail.com
Cc: will.deacon@arm.com
Link: http://lkml.kernel.org/r/20180918125151.31744-3-schwidefsky@de.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 9de7d833e3
parent 952a31c9e6
Author:    Martin Schwidefsky <schwidefsky@de.ibm.com>  2018-09-18 14:51:51 +02:00
Committer: Ingo Molnar <mingo@kernel.org>
3 files changed, 43 insertions(+), 152 deletions(-)

diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig

@@ -164,11 +164,13 @@ config S390
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_MEMBLOCK_PHYS_MAP
+	select HAVE_MMU_GATHER_NO_GATHER
 	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_NOP_MCOUNT
 	select HAVE_OPROFILE
 	select HAVE_PCI
 	select HAVE_PERF_EVENTS
+	select HAVE_RCU_TABLE_FREE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RSEQ
 	select HAVE_SYSCALL_TRACEPOINTS
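
Selecting HAVE_MMU_GATHER_NO_GATHER tells the generic mmu_gather code not to
build its own page batches; s390 frees pages immediately from
__tlb_remove_page_size(). HAVE_RCU_TABLE_FREE switches the architecture to the
generic RCU-batched freeing of page tables, which is what allows the private
batching code in pgalloc.c to be deleted further down. Roughly, the resulting
generic page path looks as follows (a condensed sketch, not the verbatim
asm-generic/tlb.h code):

/*
 * Condensed sketch of the generic path once HAVE_MMU_GATHER_NO_GATHER is
 * selected; the real code lives in <asm-generic/tlb.h> and mm/mmu_gather.c.
 */
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	/* No generic page batching: the arch hook frees the page directly. */
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);	/* never taken on s390, the hook returns false */
}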

diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h

@@ -22,98 +22,39 @@
  * Pages used for the page tables is a different story. FIXME: more
  */
 
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/swap.h>
-#include <asm/processor.h>
+void __tlb_remove_table(void *_table);
+static inline void tlb_flush(struct mmu_gather *tlb);
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+					  struct page *page, int page_size);
+
+#define tlb_start_vma(tlb, vma)			do { } while (0)
+#define tlb_end_vma(tlb, vma)			do { } while (0)
+
+#define tlb_flush tlb_flush
+#define pte_free_tlb pte_free_tlb
+#define pmd_free_tlb pmd_free_tlb
+#define p4d_free_tlb p4d_free_tlb
+#define pud_free_tlb pud_free_tlb
+
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
-
-struct mmu_gather {
-	struct mm_struct *mm;
-	struct mmu_table_batch *batch;
-	unsigned int fullmm;
-	unsigned long start, end;
-};
-
-struct mmu_table_batch {
-	struct rcu_head rcu;
-	unsigned int nr;
-	void *tables[0];
-};
-
-#define MAX_TABLE_BATCH \
-	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
-
-extern void tlb_table_flush(struct mmu_gather *tlb);
-extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-		    unsigned long start, unsigned long end)
-{
-	tlb->mm = mm;
-	tlb->start = start;
-	tlb->end = end;
-	tlb->fullmm = !(start | (end+1));
-	tlb->batch = NULL;
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-	__tlb_flush_mm_lazy(tlb->mm);
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-	tlb_table_flush(tlb);
-}
-
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-	tlb_flush_mmu_tlbonly(tlb);
-	tlb_flush_mmu_free(tlb);
-}
-
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
-		    unsigned long start, unsigned long end, bool force)
-{
-	if (force) {
-		tlb->start = start;
-		tlb->end = end;
-	}
-
-	tlb_flush_mmu(tlb);
-}
+#include <asm-generic/tlb.h>
 
 /*
  * Release the page cache reference for a pte removed by
  * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
  * has already been freed, so just do free_page_and_swap_cache.
  */
-static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	free_page_and_swap_cache(page);
-	return false; /* avoid calling tlb_flush_mmu */
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	free_page_and_swap_cache(page);
-}
-
 static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
 					  struct page *page, int page_size)
 {
-	return __tlb_remove_page(tlb, page);
+	free_page_and_swap_cache(page);
+	return false;
 }
 
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
-					struct page *page, int page_size)
+static inline void tlb_flush(struct mmu_gather *tlb)
 {
-	return tlb_remove_page(tlb, page);
+	__tlb_flush_mm_lazy(tlb->mm);
 }
 
 /*
@@ -123,6 +64,15 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				unsigned long address)
 {
+	__tlb_adjust_range(tlb, address, PAGE_SIZE);
+	tlb->mm->context.flush_mm = 1;
+	tlb->freed_tables = 1;
+	tlb->cleared_ptes = 1;
+	/*
+	 * page_table_free_rcu takes care of the allocation bit masks
+	 * of the 2K table fragments in the 4K page table page,
+	 * then calls tlb_remove_table.
+	 */
 	page_table_free_rcu(tlb, (unsigned long *) pte, address);
 }
 
@@ -139,6 +89,10 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 	if (mm_pmd_folded(tlb->mm))
 		return;
 	pgtable_pmd_page_dtor(virt_to_page(pmd));
+	__tlb_adjust_range(tlb, address, PAGE_SIZE);
+	tlb->mm->context.flush_mm = 1;
+	tlb->freed_tables = 1;
+	tlb->cleared_puds = 1;
 	tlb_remove_table(tlb, pmd);
 }
 
@@ -154,6 +108,10 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
 {
 	if (mm_p4d_folded(tlb->mm))
 		return;
+	__tlb_adjust_range(tlb, address, PAGE_SIZE);
+	tlb->mm->context.flush_mm = 1;
+	tlb->freed_tables = 1;
+	tlb->cleared_p4ds = 1;
 	tlb_remove_table(tlb, p4d);
 }
 
@@ -169,19 +127,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 {
 	if (mm_pud_folded(tlb->mm))
 		return;
+	tlb->mm->context.flush_mm = 1;
+	tlb->freed_tables = 1;
+	tlb->cleared_puds = 1;
 	tlb_remove_table(tlb, pud);
 }
 
-#define tlb_start_vma(tlb, vma)			do { } while (0)
-#define tlb_end_vma(tlb, vma)			do { } while (0)
-#define tlb_remove_tlb_entry(tlb, ptep, addr)	do { } while (0)
-#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr)	do { } while (0)
-#define tlb_migrate_finish(mm)			do { } while (0)
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
-	tlb_remove_tlb_entry(tlb, ptep, address)
-
-static inline void tlb_change_page_size(struct mmu_gather *tlb, unsigned int page_size)
-{
-}
 
 #endif /* _S390_TLB_H */
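
With the private struct mmu_gather gone, the table-freeing hooks above record
what they changed directly in the generic gather (freed_tables plus the
cleared_* bits) and in mm->context.flush_mm, and the flush itself comes from
the tlb_flush() override, which maps to __tlb_flush_mm_lazy(). For
orientation, this is roughly how generic code drives these hooks after the
conversion; the caller below is hypothetical, while tlb_gather_mmu() and
tlb_finish_mmu() are the generic entry points of this kernel version:

/* Illustrative sketch only: example_unmap() is a made-up caller. */
static void example_unmap(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* generic setup, no arch_tlb_gather_mmu() */
	/*
	 * unmap_page_range() clears PTEs and ends up in the hooks above:
	 * pte_free_tlb() marks freed_tables/cleared_ptes and queues the 2K fragment,
	 * pmd/p4d/pud_free_tlb() mark their level and call tlb_remove_table().
	 */
	tlb_finish_mmu(&tlb, start, end);	/* flushes via tlb_flush() == __tlb_flush_mm_lazy() */
}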

diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c

@@ -290,7 +290,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
 	tlb_remove_table(tlb, table);
 }
 
-static void __tlb_remove_table(void *_table)
+void __tlb_remove_table(void *_table)
 {
 	unsigned int mask = (unsigned long) _table & 3;
 	void *table = (void *)((unsigned long) _table ^ mask);
@@ -316,67 +316,6 @@ static void __tlb_remove_table(void *_table)
 	}
 }
 
-static void tlb_remove_table_smp_sync(void *arg)
-{
-	/* Simply deliver the interrupt */
-}
-
-static void tlb_remove_table_one(void *table)
-{
-	/*
-	 * This isn't an RCU grace period and hence the page-tables cannot be
-	 * assumed to be actually RCU-freed.
-	 *
-	 * It is however sufficient for software page-table walkers that rely
-	 * on IRQ disabling. See the comment near struct mmu_table_batch.
-	 */
-	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
-	__tlb_remove_table(table);
-}
-
-static void tlb_remove_table_rcu(struct rcu_head *head)
-{
-	struct mmu_table_batch *batch;
-	int i;
-
-	batch = container_of(head, struct mmu_table_batch, rcu);
-
-	for (i = 0; i < batch->nr; i++)
-		__tlb_remove_table(batch->tables[i]);
-
-	free_page((unsigned long)batch);
-}
-
-void tlb_table_flush(struct mmu_gather *tlb)
-{
-	struct mmu_table_batch **batch = &tlb->batch;
-
-	if (*batch) {
-		call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
-		*batch = NULL;
-	}
-}
-
-void tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
-	struct mmu_table_batch **batch = &tlb->batch;
-
-	tlb->mm->context.flush_mm = 1;
-	if (*batch == NULL) {
-		*batch = (struct mmu_table_batch *)
-			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
-		if (*batch == NULL) {
-			__tlb_flush_mm_lazy(tlb->mm);
-			tlb_remove_table_one(table);
-			return;
-		}
-		(*batch)->nr = 0;
-	}
-	(*batch)->tables[(*batch)->nr++] = table;
-	if ((*batch)->nr == MAX_TABLE_BATCH)
-		tlb_flush_mmu(tlb);
-}
-
 /*
  * Base infrastructure required to generate basic asces, region, segment,
  * and page tables that do not make use of enhanced features like EDAT1.
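
The batching deleted here is not lost: with HAVE_RCU_TABLE_FREE selected, the
generic code in mm/mmu_gather.c provides tlb_remove_table(), queues tables in
an mmu_table_batch, frees them after an RCU grace period (or via an
IPI-synchronized fallback when no batch page can be allocated) and calls back
into the now exported s390 __tlb_remove_table() for each entry. A condensed
sketch of that generic path, intended to show the shape rather than the exact
code:

/*
 * Condensed sketch of the generic HAVE_RCU_TABLE_FREE path that replaces the
 * code removed above; the authoritative version is in mm/mmu_gather.c.
 */
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			tlb_remove_table_one(table);	/* IPI sync, then __tlb_remove_table() */
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);	/* call_rcu(); __tlb_remove_table() per entry */
}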