diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 228cf0b295db..f26280d9e88d 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -268,7 +268,7 @@ struct _lowcore {
 	__u64	vdso_per_cpu_data;		/* 0x0358 */
 	__u64	machine_flags;			/* 0x0360 */
 	__u64	ftrace_func;			/* 0x0368 */
-	__u64	sie_hook;			/* 0x0370 */
+	__u64	gmap;				/* 0x0370 */
 	__u64	cmf_hpp;			/* 0x0378 */
 
 	/* Interrupt response block. */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 82d0847896a0..4506791adcd5 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -6,6 +6,7 @@ typedef struct {
 	unsigned int flush_mm;
 	spinlock_t list_lock;
 	struct list_head pgtable_list;
+	struct list_head gmap_list;
 	unsigned long asce_bits;
 	unsigned long asce_limit;
 	unsigned long vdso_base;
@@ -17,6 +18,7 @@ typedef struct {
 
 #define INIT_MM_CONTEXT(name)						      \
 	.context.list_lock    = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
-	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list),
+	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list),    \
+	.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
 
 #endif
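The new mm->context.gmap_list ties every guest address space to its parent mm; the __tlb_flush_mm() change further down (tlbflush.h) only takes the cheap IDTE-by-ASCE path when this list is empty, because guest ASCEs alias the same page tables. A minimal userspace model of that bookkeeping — the list implementation and all names are illustrative stand-ins for <linux/list.h>, struct mm_struct and struct gmap, not code from the patch:

```c
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next; new->prev = head;
	head->next->prev = new; head->next = new;
}
static void list_del(struct list_head *e)
{
	e->prev->next = e->next; e->next->prev = e->prev;
}
static int list_empty(const struct list_head *h) { return h->next == h; }

struct mm { struct list_head gmap_list; };
struct gmap { struct list_head list; struct mm *mm; };

int main(void)
{
	struct mm mm;
	struct gmap g;

	INIT_LIST_HEAD(&mm.gmap_list);
	/* No guest address space: a per-ASCE IDTE flush is safe. */
	printf("idte ok: %d\n", list_empty(&mm.gmap_list));

	g.mm = &mm;
	list_add(&g.list, &mm.gmap_list);	/* what gmap_alloc() does */
	/* Guest ASCEs now alias these page tables: flush globally. */
	printf("idte ok: %d\n", list_empty(&mm.gmap_list));

	list_del(&g.list);			/* what gmap_free() does */
	printf("idte ok: %d\n", list_empty(&mm.gmap_list));
	return 0;
}
```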
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 38e71ebcd3c2..8eef9b5b3cf4 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -20,7 +20,7 @@
 unsigned long *crst_table_alloc(struct mm_struct *);
 void crst_table_free(struct mm_struct *, unsigned long *);
 
-unsigned long *page_table_alloc(struct mm_struct *);
+unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
 void page_table_free(struct mm_struct *, unsigned long *);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 void page_table_free_rcu(struct mmu_gather *, unsigned long *);
@@ -115,6 +115,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	spin_lock_init(&mm->context.list_lock);
 	INIT_LIST_HEAD(&mm->context.pgtable_list);
+	INIT_LIST_HEAD(&mm->context.gmap_list);
 	return (pgd_t *) crst_table_alloc(mm);
 }
 #define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
@@ -133,8 +134,8 @@ static inline void pmd_populate(struct mm_struct *mm,
 
 /*
  * page table entry allocation/free routines.
  */
-#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
-#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
+#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))
+#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))
 #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
 #define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
 
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 801fbe1d837d..519eb5f187ef 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -654,6 +654,48 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
 #endif
 }
 
+/**
+ * struct gmap - guest address space
+ * @mm: pointer to the parent mm_struct
+ * @table: pointer to the page directory
+ * @crst_list: list of all crst tables used in the guest address space
+ */
+struct gmap {
+	struct list_head list;
+	struct mm_struct *mm;
+	unsigned long *table;
+	struct list_head crst_list;
+};
+
+/**
+ * struct gmap_rmap - reverse mapping for segment table entries
+ * @list: pointer to the next gmap_rmap structure in the list
+ * @entry: pointer to a segment table entry
+ */
+struct gmap_rmap {
+	struct list_head list;
+	unsigned long *entry;
+};
+
+/**
+ * struct gmap_pgtable - gmap information attached to a page table
+ * @vmaddr: address of the 1MB segment in the process virtual memory
+ * @mapper: list of segment table entries mapping a page table
+ */
+struct gmap_pgtable {
+	unsigned long vmaddr;
+	struct list_head mapper;
+};
+
+struct gmap *gmap_alloc(struct mm_struct *mm);
+void gmap_free(struct gmap *gmap);
+void gmap_enable(struct gmap *gmap);
+void gmap_disable(struct gmap *gmap);
+int gmap_map_segment(struct gmap *gmap, unsigned long from,
+		     unsigned long to, unsigned long length);
+int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
+unsigned long gmap_fault(unsigned long address, struct gmap *);
+
 /*
  * Certain architectures need to do special things when PTEs
  * within a page table are directly modified. Thus, the following
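Taken together, pgtable.h now declares the whole gmap API surface. The sketch below shows one plausible call sequence for a hypervisor backend such as KVM; it is illustrative only — demo_guest_setup() and the chosen addresses are invented, they are not part of this patch, and error handling is abbreviated:

```c
#include <linux/err.h>
#include <asm/pgtable.h>

static int demo_guest_setup(struct mm_struct *mm)
{
	struct gmap *gmap;
	unsigned long host_addr;
	int rc;

	gmap = gmap_alloc(mm);			/* new guest address space */
	if (!gmap)
		return -ENOMEM;

	/* Back guest real 0..256 MB with a parent mapping at 2 GB. */
	rc = gmap_map_segment(gmap, 0x80000000UL, 0x0UL, 0x10000000UL);
	if (rc)
		goto out_free;

	gmap_enable(gmap);	/* primary ASCE now points at guest tables */
	/*
	 * ... run the guest under SIE; host faults on guest addresses are
	 * translated lazily by the gmap_fault() hook in do_exception().
	 * The same translation can also be requested directly:
	 */
	host_addr = gmap_fault(0x100000UL, gmap);
	gmap_disable(gmap);	/* back to the standard user ASCE */

	rc = IS_ERR_VALUE(host_addr) ? (int) host_addr : 0;
out_free:
	gmap_free(gmap);
	return rc;
}
```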
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 1300c3025334..55dfcc8bdc0d 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -80,6 +80,7 @@ struct thread_struct {
 	mm_segment_t mm_segment;
 	unsigned long prot_addr;	/* address of protection-excep. */
 	unsigned int trap_no;
+	unsigned long gmap_addr;	/* address of last gmap fault. */
 	struct per_regs per_user;	/* User specified PER registers */
 	struct per_event per_event;	/* Cause of the last PER trap */
 	/* pfault_wait is used to block the process on a pfault event */
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index b7a4f2eb0057..304445382382 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -80,7 +80,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 	 * on all cpus instead of doing a local flush if the mm
 	 * only ran on the local cpu.
 	 */
-	if (MACHINE_HAS_IDTE)
+	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
 		__tlb_flush_idte((unsigned long) mm->pgd |
 				 mm->context.asce_bits);
 	else
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index edfbd17d7082..05d8f38734ec 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -151,7 +151,7 @@ int main(void)
 	DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
 	DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
 	DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
-	DEFINE(__LC_SIE_HOOK, offsetof(struct _lowcore, sie_hook));
+	DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
 	DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
 #endif /* CONFIG_32BIT */
 	return 0;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 095f782a5512..9564fc779b27 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -303,9 +303,24 @@ static inline int do_exception(struct pt_regs *regs, int access,
 	flags = FAULT_FLAG_ALLOW_RETRY;
 	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
 		flags |= FAULT_FLAG_WRITE;
-retry:
 	down_read(&mm->mmap_sem);
 
+#ifdef CONFIG_PGSTE
+	if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) {
+		address = gmap_fault(address,
+				     (struct gmap *) S390_lowcore.gmap);
+		if (address == -EFAULT) {
+			fault = VM_FAULT_BADMAP;
+			goto out_up;
+		}
+		if (address == -ENOMEM) {
+			fault = VM_FAULT_OOM;
+			goto out_up;
+		}
+	}
+#endif
+
+retry:
 	fault = VM_FAULT_BADMAP;
 	vma = find_vma(mm, address);
 	if (!vma)
@@ -356,6 +371,7 @@ retry:
 			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
 			 * of starvation. */
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			down_read(&mm->mmap_sem);
 			goto retry;
 		}
 	}
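Note how gmap_fault() folds both translated addresses and errnos into a single unsigned long, so the fault handler above compares the unsigned result directly against -EFAULT/-ENOMEM. A small standalone demonstration of that convention — fake_gmap_fault() is a made-up stand-in, only the encoding trick is from the patch:

```c
#include <errno.h>
#include <stdio.h>

static unsigned long fake_gmap_fault(unsigned long address, int fail)
{
	if (fail)
		return -EFAULT;		/* wraps to 0xfff...fff2 */
	return address | 0x500UL;	/* "translated" host address */
}

int main(void)
{
	unsigned long rc = fake_gmap_fault(0x100000UL, 1);

	/* The kernel writes this as "rc == -EFAULT"; the negative errno
	 * converts to the same unsigned bit pattern on both sides. */
	if (rc == (unsigned long) -EFAULT)
		printf("fault: bad guest mapping\n");
	rc = fake_gmap_fault(0x100000UL, 0);
	printf("host address: 0x%lx\n", rc);
	return 0;
}
```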
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index a4d856db9154..597bb2d27c3c 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -35,7 +35,7 @@ int arch_prepare_hugepage(struct page *page)
 	if (MACHINE_HAS_HPAGE)
 		return 0;
 
-	ptep = (pte_t *) pte_alloc_one(&init_mm, address);
+	ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
 	if (!ptep)
 		return -ENOMEM;
 
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 37a23c223705..2adb23938a7f 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -16,6 +16,7 @@
 #include <linux/module.h>
 #include <linux/quicklist.h>
 #include <linux/rcupdate.h>
+#include <linux/slab.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
@@ -133,6 +134,413 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 }
 #endif
 
+#ifdef CONFIG_PGSTE
+
+/**
+ * gmap_alloc - allocate a guest address space
+ * @mm: pointer to the parent mm_struct
+ *
+ * Returns a guest address space structure.
+ */
+struct gmap *gmap_alloc(struct mm_struct *mm)
+{
+	struct gmap *gmap;
+	struct page *page;
+	unsigned long *table;
+
+	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
+	if (!gmap)
+		goto out;
+	INIT_LIST_HEAD(&gmap->crst_list);
+	gmap->mm = mm;
+	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+	if (!page)
+		goto out_free;
+	list_add(&page->lru, &gmap->crst_list);
+	table = (unsigned long *) page_to_phys(page);
+	crst_table_init(table, _REGION1_ENTRY_EMPTY);
+	gmap->table = table;
+	list_add(&gmap->list, &mm->context.gmap_list);
+	return gmap;
+
+out_free:
+	kfree(gmap);
+out:
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(gmap_alloc);
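gmap->table is a region-first table, so a guest address resolves through four 11-bit indices (shifts 53/42/31/20, mask 0x7ff), ending at 1 MB segments; the same arithmetic recurs in gmap_unmap_segment(), gmap_map_segment() and gmap_fault() below. A standalone demo of the index extraction (the address is arbitrary):

```c
#include <stdio.h>

int main(void)
{
	unsigned long address = 0x0000123456789abcUL;

	printf("region-1 index: %#5lx\n", (address >> 53) & 0x7ff);
	printf("region-2 index: %#5lx\n", (address >> 42) & 0x7ff);
	printf("region-3 index: %#5lx\n", (address >> 31) & 0x7ff);
	printf("segment  index: %#5lx\n", (address >> 20) & 0x7ff);
	printf("byte in 1MB segment: %#lx\n", address & 0xfffffUL);
	return 0;
}
```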
+
+static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
+{
+	struct gmap_pgtable *mp;
+	struct gmap_rmap *rmap;
+	struct page *page;
+
+	if (*table & _SEGMENT_ENTRY_INV)
+		return 0;
+	page = pfn_to_page(*table >> PAGE_SHIFT);
+	mp = (struct gmap_pgtable *) page->index;
+	list_for_each_entry(rmap, &mp->mapper, list) {
+		if (rmap->entry != table)
+			continue;
+		list_del(&rmap->list);
+		kfree(rmap);
+		break;
+	}
+	*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+	return 1;
+}
+
+static void gmap_flush_tlb(struct gmap *gmap)
+{
+	if (MACHINE_HAS_IDTE)
+		__tlb_flush_idte((unsigned long) gmap->table |
+				 _ASCE_TYPE_REGION1);
+	else
+		__tlb_flush_global();
+}
+
+/**
+ * gmap_free - free a guest address space
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_free(struct gmap *gmap)
+{
+	struct page *page, *next;
+	unsigned long *table;
+	int i;
+
+	/* Flush tlb. */
+	gmap_flush_tlb(gmap);
+
+	/* Free all segment & region tables. */
+	down_read(&gmap->mm->mmap_sem);
+	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
+		table = (unsigned long *) page_to_phys(page);
+		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
+			/* Remove gmap rmap structures for segment table. */
+			for (i = 0; i < PTRS_PER_PMD; i++, table++)
+				gmap_unlink_segment(gmap, table);
+		__free_pages(page, ALLOC_ORDER);
+	}
+	up_read(&gmap->mm->mmap_sem);
+	list_del(&gmap->list);
+	kfree(gmap);
+}
+EXPORT_SYMBOL_GPL(gmap_free);
+
+/**
+ * gmap_enable - switch primary space to the guest address space
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_enable(struct gmap *gmap)
+{
+	/* Load primary space page table origin. */
+	S390_lowcore.user_asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
+				 _ASCE_USER_BITS | __pa(gmap->table);
+	asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
+	S390_lowcore.gmap = (unsigned long) gmap;
+}
+EXPORT_SYMBOL_GPL(gmap_enable);
+
+/**
+ * gmap_disable - switch back to the standard primary address space
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_disable(struct gmap *gmap)
+{
+	/* Load primary space page table origin. */
+	S390_lowcore.user_asce = gmap->mm->context.asce_bits |
+				 __pa(gmap->mm->pgd);
+	asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
+	S390_lowcore.gmap = 0UL;
+}
+EXPORT_SYMBOL_GPL(gmap_disable);
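gmap_unlink_segment() parks the parent address inside the now-invalid entry (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | vmaddr), and gmap_fault() later recognizes exactly this pattern to re-resolve the segment. A self-contained model of the encode/decode — the bit values are illustrative stand-ins with the same roles, not the architectural definitions:

```c
#include <assert.h>
#include <stdio.h>

#define SEG_INV		0x20UL		/* invalid bit (stand-in value) */
#define SEG_RO		0x200UL		/* protection bit (stand-in value) */
#define PMD_MASK	(~0xfffffUL)	/* 1 MB segments */

int main(void)
{
	unsigned long parent_vmaddr = 0x3f500000UL;	/* 1 MB aligned */
	unsigned long entry;

	/* "Unlink": park the parent address in an invalid entry. */
	entry = SEG_INV | SEG_RO | parent_vmaddr;

	/* "Fault": recognize the pattern and recover the address. */
	if ((entry & SEG_INV) && (entry & SEG_RO)) {
		unsigned long vmaddr = entry & PMD_MASK;

		assert(vmaddr == parent_vmaddr);
		printf("re-resolve segment from parent %#lx\n", vmaddr);
	}
	return 0;
}
```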
+
+static int gmap_alloc_table(struct gmap *gmap,
+			    unsigned long *table, unsigned long init)
+{
+	struct page *page;
+	unsigned long *new;
+
+	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+	if (!page)
+		return -ENOMEM;
+	new = (unsigned long *) page_to_phys(page);
+	crst_table_init(new, init);
+	down_read(&gmap->mm->mmap_sem);
+	if (*table & _REGION_ENTRY_INV) {
+		list_add(&page->lru, &gmap->crst_list);
+		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
+			(*table & _REGION_ENTRY_TYPE_MASK);
+	} else
+		__free_pages(page, ALLOC_ORDER);
+	up_read(&gmap->mm->mmap_sem);
+	return 0;
+}
+
+/**
+ * gmap_unmap_segment - unmap segment from the guest address space
+ * @gmap: pointer to the guest address space structure
+ * @to: address in the guest address space
+ * @len: length of the memory area to unmap
+ *
+ * Returns 0 if the unmap succeeded, -EINVAL if not.
+ */
+int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
+{
+	unsigned long *table;
+	unsigned long off;
+	int flush;
+
+	if ((to | len) & (PMD_SIZE - 1))
+		return -EINVAL;
+	if (len == 0 || to + len < to)
+		return -EINVAL;
+
+	flush = 0;
+	down_read(&gmap->mm->mmap_sem);
+	for (off = 0; off < len; off += PMD_SIZE) {
+		/* Walk the guest addr space page table */
+		table = gmap->table + (((to + off) >> 53) & 0x7ff);
+		if (*table & _REGION_ENTRY_INV)
+			goto out;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+		table = table + (((to + off) >> 42) & 0x7ff);
+		if (*table & _REGION_ENTRY_INV)
+			goto out;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+		table = table + (((to + off) >> 31) & 0x7ff);
+		if (*table & _REGION_ENTRY_INV)
+			goto out;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+		table = table + (((to + off) >> 20) & 0x7ff);
+
+		/* Clear segment table entry in guest address space. */
+		flush |= gmap_unlink_segment(gmap, table);
+		*table = _SEGMENT_ENTRY_INV;
+	}
+out:
+	up_read(&gmap->mm->mmap_sem);
+	if (flush)
+		gmap_flush_tlb(gmap);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(gmap_unmap_segment);
+
+/**
+ * gmap_map_segment - map a segment to the guest address space
+ * @gmap: pointer to the guest address space structure
+ * @from: source address in the parent address space
+ * @to: target address in the guest address space
+ * @len: length of the memory area to map
+ *
+ * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
+ */
+int gmap_map_segment(struct gmap *gmap, unsigned long from,
+		     unsigned long to, unsigned long len)
+{
+	unsigned long *table;
+	unsigned long off;
+	int flush;
+
+	if ((from | to | len) & (PMD_SIZE - 1))
+		return -EINVAL;
+	if (len == 0 || from + len > PGDIR_SIZE ||
+	    from + len < from || to + len < to)
+		return -EINVAL;
+
+	flush = 0;
+	down_read(&gmap->mm->mmap_sem);
+	for (off = 0; off < len; off += PMD_SIZE) {
+		/* Walk the gmap address space page table */
+		table = gmap->table + (((to + off) >> 53) & 0x7ff);
+		if ((*table & _REGION_ENTRY_INV) &&
+		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
+			goto out_unmap;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+		table = table + (((to + off) >> 42) & 0x7ff);
+		if ((*table & _REGION_ENTRY_INV) &&
+		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
+			goto out_unmap;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+		table = table + (((to + off) >> 31) & 0x7ff);
+		if ((*table & _REGION_ENTRY_INV) &&
+		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
+			goto out_unmap;
+		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
+		table = table + (((to + off) >> 20) & 0x7ff);
+
+		/* Store 'from' address in an invalid segment table entry. */
+		flush |= gmap_unlink_segment(gmap, table);
+		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
+	}
+	up_read(&gmap->mm->mmap_sem);
+	if (flush)
+		gmap_flush_tlb(gmap);
+	return 0;
+
+out_unmap:
+	up_read(&gmap->mm->mmap_sem);
+	gmap_unmap_segment(gmap, to, len);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(gmap_map_segment);
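The argument validation at the top of both functions rejects unaligned, empty and wrapping ranges, and additionally requires the source range to fit below PGDIR_SIZE. The same checks, extracted into a standalone predicate so they can be exercised directly — the constants assume the 64-bit s390 layout of this era (1 MB segments, a 4 TB pgdir span):

```c
#include <stdio.h>

#define PMD_SIZE	(1UL << 20)	/* 1 MB segment */
#define PGDIR_SIZE	(1UL << 42)	/* 4 TB: source must fit below this */

static int check_map_args(unsigned long from, unsigned long to,
			  unsigned long len)
{
	if ((from | to | len) & (PMD_SIZE - 1))
		return -1;	/* everything must be 1 MB aligned */
	if (len == 0 || from + len > PGDIR_SIZE ||
	    from + len < from || to + len < to)
		return -1;	/* reject empty and wrapping ranges */
	return 0;
}

int main(void)
{
	printf("%d\n", check_map_args(0x80000000UL, 0, 0x10000000UL)); /*  0 */
	printf("%d\n", check_map_args(0x80000800UL, 0, 0x10000000UL)); /* -1 */
	printf("%d\n", check_map_args(0, 0, 0));                       /* -1 */
	printf("%d\n", check_map_args(~0UL << 20, 0, PMD_SIZE * 2));   /* -1 */
	return 0;
}
```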
+
+unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
+{
+	unsigned long *table, vmaddr, segment;
+	struct mm_struct *mm;
+	struct gmap_pgtable *mp;
+	struct gmap_rmap *rmap;
+	struct vm_area_struct *vma;
+	struct page *page;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	current->thread.gmap_addr = address;
+	mm = gmap->mm;
+	/* Walk the gmap address space page table */
+	table = gmap->table + ((address >> 53) & 0x7ff);
+	if (unlikely(*table & _REGION_ENTRY_INV))
+		return -EFAULT;
+	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	table = table + ((address >> 42) & 0x7ff);
+	if (unlikely(*table & _REGION_ENTRY_INV))
+		return -EFAULT;
+	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	table = table + ((address >> 31) & 0x7ff);
+	if (unlikely(*table & _REGION_ENTRY_INV))
+		return -EFAULT;
+	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	table = table + ((address >> 20) & 0x7ff);
+
+	/* Convert the gmap address to an mm address. */
+	segment = *table;
+	if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
+		page = pfn_to_page(segment >> PAGE_SHIFT);
+		mp = (struct gmap_pgtable *) page->index;
+		return mp->vmaddr | (address & ~PMD_MASK);
+	} else if (segment & _SEGMENT_ENTRY_RO) {
+		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
+		vma = find_vma(mm, vmaddr);
+		if (!vma || vma->vm_start > vmaddr)
+			return -EFAULT;
+
+		/* Walk the parent mm page table */
+		pgd = pgd_offset(mm, vmaddr);
+		pud = pud_alloc(mm, pgd, vmaddr);
+		if (!pud)
+			return -ENOMEM;
+		pmd = pmd_alloc(mm, pud, vmaddr);
+		if (!pmd)
+			return -ENOMEM;
+		if (!pmd_present(*pmd) &&
+		    __pte_alloc(mm, vma, pmd, vmaddr))
+			return -ENOMEM;
+		/* pmd now points to a valid segment table entry. */
+		rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
+		if (!rmap)
+			return -ENOMEM;
+		/* Link gmap segment table entry location to page table. */
+		page = pmd_page(*pmd);
+		mp = (struct gmap_pgtable *) page->index;
+		rmap->entry = table;
+		list_add(&rmap->list, &mp->mapper);
+		/* Set gmap segment table entry to page table. */
+		*table = pmd_val(*pmd) & PAGE_MASK;
+		return vmaddr | (address & ~PMD_MASK);
+	}
+	return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(gmap_fault);
+
+void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
+{
+	struct gmap_rmap *rmap, *next;
+	struct gmap_pgtable *mp;
+	struct page *page;
+	int flush;
+
+	flush = 0;
+	spin_lock(&mm->page_table_lock);
+	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	mp = (struct gmap_pgtable *) page->index;
+	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
+		*rmap->entry =
+			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+		list_del(&rmap->list);
+		kfree(rmap);
+		flush = 1;
+	}
+	spin_unlock(&mm->page_table_lock);
+	if (flush)
+		__tlb_flush_global();
+}
+
+static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
+						    unsigned long vmaddr)
+{
+	struct page *page;
+	unsigned long *table;
+	struct gmap_pgtable *mp;
+
+	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+	if (!page)
+		return NULL;
+	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
+	if (!mp) {
+		__free_page(page);
+		return NULL;
+	}
+	pgtable_page_ctor(page);
+	mp->vmaddr = vmaddr & PMD_MASK;
+	INIT_LIST_HEAD(&mp->mapper);
+	page->index = (unsigned long) mp;
+	atomic_set(&page->_mapcount, 3);
+	table = (unsigned long *) page_to_phys(page);
+	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
+	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+	return table;
+}
+
+static inline void page_table_free_pgste(unsigned long *table)
+{
+	struct page *page;
+	struct gmap_pgtable *mp;
+
+	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	mp = (struct gmap_pgtable *) page->index;
+	BUG_ON(!list_empty(&mp->mapper));
+	pgtable_page_dtor(page);
+	atomic_set(&page->_mapcount, -1);
+	kfree(mp);
+	__free_page(page);
+}
+
+#else /* CONFIG_PGSTE */
+
+static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
+						    unsigned long vmaddr)
+{
+	return NULL;
+}
+
+static inline void page_table_free_pgste(unsigned long *table)
+{
+}
+
+static inline void gmap_unmap_notifier(struct mm_struct *mm,
+				       unsigned long *table)
+{
+}
+
+#endif /* CONFIG_PGSTE */
+
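A CONFIG_PGSTE page table page is split in half: 256 ptes in the lower 2 KB and their page-table-status extensions (pgstes) in the upper 2 KB, with page->index pointing at the gmap_pgtable descriptor and _mapcount set to 3 so the fragment allocator treats the page as fully used. A standalone model of the half-page layout — types and values are illustrative; the real code clears the lower half to _PAGE_TYPE_EMPTY rather than zero:

```c
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096
#define PTRS_PER_PTE	256	/* 8-byte entries in the lower half */

int main(void)
{
	unsigned long page[PAGE_SIZE / sizeof(unsigned long)];
	unsigned long *pte = page;			/* lower 2 KB */
	unsigned long *pgste = page + PTRS_PER_PTE;	/* upper 2 KB */

	memset(page, 0, sizeof(page));	/* stands in for clear_table() */
	pte[5] = 0x12345000UL;		/* some pte */
	pgste[5] = 0x1UL;		/* status bits for the same entry */
	printf("pte[5]=%#lx pgste[5]=%#lx\n", pte[5], pgste[5]);
	printf("pgste for pte i lives at index i + %d\n", PTRS_PER_PTE);
	return 0;
}
```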
 static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
 {
 	unsigned int old, new;
@@ -147,44 +555,14 @@ static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
 
 /*
  * page table entry allocation/free routines.
  */
-#ifdef CONFIG_PGSTE
-static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
-{
-	struct page *page;
-	unsigned long *table;
-
-	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
-	if (!page)
-		return NULL;
-	pgtable_page_ctor(page);
-	atomic_set(&page->_mapcount, 3);
-	table = (unsigned long *) page_to_phys(page);
-	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
-	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
-	return table;
-}
-
-static inline void page_table_free_pgste(unsigned long *table)
-{
-	struct page *page;
-
-	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-	pgtable_page_ctor(page);
-	atomic_set(&page->_mapcount, -1);
-	__free_page(page);
-}
-#endif
-
-unsigned long *page_table_alloc(struct mm_struct *mm)
+unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
 {
 	struct page *page;
 	unsigned long *table;
 	unsigned int mask, bit;
 
-#ifdef CONFIG_PGSTE
 	if (mm_has_pgste(mm))
-		return page_table_alloc_pgste(mm);
-#endif
+		return page_table_alloc_pgste(mm, vmaddr);
 	/* Allocate fragments of a 4K page as 1K/2K page table */
 	spin_lock_bh(&mm->context.list_lock);
 	mask = FRAG_MASK;
@@ -222,10 +600,10 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	struct page *page;
 	unsigned int bit, mask;
 
-#ifdef CONFIG_PGSTE
-	if (mm_has_pgste(mm))
+	if (mm_has_pgste(mm)) {
+		gmap_unmap_notifier(mm, table);
 		return page_table_free_pgste(table);
-#endif
+	}
 	/* Free 1K/2K page table fragment of a 4K page */
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
@@ -249,10 +627,8 @@ static void __page_table_free_rcu(void *table, unsigned bit)
 {
 	struct page *page;
 
-#ifdef CONFIG_PGSTE
 	if (bit == FRAG_MASK)
 		return page_table_free_pgste(table);
-#endif
 	/* Free 1K/2K page table fragment of a 4K page */
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
@@ -269,13 +645,12 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
 	unsigned int bit, mask;
 
 	mm = tlb->mm;
-#ifdef CONFIG_PGSTE
 	if (mm_has_pgste(mm)) {
+		gmap_unmap_notifier(mm, table);
 		table = (unsigned long *) (__pa(table) | FRAG_MASK);
 		tlb_remove_table(tlb, table);
 		return;
 	}
-#endif
 	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 	spin_lock_bh(&mm->context.list_lock);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 8c1970d1dd91..781ff5169560 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -61,12 +61,12 @@ static inline pmd_t *vmem_pmd_alloc(void)
 	return pmd;
 }
 
-static pte_t __ref *vmem_pte_alloc(void)
+static pte_t __ref *vmem_pte_alloc(unsigned long address)
 {
 	pte_t *pte;
 
 	if (slab_is_available())
-		pte = (pte_t *) page_table_alloc(&init_mm);
+		pte = (pte_t *) page_table_alloc(&init_mm, address);
 	else
 		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
 	if (!pte)
@@ -120,7 +120,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 	}
 #endif
 	if (pmd_none(*pm_dir)) {
-		pt_dir = vmem_pte_alloc();
+		pt_dir = vmem_pte_alloc(address);
 		if (!pt_dir)
 			goto out;
 		pmd_populate(&init_mm, pm_dir, pt_dir);
@@ -205,7 +205,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 
 	pm_dir = pmd_offset(pu_dir, address);
 	if (pmd_none(*pm_dir)) {
-		pt_dir = vmem_pte_alloc();
+		pt_dir = vmem_pte_alloc(address);
 		if (!pt_dir)
 			goto out;
 		pmd_populate(&init_mm, pm_dir, pt_dir);
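For non-pgste mms, page_table_alloc()/page_table_free() keep carving 4 KB pages into page-table fragments and track usage as a bitmask in page->_mapcount; the bit for a fragment is derived from its byte offset within the page. A standalone check of that arithmetic, assuming the 64-bit layout (256 eight-byte ptes, i.e. 2 KB fragments):

```c
#include <stdio.h>

#define PAGE_MASK	(~4095UL)
#define PTRS_PER_PTE	256
#define PTE_SIZE	8	/* sizeof(pte_t) on 64-bit s390 */

static unsigned int frag_bit(unsigned long table)
{
	/* Mirrors: 1 << ((__pa(table) & ~PAGE_MASK) /
	 *               (PTRS_PER_PTE * sizeof(pte_t))) */
	return 1u << ((table & ~PAGE_MASK) / (PTRS_PER_PTE * PTE_SIZE));
}

int main(void)
{
	/* Two fragments of one (hypothetical) page at 0x10000. */
	printf("bit for low  half: %u\n", frag_bit(0x10000UL));	/* 1 */
	printf("bit for high half: %u\n", frag_bit(0x10800UL));	/* 2 */
	return 0;
}
```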