#ifndef _ASM_POWERPC_PGALLOC_64_H
#define _ASM_POWERPC_PGALLOC_64_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

[POWERPC] Provide a way to protect 4k subpages when using 64k pages

Using 64k pages on 64-bit PowerPC systems makes life difficult for
emulators that are trying to emulate an ISA, such as x86, which uses a
smaller page size, since the emulator can no longer use the MMU and
the normal system calls for controlling page protections. Of course,
the emulator can emulate the MMU by checking and possibly remapping
the address for each memory access in software, but that is pretty
slow.

This provides a facility for such programs to control the access
permissions on individual 4k sub-pages of 64k pages. The idea is
that the emulator supplies an array of protection masks to apply to a
specified range of virtual addresses. These masks are applied at the
level where hardware PTEs are inserted into the hardware page table
based on the Linux PTEs, so the Linux PTEs are not affected. Note
that this new mechanism does not allow any access that would otherwise
be prohibited; it can only prohibit accesses that would otherwise be
allowed. This new facility is only available on 64-bit PowerPC and
only when the kernel is configured for 64k pages.

The masks are supplied using a new subpage_prot system call, which
takes a starting virtual address and length, and a pointer to an array
of protection masks in memory. The array has a 32-bit word per 64k
page to be protected; each 32-bit word consists of 16 2-bit fields,
for which 0 allows any access (that is otherwise allowed), 1 prevents
write accesses, and 2 or 3 prevent any access.

Implicit in this is that the regions of the address space that are
protected are switched to use 4k hardware pages rather than 64k
hardware pages (on machines with hardware 64k page support). In fact
the whole process is switched to use 4k hardware pages when the
subpage_prot system call is used, but this could be improved in future
to switch only the affected segments.

The subpage protection bits are stored in a 3-level tree akin to the
page table tree. The top level of this tree is stored in a structure
that is appended to the top level of the page table tree, i.e., the
pgd array. Since it will often only be 32-bit addresses (below 4GB)
that are protected, the pointers to the first four bottom level pages
are also stored in this structure (each bottom level page contains the
protection bits for 1GB of address space), so the protection bits for
addresses below 4GB can be accessed with one fewer load than those
for higher addresses.

Signed-off-by: Paul Mackerras <paulus@samba.org>
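
For illustration, here is a minimal user-space sketch of how an emulator
might drive the new call. It is a hedged example: the __NR_subpage_prot
constant, the exact argument types and the bit ordering within each 32-bit
word are assumptions based on the description above, not taken from this
header.

/* Illustrative only -- not part of this header. */
#define _GNU_SOURCE
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	size_t len = 0x10000;	/* one 64k page */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/* One 32-bit word per 64k page: 16 two-bit fields, where
	 * 0 = full access, 1 = no write, 2 or 3 = no access. */
	uint32_t map[1] = { 0x1 };	/* make one 4k subpage read-only */

	if (p == MAP_FAILED)
		return 1;
	return syscall(__NR_subpage_prot, (unsigned long)p, len, map) ? 1 : 0;
}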

#ifndef CONFIG_PPC_SUBPAGE_PROT
static inline void subpage_prot_free(pgd_t *pgd) {}
#endif

extern struct kmem_cache *pgtable_cache[];
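
/*
 * Indices into pgtable_cache[].  PUD and PMD tables share cache slot 1;
 * PTE pages are not kmem_cache-backed at all, they come straight from
 * the page allocator (hence PTE_NONCACHE_NUM).
 */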
#define PGD_CACHE_NUM		0
#define PUD_CACHE_NUM		1
#define PMD_CACHE_NUM		1
#define HUGEPTE_CACHE_NUM	2
#define PTE_NONCACHE_NUM	7	/* from GFP rather than kmem_cache */

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	subpage_prot_free(pgd);
	kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
}
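
/*
 * The 4k-page and 64k-page configurations wire up the upper page-table
 * levels differently, so the populate/alloc helpers come in two variants.
 */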
#ifndef CONFIG_PPC_64K_PAGES

#define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, PUD)

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
				GFP_KERNEL|__GFP_REPEAT);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, (unsigned long)pmd);
}

#define pmd_populate(mm, pmd, pte_page)	\
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_populate_kernel(mm, pmd, pte)	pmd_set(pmd, (unsigned long)(pte))
#define pmd_pgtable(pmd) pmd_page(pmd)

#else /* CONFIG_PPC_64K_PAGES */

#define pud_populate(mm, pud, pmd)	pud_set(pud, (unsigned long)pmd)

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, (unsigned long)pte);
}

#define pmd_populate(mm, pmd, pte_page)	\
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)

#endif /* CONFIG_PPC_64K_PAGES */

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
				GFP_KERNEL|__GFP_REPEAT);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
}
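
/*
 * pte_alloc_one() wraps pte_alloc_one_kernel() and runs pgtable_page_ctor()
 * on the new PTE page, setting up the split page-table lock and page-table
 * accounting before the page is handed back as a pgtable_t.
 */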
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	struct page *page;
	pte_t *pte;

	pte = pte_alloc_one_kernel(mm, address);
	if (!pte)
		return NULL;
	page = virt_to_page(pte);
	pgtable_page_ctor(page);
	return page;
}
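
/*
 * pgtable_free_t packs a table's address together with its cache index in
 * the low-order PGF_CACHENUM_MASK bits; pgtable_free() decodes it and gives
 * the memory back to the matching kmem_cache, or to the page allocator for
 * PTE pages (PTE_NONCACHE_NUM).
 */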
static inline void pgtable_free(pgtable_free_t pgf)
{
	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
	int cachenum = pgf.val & PGF_CACHENUM_MASK;

	if (cachenum == PTE_NONCACHE_NUM)
		free_page((unsigned long)p);
	else
		kmem_cache_free(pgtable_cache[cachenum], p);
}

mm: Pass virtual address to [__]p{te,ud,md}_free_tlb()

Upcoming patches to support the new 64-bit "BookE" powerpc architecture
will need to have the virtual address corresponding to the PTE page when
freeing it, due to the way the HW table walker works.

Basically, the TLB can be loaded with "large" pages that cover the whole
virtual space (well, sort-of, half of it actually) represented by a PTE
page, and which contain an "indirect" bit indicating that this TLB entry
RPN points to an array of PTEs from which the TLB can then create direct
entries. Thus, in order to invalidate those when PTE pages are deleted,
we need the virtual address to pass to tlbilx or tlbivax instructions.

The old trick of sticking it somewhere in the PTE page struct page sucks
too much, the address is almost readily available in all call sites and
almost everybody implements these as macros, so we may as well add the
argument everywhere. I added it to the pmd and pud variants for consistency.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: David Howells <dhowells@redhat.com> [MN10300 & FRV]
Acked-by: Nick Piggin <npiggin@suse.de>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com> [s390]
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
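
For context, a rough sketch (hedged, not verbatim kernel code) of the shape
of a generic call site after this change: the caller that tears down a PTE
page now passes along the virtual address that page covered, so an
architecture with "indirect" TLB entries can invalidate them.

/* Sketch only: the assumed shape of a caller, not the actual mm code. */
static void free_pte_range_sketch(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	/* addr tells the arch hook which virtual range the PTE page mapped */
	pte_free_tlb(tlb, token, addr);
}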

#define __pmd_free_tlb(tlb, pmd, addr)		      \
	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))

#ifndef CONFIG_PPC_64K_PAGES
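/* Only the 4k-page configuration has a separately allocated PUD level. */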
#define __pud_free_tlb(tlb, pud, addr)		      \
	pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
		PUD_CACHE_NUM, PUD_TABLE_SIZE-1))

#endif /* CONFIG_PPC_64K_PAGES */

#define check_pgt_cache() do { } while (0)

#endif /* _ASM_POWERPC_PGALLOC_64_H */