4eb0716e86
On systems that support gigantic pages but do not have CONTIG_ALLOC enabled, boot-time reserved gigantic pages cannot be freed at all. This patch simply makes it possible to hand those pages back to the memory allocator.

Link: http://lkml.kernel.org/r/20190327063626.18421-5-alex@ghiti.fr
Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
Acked-by: David S. Miller <davem@davemloft.net> [sparc]
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Andy Lutomirsky <luto@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
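The header below supplies the generic fallback for gigantic_page_runtime_supported(), the hook used to decide whether gigantic pages can be handed back at runtime; an architecture opts in to its own answer by defining __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED, which suppresses the generic version near the end of the file. A minimal sketch of such an override follows; the architecture name and the unconditional return value are illustrative assumptions, not taken from this patch:

/*
 * Hypothetical override, e.g. in arch/foo/include/asm/hugetlb.h.
 * Defining the __HAVE_ARCH_* guard keeps the generic fallback in
 * asm-generic/hugetlb.h from being compiled in, and the architecture
 * then reports that gigantic pages can be freed at runtime.
 */
#define __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED
static inline bool gigantic_page_runtime_supported(void)
{
	return true;	/* assumption: this arch can always free them */
}

Without an override, the generic version simply returns IS_ENABLED(CONFIG_ARCH_HAS_GIGANTIC_PAGE), as seen at the bottom of the listing. The user-visible effect is that a boot-time reservation (for example hugepagesz=1G hugepages=N on the kernel command line) can later be released by lowering the corresponding nr_hugepages count in sysfs, provided this helper reports support.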
137 lines
3.0 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_HUGETLB_H
#define _ASM_GENERIC_HUGETLB_H

static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
{
	return mk_pte(page, pgprot);
}

static inline unsigned long huge_pte_write(pte_t pte)
{
	return pte_write(pte);
}

static inline unsigned long huge_pte_dirty(pte_t pte)
{
	return pte_dirty(pte);
}

static inline pte_t huge_pte_mkwrite(pte_t pte)
{
	return pte_mkwrite(pte);
}

static inline pte_t huge_pte_mkdirty(pte_t pte)
{
	return pte_mkdirty(pte);
}

static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
{
	return pte_modify(pte, newprot);
}

#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, unsigned long sz)
{
	pte_clear(mm, addr, ptep);
}
#endif

#ifndef __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
		unsigned long addr, unsigned long end,
		unsigned long floor, unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}
#endif

#ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return ptep_get_and_clear(mm, addr, ptep);
}
#endif

#ifndef __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
	ptep_clear_flush(vma, addr, ptep);
}
#endif

#ifndef __HAVE_ARCH_HUGE_PTE_NONE
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}
#endif

#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}
#endif

#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
static inline int prepare_hugepage_range(struct file *file,
		unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;

	return 0;
}
#endif

#ifndef __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}
#endif

#ifndef __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep,
		pte_t pte, int dirty)
{
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}
#endif

#ifndef __HAVE_ARCH_HUGE_PTEP_GET
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}
#endif

#ifndef __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED
static inline bool gigantic_page_runtime_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_HAS_GIGANTIC_PAGE);
}
#endif /* __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED */

#endif /* _ASM_GENERIC_HUGETLB_H */