linux/arch/arm/mm/pgd.c
Andrea Arcangeli 8ac1f8320a thp: pte alloc trans splitting
pte alloc routines must wait for split_huge_page if the pmd is not present
and not null (i.e. pmd_trans_splitting).  The additional branches are
optimized away at compile time when the config option is off, because
pmd_trans_splitting() then evaluates to a constant zero.  However, we must
pass the vma down in order to know which anon_vma lock to wait for.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-01-13 17:32:40 -08:00
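Below is a minimal sketch of the pattern the commit message describes,
assuming the post-change __pte_alloc() signature in mm/memory.c; the
structure is simplified and illustrative rather than a verbatim copy of
the change:

	/* Sketch: why pte allocation needs the vma once a THP can split. */
	int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long address)
	{
		pgtable_t new = pte_alloc_one(mm, address);
		int wait_split = 0;

		if (!new)
			return -ENOMEM;

		spin_lock(&mm->page_table_lock);
		if (likely(pmd_none(*pmd))) {
			/* Nobody populated the pmd yet: install our pte page. */
			mm->nr_ptes++;
			pmd_populate(mm, pmd, new);
			new = NULL;
		} else if (unlikely(pmd_trans_splitting(*pmd))) {
			/*
			 * A transparent huge pmd is being split: we must
			 * wait for it, and only the vma tells us which
			 * anon_vma lock to wait on.
			 */
			wait_split = 1;
		}
		spin_unlock(&mm->page_table_lock);
		if (new)
			pte_free(mm, new);
		if (wait_split)
			wait_split_huge_page(vma->anon_vma, pmd);
		return 0;
	}

With CONFIG_TRANSPARENT_HUGEPAGE disabled, pmd_trans_splitting() is a
compile-time constant zero, so the extra branch vanishes; callers that can
never see a huge pmd, such as pgd_alloc() below, simply pass a NULL vma
through pte_alloc_map().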

/*
 *  linux/arch/arm/mm/pgd.c
 *
 *  Copyright (C) 1998-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

#include "mm.h"
/*
 * need to get a 16k page for level 1: the ARM first-level page table
 * holds 4096 4-byte entries, i.e. four contiguous pages, hence the
 * order-2 allocation below.
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;
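		/*
		 * NULL vma: a freshly allocated pmd cannot be a
		 * splitting transparent huge pmd, so there is no
		 * anon_vma to wait on.
		 */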
		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	pmd = pmd_offset(pgd, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
no_pmd:
	pgd_clear(pgd);
	pmd_free(mm, pmd);
no_pgd:
	free_pages((unsigned long) pgd_base, 2);
}