From 14fa2daa15887f9246cfedc345e83e8d24cb9058 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Date: Fri, 24 Feb 2017 14:58:07 -0800
Subject: [PATCH] mm, uprobes: convert __replace_page() to use page_vma_mapped_walk()

For consistency, it is worth converting all page_check_address() callers
to page_vma_mapped_walk(), so we can drop the former.

Link: http://lkml.kernel.org/r/20170129173858.45174-10-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Srikar Dronamraju
Cc: Andrea Arcangeli
Cc: Hillf Danton
Cc: Hugh Dickins
Cc: Johannes Weiner
Cc: Oleg Nesterov
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Vladimir Davydov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/events/uprobes.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 1e65c79e52a6..18c6b23edd3c 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -153,14 +153,19 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 				struct page *old_page, struct page *new_page)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	spinlock_t *ptl;
-	pte_t *ptep;
+	struct page_vma_mapped_walk pvmw = {
+		.page = old_page,
+		.vma = vma,
+		.address = addr,
+	};
 	int err;
 	/* For mmu_notifiers */
 	const unsigned long mmun_start = addr;
 	const unsigned long mmun_end = addr + PAGE_SIZE;
 	struct mem_cgroup *memcg;
 
+	VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);
+
 	err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
 			false);
 	if (err)
@@ -171,11 +176,11 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	err = -EAGAIN;
-	ptep = page_check_address(old_page, mm, addr, &ptl, 0);
-	if (!ptep) {
+	if (!page_vma_mapped_walk(&pvmw)) {
 		mem_cgroup_cancel_charge(new_page, memcg, false);
 		goto unlock;
 	}
+	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
 
 	get_page(new_page);
 	page_add_new_anon_rmap(new_page, vma, addr, false);
@@ -187,14 +192,15 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 		inc_mm_counter(mm, MM_ANONPAGES);
 	}
 
-	flush_cache_page(vma, addr, pte_pfn(*ptep));
-	ptep_clear_flush_notify(vma, addr, ptep);
-	set_pte_at_notify(mm, addr, ptep, mk_pte(new_page, vma->vm_page_prot));
+	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
+	ptep_clear_flush_notify(vma, addr, pvmw.pte);
+	set_pte_at_notify(mm, addr, pvmw.pte,
+			mk_pte(new_page, vma->vm_page_prot));
 
 	page_remove_rmap(old_page, false);
 	if (!page_mapped(old_page))
 		try_to_free_swap(old_page);
-	pte_unmap_unlock(ptep, ptl);
+	page_vma_mapped_walk_done(&pvmw);
 
 	if (vma->vm_flags & VM_LOCKED)
 		munlock_vma_page(old_page);
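
A note on the API for readers of this series: __replace_page() is the
degenerate case, where the page is known to be mapped exactly once at a
known address, so a single page_vma_mapped_walk() call suffices. Rmap
walkers that may find several mappings (e.g. a THP mapped by multiple
PTEs) use it as a loop. The sketch below is illustrative only and not
part of this patch; the helper name page_referenced_one_sketch() is
made up, and it assumes the struct and helpers introduced earlier in
the series (include/linux/rmap.h).

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>

/*
 * Illustrative sketch only: the general loop form of the
 * page_vma_mapped_walk() API, clearing and collecting the young bit
 * for every mapping of @page within @vma.
 */
static bool page_referenced_one_sketch(struct page *page,
				       struct vm_area_struct *vma,
				       unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	bool referenced = false;

	/* Each true return leaves the page table lock held. */
	while (page_vma_mapped_walk(&pvmw)) {
		/* PMD-mapped THP sets pvmw.pmd instead; skipped here. */
		if (!pvmw.pte)
			continue;
		/* pvmw.address is where this mapping was found. */
		if (ptep_clear_flush_young_notify(vma, pvmw.address,
						  pvmw.pte))
			referenced = true;
		/*
		 * To bail out before the walk finishes, call
		 * page_vma_mapped_walk_done(&pvmw) and break; a false
		 * return has already dropped locks and mappings.
		 */
	}
	return referenced;
}

The key contract, as used by the hunks above: each true return comes
with the relevant page table lock held, so the caller may modify the
entry in place, and page_vma_mapped_walk_done() is only needed when
leaving the walk early.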