userfaultfd: shmem: add shmem_mfill_zeropage_pte for userfaultfd support
shmem_mfill_zeropage_pte is the low level routine that implements the
userfaultfd UFFDIO_ZEROPAGE command.  Since for shmem mappings zero
pages are always allocated and accounted, the new method is a slight
extension of the existing shmem_mcopy_atomic_pte.

Link: http://lkml.kernel.org/r/1497939652-16528-4-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Pavel Emelyanov <xemul@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 8d10396342 (parent 0f07969456)
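For context, a minimal user-space sketch of the UFFDIO_ZEROPAGE flow this
routine serves (not part of this patch; error handling and the fault-handling
thread are omitted, and the single-page mapping is purely illustrative):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* MAP_SHARED|MAP_ANONYMOUS gives a shmem-backed mapping */
	char *area = mmap(NULL, page, PROT_READ | PROT_WRITE,
			  MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	struct uffdio_api api = { .api = UFFD_API };
	ioctl(uffd, UFFDIO_API, &api);

	struct uffdio_register reg = {
		.range = { .start = (unsigned long)area, .len = page },
		.mode  = UFFDIO_REGISTER_MODE_MISSING,
	};
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	/* atomically fill a missing page with zeroes; for this VMA the
	 * kernel path ends in shmem_mfill_zeropage_pte() */
	struct uffdio_zeropage zp = {
		.range = { .start = (unsigned long)area, .len = page },
	};
	ioctl(uffd, UFFDIO_ZEROPAGE, &zp);

	return 0;
}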
include/linux/shmem_fs.h

@@ -137,9 +137,15 @@ extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
 				  unsigned long dst_addr,
 				  unsigned long src_addr,
 				  struct page **pagep);
+extern int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
+				    pmd_t *dst_pmd,
+				    struct vm_area_struct *dst_vma,
+				    unsigned long dst_addr);
 #else
 #define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
 			       src_addr, pagep)	({ BUG(); 0; })
+#define shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, \
+				 dst_addr)	({ BUG(); 0; })
 #endif
 
 #endif
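A note on the !CONFIG_SHMEM stubs above: ({ BUG(); 0; }) is a GNU C
statement expression, which evaluates to its last expression, so the macro
type-checks as an int-returning call while trapping if ever executed. A
stand-alone user-space illustration of the idiom (hypothetical names,
abort() standing in for the kernel's BUG()):

#include <stdio.h>
#include <stdlib.h>

#define BUG()	abort()	/* user-space stand-in for the kernel's BUG() */

/* the ({ ... }) block evaluates to its last expression, so the stub
 * can be assigned and returned exactly like the real function */
#define shmem_stub_pte(dst_mm, dst_addr)	({ BUG(); 0; })

int main(void)
{
	if (0) {	/* never taken, but must still type-check */
		int ret = shmem_stub_pte(NULL, 0UL);
		printf("%d\n", ret);
	}
	return 0;
}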
mm/shmem.c | 60

@@ -2207,12 +2207,13 @@ bool shmem_mapping(struct address_space *mapping)
 	return mapping->a_ops == &shmem_aops;
 }
 
-int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
-			   pmd_t *dst_pmd,
-			   struct vm_area_struct *dst_vma,
-			   unsigned long dst_addr,
-			   unsigned long src_addr,
-			   struct page **pagep)
+static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
+				  pmd_t *dst_pmd,
+				  struct vm_area_struct *dst_vma,
+				  unsigned long dst_addr,
+				  unsigned long src_addr,
+				  bool zeropage,
+				  struct page **pagep)
 {
 	struct inode *inode = file_inode(dst_vma->vm_file);
 	struct shmem_inode_info *info = SHMEM_I(inode);
@@ -2235,17 +2236,22 @@ int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
 		if (!page)
 			goto out_unacct_blocks;
 
-		page_kaddr = kmap_atomic(page);
-		ret = copy_from_user(page_kaddr, (const void __user *)src_addr,
-				     PAGE_SIZE);
-		kunmap_atomic(page_kaddr);
-
-		/* fallback to copy_from_user outside mmap_sem */
-		if (unlikely(ret)) {
-			*pagep = page;
-			shmem_inode_unacct_blocks(inode, 1);
-			/* don't free the page */
-			return -EFAULT;
+		if (!zeropage) {	/* mcopy_atomic */
+			page_kaddr = kmap_atomic(page);
+			ret = copy_from_user(page_kaddr,
+					     (const void __user *)src_addr,
+					     PAGE_SIZE);
+			kunmap_atomic(page_kaddr);
+
+			/* fallback to copy_from_user outside mmap_sem */
+			if (unlikely(ret)) {
+				*pagep = page;
+				shmem_inode_unacct_blocks(inode, 1);
+				/* don't free the page */
+				return -EFAULT;
+			}
+		} else {		/* mfill_zeropage_atomic */
+			clear_highpage(page);
 		}
 	} else {
 		page = *pagep;
@@ -2311,6 +2317,28 @@ out_unacct_blocks:
 	goto out;
 }
 
+int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
+			   pmd_t *dst_pmd,
+			   struct vm_area_struct *dst_vma,
+			   unsigned long dst_addr,
+			   unsigned long src_addr,
+			   struct page **pagep)
+{
+	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
+				      dst_addr, src_addr, false, pagep);
+}
+
+int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
+			     pmd_t *dst_pmd,
+			     struct vm_area_struct *dst_vma,
+			     unsigned long dst_addr)
+{
+	struct page *page = NULL;
+
+	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
+				      dst_addr, 0, true, &page);
+}
+
 #ifdef CONFIG_TMPFS
 static const struct inode_operations shmem_symlink_inode_operations;
 static const struct inode_operations shmem_short_symlink_operations;
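For orientation, the call sites for these two entry points live in
mm/userfaultfd.c and are wired up by a follow-up patch in this series; the
dispatch there is expected to look roughly like the sketch below (an
assumption about that later patch, not code from this one):

/* sketch of the expected dispatch in mm/userfaultfd.c; the exact
 * names and shape are assumptions pending the follow-up wiring */
static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
						pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **page,
						bool zeropage)
{
	ssize_t err;

	if (vma_is_anonymous(dst_vma)) {
		if (!zeropage)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, page);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd,
						 dst_vma, dst_addr);
	} else {
		/* shmem-backed VMA: use the helpers added above */
		if (!zeropage)
			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
						     dst_vma, dst_addr,
						     src_addr, page);
		else
			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
						       dst_vma, dst_addr);
	}

	return err;
}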