s390/vdso: use _install_special_mapping to establish vdso

Switch to the improved _install_special_mapping function to install
the vdso mapping. This has two advantages: the arch_vma_name function
is no longer needed, and the vdso vma keeps its name after its
memory location has been changed with mremap.

Tested-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
commit 35bb092a91
parent b29e061bb7
Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
Date:   2017-05-15 10:23:38 +02:00
1 changed file with 64 additions and 27 deletions
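
To make the pattern concrete before the diff: with _install_special_mapping() the
name, fault handler and mremap hook travel with the mapping in a struct
vm_special_mapping, so no arch_vma_name() implementation is needed and the
"[vdso]" label survives an mremap. The sketch below is only a generic, minimal
illustration of that API, not the s390 code (which follows in the diff); the
identifiers demo_pages, demo_mremap, demo_mapping and demo_map are made up for
the example.

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mm_types.h>

static struct page *demo_pages[2];	/* hypothetical backing pages, allocated elsewhere */

/*
 * Called when userspace moves the vma with mremap(); a real user (like the
 * vdso) would record the new base address here, e.g. in mm->context.
 */
static int demo_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	return 0;
}

static const struct vm_special_mapping demo_mapping = {
	.name	= "[demo]",	/* shown in /proc/<pid>/maps without arch_vma_name() */
	.pages	= demo_pages,	/* a .fault handler can be used instead of .pages */
	.mremap	= demo_mremap,
};

static int demo_map(struct mm_struct *mm, unsigned long base)
{
	struct vm_area_struct *vma;

	vma = _install_special_mapping(mm, base, 2 * PAGE_SIZE,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &demo_mapping);
	return PTR_ERR_OR_ZERO(vma);
}

Because the name lives in the vm_special_mapping rather than being derived via
arch_vma_name(), tools such as perf keep recognising the region even after it
has been moved with mremap().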

@@ -50,6 +50,56 @@ static struct page **vdso64_pagelist;
*/
unsigned int __read_mostly vdso_enabled = 1;

static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page **vdso_pagelist;
	unsigned long vdso_pages;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif

	if (vmf->pgoff >= vdso_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vdso_pagelist[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	unsigned long vdso_pages;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task())
		vdso_pages = vdso32_pages;
#endif

	if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != vma->vm_mm))
		return -EFAULT;

	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};

static int __init vdso_setup(char *s)
{
	unsigned long val;
@@ -181,7 +231,7 @@ static void vdso_init_cr5(void)
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct page **vdso_pagelist;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;
@@ -194,13 +244,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
	if (!uses_interp)
		return 0;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		vdso_pagelist = vdso32_pagelist;
	if (is_compat_task())
		vdso_pages = vdso32_pages;
	}
#endif
	/*
	 * vDSO has a problem and was disabled, just don't "enable" it for
@@ -209,8 +256,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
	if (vdso_pages == 0)
		return 0;

	current->mm->context.vdso_base = 0;

	/*
	 * pick a base address for the vDSO in process space. We try to put
	 * it at vdso_base which is the "natural" base for it, but we might
@@ -224,13 +269,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
		goto out_up;
	}

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	current->mm->context.vdso_base = vdso_base;

	/*
	 * our vma flags don't have VM_WRITE so by default, the process
	 * isn't allowed to write those pages.
@@ -241,24 +279,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
	 * It's fine to use that for setting breakpoints in the vDSO code
	 * pages though.
	 */
	rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				     VM_READ|VM_EXEC|
				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				     vdso_pagelist);
	if (rc)
		current->mm->context.vdso_base = 0;
	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		rc = PTR_ERR(vma);
		goto out_up;
	}

	current->mm->context.vdso_base = vdso_base;
	rc = 0;
out_up:
	up_write(&mm->mmap_sem);
	return rc;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
		return "[vdso]";
	return NULL;
}

static int __init vdso_init(void)
{
	int i;