vm_area_operations: kill ->migrate()

The only instance this method has ever grown was the one in kernfs -
a wrapper that calls ->migrate() of another vm_ops if it exists.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Author: Al Viro, 2014-05-15 05:06:42 -04:00
parent 777eda2c5b
commit 50062175ff
5 changed files with 0 additions and 60 deletions
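
For orientation, a condensed sketch of what goes away (assembled from the hunks below, not a drop-in patch): the ->migrate() member of vm_operations_struct, and kernfs's lone implementation, which only forwards to the wrapped vm_ops' ->migrate() if one is present.

/* The hook being removed from struct vm_operations_struct (include/linux/mm.h): */
int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
	       const nodemask_t *to, unsigned long flags);

/*
 * Its only implementation, the kernfs pass-through (fs/kernfs/file.c),
 * condensed here: it takes an active reference on the kernfs node and
 * chains to the wrapped vm_ops' ->migrate(), if any.
 */
static int kernfs_vma_migrate(struct vm_area_struct *vma,
			      const nodemask_t *from, const nodemask_t *to,
			      unsigned long flags)
{
	struct kernfs_open_file *of = kernfs_of(vma->vm_file);
	int ret = 0;

	if (!of->vm_ops || !kernfs_get_active(of->kn))
		return 0;
	if (of->vm_ops->migrate)
		ret = of->vm_ops->migrate(vma, from, to, flags);
	kernfs_put_active(of->kn);
	return ret;
}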

fs/kernfs/file.c

@@ -448,27 +448,6 @@ static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
 	return pol;
 }
 
-static int kernfs_vma_migrate(struct vm_area_struct *vma,
-			      const nodemask_t *from, const nodemask_t *to,
-			      unsigned long flags)
-{
-	struct file *file = vma->vm_file;
-	struct kernfs_open_file *of = kernfs_of(file);
-	int ret;
-
-	if (!of->vm_ops)
-		return 0;
-
-	if (!kernfs_get_active(of->kn))
-		return 0;
-
-	ret = 0;
-	if (of->vm_ops->migrate)
-		ret = of->vm_ops->migrate(vma, from, to, flags);
-
-	kernfs_put_active(of->kn);
-	return ret;
-}
 #endif
 
 static const struct vm_operations_struct kernfs_vm_ops = {
@@ -479,7 +458,6 @@ static const struct vm_operations_struct kernfs_vm_ops = {
 #ifdef CONFIG_NUMA
 	.set_policy	= kernfs_vma_set_policy,
 	.get_policy	= kernfs_vma_get_policy,
-	.migrate	= kernfs_vma_migrate,
 #endif
 };

include/linux/migrate.h

@@ -36,9 +36,6 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
 extern int migrate_prep(void);
 extern int migrate_prep_local(void);
-extern int migrate_vmas(struct mm_struct *mm,
-		const nodemask_t *from, const nodemask_t *to,
-		unsigned long flags);
 extern void migrate_page_copy(struct page *newpage, struct page *page);
 extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 				  struct page *newpage, struct page *page);
@@ -57,13 +54,6 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }
 
-static inline int migrate_vmas(struct mm_struct *mm,
-			const nodemask_t *from, const nodemask_t *to,
-			unsigned long flags)
-{
-	return -ENOSYS;
-}
-
 static inline void migrate_page_copy(struct page *newpage,
 				     struct page *page) {}

include/linux/mm.h

@@ -286,8 +286,6 @@ struct vm_operations_struct {
 	 */
 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
 					unsigned long addr);
-	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
-		const nodemask_t *to, unsigned long flags);
 #endif
 	/* called by sys_remap_file_pages() to populate non-linear mapping */
 	int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,

mm/mempolicy.c

@@ -1047,10 +1047,6 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 
 	down_read(&mm->mmap_sem);
 
-	err = migrate_vmas(mm, from, to, flags);
-	if (err)
-		goto out;
-
 	/*
 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 	 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
@@ -1130,7 +1126,6 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 		if (err < 0)
 			break;
 	}
-out:
 	up_read(&mm->mmap_sem);
 	if (err < 0)
 		return err;

mm/migrate.c

@@ -1536,27 +1536,6 @@ out:
 	return err;
 }
 
-/*
- * Call migration functions in the vma_ops that may prepare
- * memory in a vm for migration. migration functions may perform
- * the migration for vmas that do not have an underlying page struct.
- */
-int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
-	const nodemask_t *from, unsigned long flags)
-{
-	struct vm_area_struct *vma;
-	int err = 0;
-
-	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
-		if (vma->vm_ops && vma->vm_ops->migrate) {
-			err = vma->vm_ops->migrate(vma, to, from, flags);
-			if (err)
-				break;
-		}
-	}
-	return err;
-}
-
 #ifdef CONFIG_NUMA_BALANCING
 /*
  * Returns true if this is a safe migration target node for misplaced NUMA