misc/sgi-gru: use mmu_notifier_get/put for struct gru_mm_struct

GRU is already using almost the same algorithm as get/put, it even
helpfully has a 10-year-old comment to make this algorithm common, which
is finally happening.

There are a few differences and fixes from this conversion:
- GRU used rcu not srcu to read the hlist
- Unclear how the locking worked to prevent gru_register_mmu_notifier()
  from running concurrently with gru_drop_mmu_notifier() - this version is
  safe
- GRU had a release function which only set a variable without any locking
  that skipped the synchronize_srcu during unregister which looks racy,
  but this makes it reliable via the integrated call_srcu().
- It is unclear if the mmap_sem was actually held when
  __mmu_notifier_register() was called; lockdep will now warn if this is
  wrong

Link: https://lore.kernel.org/r/20190806231548.25242-5-jgg@ziepe.ca
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dimitri Sivanich <sivanich@hpe.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
Jason Gunthorpe 2019-08-06 20:15:41 -03:00
parent 2c7933f53f
commit e4c057d02c
3 changed files with 24 additions and 61 deletions

View File

@ -573,6 +573,7 @@ static void __exit gru_exit(void)
gru_free_tables(); gru_free_tables();
misc_deregister(&gru_miscdev); misc_deregister(&gru_miscdev);
gru_proc_exit(); gru_proc_exit();
mmu_notifier_synchronize();
} }
static const struct file_operations gru_fops = { static const struct file_operations gru_fops = {

View File

@ -307,10 +307,8 @@ struct gru_mm_tracker { /* pack to reduce size */
struct gru_mm_struct { struct gru_mm_struct {
struct mmu_notifier ms_notifier; struct mmu_notifier ms_notifier;
atomic_t ms_refcnt;
spinlock_t ms_asid_lock; /* protects ASID assignment */ spinlock_t ms_asid_lock; /* protects ASID assignment */
atomic_t ms_range_active;/* num range_invals active */ atomic_t ms_range_active;/* num range_invals active */
char ms_released;
wait_queue_head_t ms_wait_queue; wait_queue_head_t ms_wait_queue;
DECLARE_BITMAP(ms_asidmap, GRU_MAX_GRUS); DECLARE_BITMAP(ms_asidmap, GRU_MAX_GRUS);
struct gru_mm_tracker ms_asids[GRU_MAX_GRUS]; struct gru_mm_tracker ms_asids[GRU_MAX_GRUS];

View File

@ -235,83 +235,47 @@ static void gru_invalidate_range_end(struct mmu_notifier *mn,
gms, range->start, range->end); gms, range->start, range->end);
} }
static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm) static struct mmu_notifier *gru_alloc_notifier(struct mm_struct *mm)
{ {
struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct, struct gru_mm_struct *gms;
ms_notifier);
gms->ms_released = 1; gms = kzalloc(sizeof(*gms), GFP_KERNEL);
gru_dbg(grudev, "gms %p\n", gms); if (!gms)
return ERR_PTR(-ENOMEM);
STAT(gms_alloc);
spin_lock_init(&gms->ms_asid_lock);
init_waitqueue_head(&gms->ms_wait_queue);
return &gms->ms_notifier;
} }
static void gru_free_notifier(struct mmu_notifier *mn)
{
kfree(container_of(mn, struct gru_mm_struct, ms_notifier));
STAT(gms_free);
}
static const struct mmu_notifier_ops gru_mmuops = { static const struct mmu_notifier_ops gru_mmuops = {
.invalidate_range_start = gru_invalidate_range_start, .invalidate_range_start = gru_invalidate_range_start,
.invalidate_range_end = gru_invalidate_range_end, .invalidate_range_end = gru_invalidate_range_end,
.release = gru_release, .alloc_notifier = gru_alloc_notifier,
.free_notifier = gru_free_notifier,
}; };
/* Move this to the basic mmu_notifier file. But for now... */
static struct mmu_notifier *mmu_find_ops(struct mm_struct *mm,
const struct mmu_notifier_ops *ops)
{
struct mmu_notifier *mn, *gru_mn = NULL;
if (mm->mmu_notifier_mm) {
rcu_read_lock();
hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list,
hlist)
if (mn->ops == ops) {
gru_mn = mn;
break;
}
rcu_read_unlock();
}
return gru_mn;
}
struct gru_mm_struct *gru_register_mmu_notifier(void) struct gru_mm_struct *gru_register_mmu_notifier(void)
{ {
struct gru_mm_struct *gms;
struct mmu_notifier *mn; struct mmu_notifier *mn;
int err;
mn = mmu_find_ops(current->mm, &gru_mmuops); mn = mmu_notifier_get_locked(&gru_mmuops, current->mm);
if (mn) { if (IS_ERR(mn))
gms = container_of(mn, struct gru_mm_struct, ms_notifier); return ERR_CAST(mn);
atomic_inc(&gms->ms_refcnt);
} else { return container_of(mn, struct gru_mm_struct, ms_notifier);
gms = kzalloc(sizeof(*gms), GFP_KERNEL);
if (!gms)
return ERR_PTR(-ENOMEM);
STAT(gms_alloc);
spin_lock_init(&gms->ms_asid_lock);
gms->ms_notifier.ops = &gru_mmuops;
atomic_set(&gms->ms_refcnt, 1);
init_waitqueue_head(&gms->ms_wait_queue);
err = __mmu_notifier_register(&gms->ms_notifier, current->mm);
if (err)
goto error;
}
if (gms)
gru_dbg(grudev, "gms %p, refcnt %d\n", gms,
atomic_read(&gms->ms_refcnt));
return gms;
error:
kfree(gms);
return ERR_PTR(err);
} }
void gru_drop_mmu_notifier(struct gru_mm_struct *gms) void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
{ {
gru_dbg(grudev, "gms %p, refcnt %d, released %d\n", gms, mmu_notifier_put(&gms->ms_notifier);
atomic_read(&gms->ms_refcnt), gms->ms_released);
if (atomic_dec_return(&gms->ms_refcnt) == 0) {
if (!gms->ms_released)
mmu_notifier_unregister(&gms->ms_notifier, current->mm);
kfree(gms);
STAT(gms_free);
}
} }
/* /*