diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index f9ecf32bce55..d7be9cb0e91f 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -133,6 +133,7 @@ struct rhashtable_params {
  * @p: Configuration parameters
  * @run_work: Deferred worker to expand/shrink asynchronously
  * @mutex: Mutex to protect current/future table swapping
+ * @lock: Spin lock to protect walker list
  * @being_destroyed: True if table is set up for destruction
  */
 struct rhashtable {
@@ -144,6 +145,7 @@ struct rhashtable {
 	struct rhashtable_params	p;
 	struct work_struct		run_work;
 	struct mutex			mutex;
+	spinlock_t			lock;
 };
 
 /**
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 7686c1e9934a..e96ad1a52c90 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -256,8 +256,10 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
 	/* Publish the new table pointer. */
 	rcu_assign_pointer(ht->tbl, new_tbl);
 
+	spin_lock(&ht->lock);
 	list_for_each_entry(walker, &old_tbl->walkers, list)
 		walker->tbl = NULL;
+	spin_unlock(&ht->lock);
 
 	/* Wait for readers. All new readers will see the new
 	 * table, and thus no references to the old table will
@@ -635,12 +637,12 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
 
 	ht = iter->ht;
 
-	mutex_lock(&ht->mutex);
+	spin_lock(&ht->lock);
 	if (tbl->rehash < tbl->size)
 		list_add(&iter->walker->list, &tbl->walkers);
 	else
 		iter->walker->tbl = NULL;
-	mutex_unlock(&ht->mutex);
+	spin_unlock(&ht->lock);
 
 	iter->p = NULL;
 
@@ -723,6 +725,7 @@ int rhashtable_init(struct rhashtable *ht,
 
 	memset(ht, 0, sizeof(*ht));
 	mutex_init(&ht->mutex);
+	spin_lock_init(&ht->lock);
 	memcpy(&ht->p, params, sizeof(*params));
 
 	if (params->min_size)
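
Note (illustration only, not part of the patch): rhashtable_walk_stop() runs inside an RCU read-side critical section (it ends with rcu_read_unlock()), so it must not sleep; guarding the walker list with the new non-sleeping spin lock instead of ht->mutex keeps both the rehash path and the walk-stop path safe. Below is a minimal userspace sketch of that pattern, using a pthread spinlock in place of the kernel's spinlock_t and a hand-rolled singly linked list in place of list_head; the names struct table, struct walker, rehash_detach_walkers() and walk_stop() are hypothetical stand-ins for the kernel code.

/* Build with: gcc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

struct walker {
	struct walker *next;	/* singly linked walker list, for brevity */
	int attached;		/* 1 while parked on the table's list */
};

struct table {
	struct walker *walkers;		/* walkers parked on this table */
	pthread_spinlock_t lock;	/* plays the role of ht->lock */
};

/* Rehash side: detach every parked walker under the spin lock. */
static void rehash_detach_walkers(struct table *t)
{
	struct walker *w;

	pthread_spin_lock(&t->lock);
	for (w = t->walkers; w; w = w->next)
		w->attached = 0;
	t->walkers = NULL;
	pthread_spin_unlock(&t->lock);
}

/* Walker side: re-attach to the table under the same spin lock,
 * without ever needing a sleeping lock. */
static void walk_stop(struct table *t, struct walker *w)
{
	pthread_spin_lock(&t->lock);
	w->next = t->walkers;
	t->walkers = w;
	w->attached = 1;
	pthread_spin_unlock(&t->lock);
}

int main(void)
{
	struct table t = { .walkers = NULL };
	struct walker w = { .next = NULL, .attached = 0 };

	pthread_spin_init(&t.lock, PTHREAD_PROCESS_PRIVATE);

	walk_stop(&t, &w);		/* walker parks itself on the table */
	rehash_detach_walkers(&t);	/* rehash clears the list atomically */

	printf("walker attached after rehash: %d\n", w.attached);
	pthread_spin_destroy(&t.lock);
	return 0;
}

Both critical sections only touch list pointers, so holding a spin lock there is cheap; that is what lets the patch drop the mutex from the walker-list updates.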