rhashtable: Add rhashtable_free_and_destroy()

rhashtable_destroy() variant which stops rehashes, iterates over
the table and calls a callback to release resources.

Avoids need for nft_hash to embed rhashtable internals and allows to
get rid of the being_destroyed flag. It also saves a 2nd mutex
lock upon destruction.

Also fixes an RCU lockdep splat on nft set destruction due to
calling rht_for_each_entry_safe() without holding bucket locks.
Open code this loop as we need to know that no mutations may occur in
parallel.

Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Thomas Graf 2015-03-24 14:18:20 +01:00 committed by David S. Miller
parent b5e2c150ac
commit 6b6f302ced
3 changed files with 49 additions and 30 deletions

View File

@ -136,12 +136,10 @@ struct rhashtable_params {
* @run_work: Deferred worker to expand/shrink asynchronously * @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping * @mutex: Mutex to protect current/future table swapping
* @lock: Spin lock to protect walker list * @lock: Spin lock to protect walker list
* @being_destroyed: True if table is set up for destruction
*/ */
struct rhashtable { struct rhashtable {
struct bucket_table __rcu *tbl; struct bucket_table __rcu *tbl;
atomic_t nelems; atomic_t nelems;
bool being_destroyed;
unsigned int key_len; unsigned int key_len;
unsigned int elasticity; unsigned int elasticity;
struct rhashtable_params p; struct rhashtable_params p;
@ -334,6 +332,9 @@ int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter); void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU); void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
void rhashtable_free_and_destroy(struct rhashtable *ht,
void (*free_fn)(void *ptr, void *arg),
void *arg);
void rhashtable_destroy(struct rhashtable *ht); void rhashtable_destroy(struct rhashtable *ht);
#define rht_dereference(p, ht) \ #define rht_dereference(p, ht) \

View File

@ -359,8 +359,6 @@ static void rht_deferred_worker(struct work_struct *work)
ht = container_of(work, struct rhashtable, run_work); ht = container_of(work, struct rhashtable, run_work);
mutex_lock(&ht->mutex); mutex_lock(&ht->mutex);
if (ht->being_destroyed)
goto unlock;
tbl = rht_dereference(ht->tbl, ht); tbl = rht_dereference(ht->tbl, ht);
tbl = rhashtable_last_table(ht, tbl); tbl = rhashtable_last_table(ht, tbl);
@ -372,7 +370,6 @@ static void rht_deferred_worker(struct work_struct *work)
err = rhashtable_rehash_table(ht); err = rhashtable_rehash_table(ht);
unlock:
mutex_unlock(&ht->mutex); mutex_unlock(&ht->mutex);
if (err) if (err)
@ -783,21 +780,53 @@ int rhashtable_init(struct rhashtable *ht,
EXPORT_SYMBOL_GPL(rhashtable_init); EXPORT_SYMBOL_GPL(rhashtable_init);
/** /**
* rhashtable_destroy - destroy hash table * rhashtable_free_and_destroy - free elements and destroy hash table
* @ht: the hash table to destroy * @ht: the hash table to destroy
* @free_fn: callback to release resources of element
* @arg: pointer passed to free_fn
* *
* Frees the bucket array. This function is not rcu safe, therefore the caller * Stops an eventual async resize. If defined, invokes free_fn for each
 * has to make sure that no resizing may happen by unpublishing the hashtable * element to release resources. Please note that RCU protected
* and waiting for the quiescent cycle before releasing the bucket array. * readers may still be accessing the elements. Releasing of resources
* must occur in a compatible manner. Then frees the bucket array.
*
* This function will eventually sleep to wait for an async resize
* to complete. The caller is responsible that no further write operations
* occurs in parallel.
*/ */
void rhashtable_destroy(struct rhashtable *ht) void rhashtable_free_and_destroy(struct rhashtable *ht,
void (*free_fn)(void *ptr, void *arg),
void *arg)
{ {
ht->being_destroyed = true; const struct bucket_table *tbl;
unsigned int i;
cancel_work_sync(&ht->run_work); cancel_work_sync(&ht->run_work);
mutex_lock(&ht->mutex); mutex_lock(&ht->mutex);
bucket_table_free(rht_dereference(ht->tbl, ht)); tbl = rht_dereference(ht->tbl, ht);
if (free_fn) {
for (i = 0; i < tbl->size; i++) {
struct rhash_head *pos, *next;
for (pos = rht_dereference(tbl->buckets[i], ht),
next = !rht_is_a_nulls(pos) ?
rht_dereference(pos->next, ht) : NULL;
!rht_is_a_nulls(pos);
pos = next,
next = !rht_is_a_nulls(pos) ?
rht_dereference(pos->next, ht) : NULL)
free_fn(rht_obj(ht, pos), arg);
}
}
bucket_table_free(tbl);
mutex_unlock(&ht->mutex); mutex_unlock(&ht->mutex);
} }
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
void rhashtable_destroy(struct rhashtable *ht)
{
return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy); EXPORT_SYMBOL_GPL(rhashtable_destroy);

View File

@ -188,26 +188,15 @@ static int nft_hash_init(const struct nft_set *set,
return rhashtable_init(priv, &params); return rhashtable_init(priv, &params);
} }
static void nft_free_element(void *ptr, void *arg)
{
nft_hash_elem_destroy((const struct nft_set *)arg, ptr);
}
static void nft_hash_destroy(const struct nft_set *set) static void nft_hash_destroy(const struct nft_set *set)
{ {
struct rhashtable *priv = nft_set_priv(set); rhashtable_free_and_destroy(nft_set_priv(set), nft_free_element,
const struct bucket_table *tbl; (void *)set);
struct nft_hash_elem *he;
struct rhash_head *pos, *next;
unsigned int i;
/* Stop an eventual async resizing */
priv->being_destroyed = true;
mutex_lock(&priv->mutex);
tbl = rht_dereference(priv->tbl, priv);
for (i = 0; i < tbl->size; i++) {
rht_for_each_entry_safe(he, pos, next, tbl, i, node)
nft_hash_elem_destroy(set, he);
}
mutex_unlock(&priv->mutex);
rhashtable_destroy(priv);
} }
static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features, static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,