mirror of
https://github.com/torvalds/linux.git
synced 2024-11-23 04:31:50 +00:00
rhashtable: Free bucket tables asynchronously after rehash
There is in fact no need to wait for an RCU grace period in the rehash function, since all insertions are guaranteed to go into the new table through spin locks. This patch uses call_rcu to free the old/rehashed table at our leisure. Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
5269b53da4
commit
9d901bc051
@@ -54,6 +54,7 @@ struct rhash_head {
  * @locks_mask: Mask to apply before accessing locks[]
  * @locks: Array of spinlocks protecting individual buckets
  * @walkers: List of active walkers
+ * @rcu: RCU structure for freeing the table
  * @buckets: size * hash buckets
  */
 struct bucket_table {
@@ -63,6 +64,7 @@ struct bucket_table {
 	unsigned int		locks_mask;
 	spinlock_t		*locks;
 	struct list_head	walkers;
+	struct rcu_head		rcu;
 
 	struct rhash_head __rcu	*buckets[] ____cacheline_aligned_in_smp;
 };
@@ -141,6 +141,11 @@ static void bucket_table_free(const struct bucket_table *tbl)
 	kvfree(tbl);
 }
 
+static void bucket_table_free_rcu(struct rcu_head *head)
+{
+	bucket_table_free(container_of(head, struct bucket_table, rcu));
+}
+
 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 					       size_t nbuckets)
 {
@@ -288,9 +293,7 @@ static void rhashtable_rehash(struct rhashtable *ht,
 	 * table, and thus no references to the old table will
 	 * remain.
 	 */
-	synchronize_rcu();
-
-	bucket_table_free(old_tbl);
+	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
 }
 
 /**
|
Loading…
Reference in New Issue
Block a user