
Commit db30485

ying-xue authored and davem330 committed
rhashtable: introduce rhashtable_lookup_insert routine
Introduce a new function called rhashtable_lookup_insert() which makes lookup and insertion atomic under bucket lock protection, helping us avoid introducing an extra lock when we search for and insert an object into the hash table.

Signed-off-by: Ying Xue <[email protected]>
Signed-off-by: Thomas Graf <[email protected]>
Acked-by: Thomas Graf <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent: 54c5b7d
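For orientation, here is a minimal sketch of how a caller might use the new routine. Everything in it — struct test_obj, my_ht, my_params, insert_unique() — is a hypothetical illustration, not part of this commit, and it assumes the rhashtable_params interface of this kernel era (head_offset/key_offset/key_len plus the grow/shrink decision callbacks referenced in the doc comments below):

	#include <linux/rhashtable.h>
	#include <linux/jhash.h>

	/* Hypothetical object type: embeds a struct rhash_head and carries a
	 * fixed-length integer key, which rhashtable_lookup_insert() requires
	 * (it BUG()s when key_len is unset).
	 */
	struct test_obj {
		int			value;	/* the key */
		struct rhash_head	node;
	};

	static struct rhashtable my_ht;

	/* Assumed era-appropriate parameters; rhashtable_init(&my_ht, &my_params)
	 * must have run before any insertion.
	 */
	static struct rhashtable_params my_params = {
		.nelem_hint	= 64,
		.head_offset	= offsetof(struct test_obj, node),
		.key_offset	= offsetof(struct test_obj, value),
		.key_len	= sizeof(int),
		.hashfn		= jhash,
		.grow_decision	= rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};

	/* The point of the new API: lookup and insert happen atomically under
	 * the bucket lock(s), so duplicates are rejected without any extra
	 * caller-side lock.
	 */
	static int insert_unique(struct test_obj *obj)
	{
		if (!rhashtable_lookup_insert(&my_ht, &obj->node))
			return -EEXIST;	/* an entry with the same key exists */
		return 0;
	}

Before this commit, a caller wanting unique keys had to wrap rhashtable_lookup() plus rhashtable_insert() in a lock of its own; the diff below moves that search-and-insert step under the table's bucket locks instead.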

2 files changed: +83, -15 lines

include/linux/rhashtable.h

Lines changed: 1 addition & 0 deletions

@@ -168,6 +168,7 @@ int rhashtable_shrink(struct rhashtable *ht);
 void *rhashtable_lookup(struct rhashtable *ht, const void *key);
 void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
 				bool (*compare)(void *, void *), void *arg);
+bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj);
 
 void rhashtable_destroy(struct rhashtable *ht);
lib/rhashtable.c

Lines changed: 82 additions & 15 deletions

@@ -505,8 +505,26 @@ static void rhashtable_wakeup_worker(struct rhashtable *ht)
 		schedule_delayed_work(&ht->run_work, 0);
 }
 
+static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
+				struct bucket_table *tbl, u32 hash)
+{
+	struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
+							 tbl, hash);
+
+	if (rht_is_a_nulls(head))
+		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
+	else
+		RCU_INIT_POINTER(obj->next, head);
+
+	rcu_assign_pointer(tbl->buckets[hash], obj);
+
+	atomic_inc(&ht->nelems);
+
+	rhashtable_wakeup_worker(ht);
+}
+
 /**
- * rhashtable_insert - insert object into hash hash table
+ * rhashtable_insert - insert object into hash table
  * @ht: hash table
  * @obj: pointer to hash head inside object
  *
@@ -523,7 +541,6 @@ static void rhashtable_wakeup_worker(struct rhashtable *ht)
 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 {
 	struct bucket_table *tbl;
-	struct rhash_head *head;
 	spinlock_t *lock;
 	unsigned hash;
 
@@ -534,19 +551,9 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 	lock = bucket_lock(tbl, hash);
 
 	spin_lock_bh(lock);
-	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
-	if (rht_is_a_nulls(head))
-		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
-	else
-		RCU_INIT_POINTER(obj->next, head);
-
-	rcu_assign_pointer(tbl->buckets[hash], obj);
+	__rhashtable_insert(ht, obj, tbl, hash);
 	spin_unlock_bh(lock);
 
-	atomic_inc(&ht->nelems);
-
-	rhashtable_wakeup_worker(ht);
-
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(rhashtable_insert);
@@ -560,7 +567,7 @@ EXPORT_SYMBOL_GPL(rhashtable_insert);
  * walk the bucket chain upon removal. The removal operation is thus
  * considerable slow if the hash table is not correctly sized.
  *
- * Will automatically shrink the table via rhashtable_expand() if the the
+ * Will automatically shrink the table via rhashtable_expand() if the
  * shrink_decision function specified at rhashtable_init() returns true.
  *
  * The caller must ensure that no concurrent table mutations occur. It is
@@ -641,7 +648,7 @@ static bool rhashtable_compare(void *ptr, void *arg)
  * for a entry with an identical key. The first matching entry is returned.
  *
  * This lookup function may only be used for fixed key hash table (key_len
- * paramter set). It will BUG() if used inappropriately.
+ * parameter set). It will BUG() if used inappropriately.
  *
  * Lookups may occur in parallel with hashtable mutations and resizing.
 */
@@ -702,6 +709,66 @@ void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
 }
 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
 
+/**
+ * rhashtable_lookup_insert - lookup and insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * This lookup function may only be used for fixed key hash table (key_len
+ * parameter set). It will BUG() if used inappropriately.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
+{
+	struct bucket_table *new_tbl, *old_tbl;
+	spinlock_t *new_bucket_lock, *old_bucket_lock;
+	u32 new_hash, old_hash;
+	bool success = true;
+
+	BUG_ON(!ht->p.key_len);
+
+	rcu_read_lock();
+
+	old_tbl = rht_dereference_rcu(ht->tbl, ht);
+	old_hash = head_hashfn(ht, old_tbl, obj);
+	old_bucket_lock = bucket_lock(old_tbl, old_hash);
+	spin_lock_bh(old_bucket_lock);
+
+	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+	new_hash = head_hashfn(ht, new_tbl, obj);
+	new_bucket_lock = bucket_lock(new_tbl, new_hash);
+	if (unlikely(old_tbl != new_tbl))
+		spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
+
+	if (rhashtable_lookup(ht, rht_obj(ht, obj) + ht->p.key_offset)) {
+		success = false;
+		goto exit;
+	}
+
+	__rhashtable_insert(ht, obj, new_tbl, new_hash);
+
+exit:
+	if (unlikely(old_tbl != new_tbl))
+		spin_unlock_bh(new_bucket_lock);
+	spin_unlock_bh(old_bucket_lock);
+
+	rcu_read_unlock();
+
+	return success;
+}
+EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
+
 static size_t rounded_hashtable_size(struct rhashtable_params *params)
 {
 	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
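Two design points are worth noting in the function above. First, when a resize is in flight, ht->tbl and ht->future_tbl differ, so the function takes the bucket lock in both tables; the second acquisition uses spin_lock_bh_nested() with RHT_LOCK_NESTED, the annotation that tells lockdep that holding two locks of the same lock class is intentional here. Second, the duplicate check goes through rhashtable_lookup(), which already searches both the old and new table during a resize, while the insertion itself always targets the new (future) table, so the entry cannot be lost when the resize completes.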
