@@ -505,8 +505,26 @@ static void rhashtable_wakeup_worker(struct rhashtable *ht)
 		schedule_delayed_work(&ht->run_work, 0);
 }
 
+static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
+				struct bucket_table *tbl, u32 hash)
+{
+	struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
+							 tbl, hash);
+
+	if (rht_is_a_nulls(head))
+		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
+	else
+		RCU_INIT_POINTER(obj->next, head);
+
+	rcu_assign_pointer(tbl->buckets[hash], obj);
+
+	atomic_inc(&ht->nelems);
+
+	rhashtable_wakeup_worker(ht);
+}
+
 /**
- * rhashtable_insert - insert object into hash hash table
+ * rhashtable_insert - insert object into hash table
  * @ht:		hash table
  * @obj:	pointer to hash head inside object
  *
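The new helper links obj at the head of its bucket chain. Note the empty-bucket case: the chain is terminated with a "nulls" marker rather than a plain NULL, so an RCU lookup racing with a resize can tell from the marker value whether it finished in the bucket it started in. Below is a standalone sketch of that encoding, modeled on the NULLS_MARKER() scheme in include/linux/list_nulls.h; the rhashtable variant folds in its own base value, so the constants here are illustrative only.

/*
 * Standalone sketch of a "nulls" end-of-chain encoding, modeled on
 * include/linux/list_nulls.h. A nulls "pointer" has bit 0 set, with a
 * marker value (here: the bucket hash) stored in the remaining bits.
 * Real pointers are at least 2-byte aligned, so bit 0 is always free.
 */
#include <stdio.h>

#define NULLS_MARKER(value) (1UL | (((unsigned long)(value)) << 1))

static int is_a_nulls(const void *ptr)
{
	return (unsigned long)ptr & 1;
}

int main(void)
{
	unsigned int hash = 42;
	void *chain_end = (void *)NULLS_MARKER(hash);

	/* A lookup walking the chain stops here and can recover the
	 * hash to verify it ended in the bucket it started in. */
	if (is_a_nulls(chain_end))
		printf("nulls marker, hash = %lu\n",
		       (unsigned long)chain_end >> 1);
	return 0;
}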
@@ -523,7 +541,6 @@ static void rhashtable_wakeup_worker(struct rhashtable *ht)
 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 {
 	struct bucket_table *tbl;
-	struct rhash_head *head;
 	spinlock_t *lock;
 	unsigned hash;
 
@@ -534,19 +551,9 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 	lock = bucket_lock(tbl, hash);
 
 	spin_lock_bh(lock);
-	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
-	if (rht_is_a_nulls(head))
-		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
-	else
-		RCU_INIT_POINTER(obj->next, head);
-
-	rcu_assign_pointer(tbl->buckets[hash], obj);
+	__rhashtable_insert(ht, obj, tbl, hash);
 	spin_unlock_bh(lock);
 
-	atomic_inc(&ht->nelems);
-
-	rhashtable_wakeup_worker(ht);
-
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(rhashtable_insert);
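With the chain manipulation factored out, rhashtable_insert() itself reduces to lock, link, unlock. For orientation, a hypothetical caller might look like the sketch below; struct my_obj and all parameter values are invented for illustration, but the rhashtable_params fields and the rht_grow_above_75()/rht_shrink_below_30() helpers are the ones this version of lib/rhashtable.c provides.

/* Hypothetical caller, sketched against this kernel version's API:
 * the object embeds a struct rhash_head, and rhashtable_params tells
 * the table where the key and the head live inside the object. */
#include <linux/rhashtable.h>
#include <linux/jhash.h>
#include <linux/slab.h>

struct my_obj {
	u32 portid;			/* fixed-size key */
	struct rhash_head node;		/* bucket chain linkage */
};

static struct rhashtable my_ht;

static int my_table_init(void)
{
	struct rhashtable_params params = {
		.nelem_hint	 = 64,
		.head_offset	 = offsetof(struct my_obj, node),
		.key_offset	 = offsetof(struct my_obj, portid),
		.key_len	 = sizeof(u32),
		.hashfn		 = jhash,
		.grow_decision	 = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};

	return rhashtable_init(&my_ht, &params);
}

static int my_obj_add(u32 portid)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return -ENOMEM;
	obj->portid = portid;
	rhashtable_insert(&my_ht, &obj->node);	/* may defer a resize */
	return 0;
}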
@@ -560,7 +567,7 @@ EXPORT_SYMBOL_GPL(rhashtable_insert);
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
- * Will automatically shrink the table via rhashtable_expand() if the the
+ * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
@@ -641,7 +648,7 @@ static bool rhashtable_compare(void *ptr, void *arg)
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
- * paramter set). It will BUG() if used inappropriately.
+ * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
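A matching reader side, continuing the hypothetical my_obj sketch from above: lookups take only rcu_read_lock() and may run concurrently with inserts, removals and resizes.

/* Hypothetical RCU reader. The returned object is only guaranteed to
 * stay valid inside the RCU read-side section, so this sketch merely
 * tests for existence rather than returning the pointer. */
static bool my_obj_exists(u32 portid)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = rhashtable_lookup(&my_ht, &portid);
	rcu_read_unlock();

	return obj != NULL;
}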
@@ -702,6 +709,66 @@ void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
 }
 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
 
+/**
+ * rhashtable_lookup_insert - lookup and insert object into hash table
+ * @ht:		hash table
+ * @obj:	pointer to hash head inside object
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * This lookup function may only be used for fixed key hash tables (key_len
+ * parameter set). It will BUG() if used inappropriately.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
+{
+	struct bucket_table *new_tbl, *old_tbl;
+	spinlock_t *new_bucket_lock, *old_bucket_lock;
+	u32 new_hash, old_hash;
+	bool success = true;
+
+	BUG_ON(!ht->p.key_len);
+
+	rcu_read_lock();
+
+	old_tbl = rht_dereference_rcu(ht->tbl, ht);
+	old_hash = head_hashfn(ht, old_tbl, obj);
+	old_bucket_lock = bucket_lock(old_tbl, old_hash);
+	spin_lock_bh(old_bucket_lock);
+
+	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+	new_hash = head_hashfn(ht, new_tbl, obj);
+	new_bucket_lock = bucket_lock(new_tbl, new_hash);
+	if (unlikely(old_tbl != new_tbl))
+		spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
+
+	if (rhashtable_lookup(ht, rht_obj(ht, obj) + ht->p.key_offset)) {
+		success = false;
+		goto exit;
+	}
+
+	__rhashtable_insert(ht, obj, new_tbl, new_hash);
+
+exit:
+	if (unlikely(old_tbl != new_tbl))
+		spin_unlock_bh(new_bucket_lock);
+	spin_unlock_bh(old_bucket_lock);
+
+	rcu_read_unlock();
+
+	return success;
+}
+EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
+
 static size_t rounded_hashtable_size(struct rhashtable_params *params)
 {
 	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
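Finally, a hypothetical caller of the new primitive, again continuing the my_obj sketch: because the duplicate search and the insertion happen under the same bucket lock(s), two concurrent adders of the same key cannot both succeed, and the loser simply frees its copy.

/* Hypothetical duplicate-safe insert; safe from atomic context, per
 * the kerneldoc above, hence GFP_ATOMIC for the allocation. */
static int my_obj_add_unique(u32 portid)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_ATOMIC);

	if (!obj)
		return -ENOMEM;
	obj->portid = portid;

	if (!rhashtable_lookup_insert(&my_ht, &obj->node)) {
		kfree(obj);	/* an entry with this key already exists */
		return -EEXIST;
	}
	return 0;
}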