@@ -36,8 +36,8 @@ struct mpath_node {
36
36
struct mesh_path * mpath ;
37
37
};
38
38
39
- static struct mesh_table * mesh_paths ;
40
- static struct mesh_table * mpp_paths ; /* Store paths for MPP&MAP */
39
+ static struct mesh_table __rcu * mesh_paths ;
40
+ static struct mesh_table __rcu * mpp_paths ; /* Store paths for MPP&MAP */
41
41
42
42
int mesh_paths_generation ;
43
43
@@ -48,6 +48,29 @@ int mesh_paths_generation;
48
48
static DEFINE_RWLOCK (pathtbl_resize_lock );
49
49
50
50
51
/*
 * Fetch the mesh_paths table for a resize operation.
 *
 * rcu_dereference_protected() both documents and lockdep-verifies that
 * the caller holds pathtbl_resize_lock, so no RCU read-side critical
 * section is required for this access.
 */
static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}
56
+
57
/*
 * Fetch the mpp_paths table for a resize operation.
 *
 * As with resize_dereference_mesh_paths(), the caller must hold
 * pathtbl_resize_lock; lockdep checks this via the second argument.
 */
static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}
62
+
63
/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 * for_each_mesh_entry(rcu_dereference(...), ...)
 *
 * Iterates every entry of @tbl: for each hash bucket index @i it walks
 * the bucket's hlist, setting @node to each mpath_node (with @p as the
 * list cursor). Buckets are traversed with hlist_for_each_entry_rcu(),
 * so callers must be inside an RCU read-side section or hold
 * pathtbl_resize_lock for the duration of the walk.
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
72
+
73
+
51
74
static struct mesh_table * mesh_table_alloc (int size_order )
52
75
{
53
76
int i ;
@@ -258,12 +281,13 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
258
281
*/
259
282
struct mesh_path * mesh_path_lookup_by_idx (int idx , struct ieee80211_sub_if_data * sdata )
260
283
{
284
+ struct mesh_table * tbl = rcu_dereference (mesh_paths );
261
285
struct mpath_node * node ;
262
286
struct hlist_node * p ;
263
287
int i ;
264
288
int j = 0 ;
265
289
266
- for_each_mesh_entry (mesh_paths , p , node , i ) {
290
+ for_each_mesh_entry (tbl , p , node , i ) {
267
291
if (sdata && node -> mpath -> sdata != sdata )
268
292
continue ;
269
293
if (j ++ == idx ) {
@@ -293,6 +317,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
293
317
{
294
318
struct ieee80211_if_mesh * ifmsh = & sdata -> u .mesh ;
295
319
struct ieee80211_local * local = sdata -> local ;
320
+ struct mesh_table * tbl ;
296
321
struct mesh_path * mpath , * new_mpath ;
297
322
struct mpath_node * node , * new_node ;
298
323
struct hlist_head * bucket ;
@@ -332,10 +357,12 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
332
357
spin_lock_init (& new_mpath -> state_lock );
333
358
init_timer (& new_mpath -> timer );
334
359
335
- hash_idx = mesh_table_hash (dst , sdata , mesh_paths );
336
- bucket = & mesh_paths -> hash_buckets [hash_idx ];
360
+ tbl = resize_dereference_mesh_paths ();
337
361
338
- spin_lock_bh (& mesh_paths -> hashwlock [hash_idx ]);
362
+ hash_idx = mesh_table_hash (dst , sdata , tbl );
363
+ bucket = & tbl -> hash_buckets [hash_idx ];
364
+
365
+ spin_lock_bh (& tbl -> hashwlock [hash_idx ]);
339
366
340
367
err = - EEXIST ;
341
368
hlist_for_each_entry (node , n , bucket , list ) {
@@ -345,13 +372,13 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
345
372
}
346
373
347
374
hlist_add_head_rcu (& new_node -> list , bucket );
348
- if (atomic_inc_return (& mesh_paths -> entries ) >=
349
- mesh_paths -> mean_chain_len * (mesh_paths -> hash_mask + 1 ))
375
+ if (atomic_inc_return (& tbl -> entries ) >=
376
+ tbl -> mean_chain_len * (tbl -> hash_mask + 1 ))
350
377
grow = 1 ;
351
378
352
379
mesh_paths_generation ++ ;
353
380
354
- spin_unlock_bh (& mesh_paths -> hashwlock [hash_idx ]);
381
+ spin_unlock_bh (& tbl -> hashwlock [hash_idx ]);
355
382
read_unlock_bh (& pathtbl_resize_lock );
356
383
if (grow ) {
357
384
set_bit (MESH_WORK_GROW_MPATH_TABLE , & ifmsh -> wrkq_flags );
@@ -360,7 +387,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
360
387
return 0 ;
361
388
362
389
err_exists :
363
- spin_unlock_bh (& mesh_paths -> hashwlock [hash_idx ]);
390
+ spin_unlock_bh (& tbl -> hashwlock [hash_idx ]);
364
391
read_unlock_bh (& pathtbl_resize_lock );
365
392
kfree (new_node );
366
393
err_node_alloc :
@@ -382,11 +409,11 @@ void mesh_mpath_table_grow(void)
382
409
struct mesh_table * oldtbl , * newtbl ;
383
410
384
411
write_lock_bh (& pathtbl_resize_lock );
385
- newtbl = mesh_table_alloc (mesh_paths -> size_order + 1 );
412
+ oldtbl = resize_dereference_mesh_paths ();
413
+ newtbl = mesh_table_alloc (oldtbl -> size_order + 1 );
386
414
if (!newtbl )
387
415
goto out ;
388
- oldtbl = mesh_paths ;
389
- if (mesh_table_grow (mesh_paths , newtbl ) < 0 ) {
416
+ if (mesh_table_grow (oldtbl , newtbl ) < 0 ) {
390
417
__mesh_table_free (newtbl );
391
418
goto out ;
392
419
}
@@ -403,11 +430,11 @@ void mesh_mpp_table_grow(void)
403
430
struct mesh_table * oldtbl , * newtbl ;
404
431
405
432
write_lock_bh (& pathtbl_resize_lock );
406
- newtbl = mesh_table_alloc (mpp_paths -> size_order + 1 );
433
+ oldtbl = resize_dereference_mpp_paths ();
434
+ newtbl = mesh_table_alloc (oldtbl -> size_order + 1 );
407
435
if (!newtbl )
408
436
goto out ;
409
- oldtbl = mpp_paths ;
410
- if (mesh_table_grow (mpp_paths , newtbl ) < 0 ) {
437
+ if (mesh_table_grow (oldtbl , newtbl ) < 0 ) {
411
438
__mesh_table_free (newtbl );
412
439
goto out ;
413
440
}
@@ -422,6 +449,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
422
449
{
423
450
struct ieee80211_if_mesh * ifmsh = & sdata -> u .mesh ;
424
451
struct ieee80211_local * local = sdata -> local ;
452
+ struct mesh_table * tbl ;
425
453
struct mesh_path * mpath , * new_mpath ;
426
454
struct mpath_node * node , * new_node ;
427
455
struct hlist_head * bucket ;
@@ -456,10 +484,12 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
456
484
new_mpath -> exp_time = jiffies ;
457
485
spin_lock_init (& new_mpath -> state_lock );
458
486
459
- hash_idx = mesh_table_hash (dst , sdata , mpp_paths );
460
- bucket = & mpp_paths -> hash_buckets [hash_idx ];
487
+ tbl = resize_dereference_mpp_paths ();
461
488
462
- spin_lock_bh (& mpp_paths -> hashwlock [hash_idx ]);
489
+ hash_idx = mesh_table_hash (dst , sdata , tbl );
490
+ bucket = & tbl -> hash_buckets [hash_idx ];
491
+
492
+ spin_lock_bh (& tbl -> hashwlock [hash_idx ]);
463
493
464
494
err = - EEXIST ;
465
495
hlist_for_each_entry (node , n , bucket , list ) {
@@ -469,11 +499,11 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
469
499
}
470
500
471
501
hlist_add_head_rcu (& new_node -> list , bucket );
472
- if (atomic_inc_return (& mpp_paths -> entries ) >=
473
- mpp_paths -> mean_chain_len * (mpp_paths -> hash_mask + 1 ))
502
+ if (atomic_inc_return (& tbl -> entries ) >=
503
+ tbl -> mean_chain_len * (tbl -> hash_mask + 1 ))
474
504
grow = 1 ;
475
505
476
- spin_unlock_bh (& mpp_paths -> hashwlock [hash_idx ]);
506
+ spin_unlock_bh (& tbl -> hashwlock [hash_idx ]);
477
507
read_unlock_bh (& pathtbl_resize_lock );
478
508
if (grow ) {
479
509
set_bit (MESH_WORK_GROW_MPP_TABLE , & ifmsh -> wrkq_flags );
@@ -482,7 +512,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
482
512
return 0 ;
483
513
484
514
err_exists :
485
- spin_unlock_bh (& mpp_paths -> hashwlock [hash_idx ]);
515
+ spin_unlock_bh (& tbl -> hashwlock [hash_idx ]);
486
516
read_unlock_bh (& pathtbl_resize_lock );
487
517
kfree (new_node );
488
518
err_node_alloc :
@@ -502,6 +532,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
502
532
*/
503
533
void mesh_plink_broken (struct sta_info * sta )
504
534
{
535
+ struct mesh_table * tbl ;
505
536
static const u8 bcast [ETH_ALEN ] = {0xff , 0xff , 0xff , 0xff , 0xff , 0xff };
506
537
struct mesh_path * mpath ;
507
538
struct mpath_node * node ;
@@ -510,10 +541,11 @@ void mesh_plink_broken(struct sta_info *sta)
510
541
int i ;
511
542
512
543
rcu_read_lock ();
513
- for_each_mesh_entry (mesh_paths , p , node , i ) {
544
+ tbl = rcu_dereference (mesh_paths );
545
+ for_each_mesh_entry (tbl , p , node , i ) {
514
546
mpath = node -> mpath ;
515
547
spin_lock_bh (& mpath -> state_lock );
516
- if (mpath -> next_hop == sta &&
548
+ if (rcu_dereference ( mpath -> next_hop ) == sta &&
517
549
mpath -> flags & MESH_PATH_ACTIVE &&
518
550
!(mpath -> flags & MESH_PATH_FIXED )) {
519
551
mpath -> flags &= ~MESH_PATH_ACTIVE ;
@@ -542,30 +574,38 @@ void mesh_plink_broken(struct sta_info *sta)
542
574
*/
543
575
/*
 * mesh_path_flush_by_nexthop - Delete all mesh paths whose next hop is @sta
 *
 * @sta: mesh peer whose paths should be removed
 *
 * Walks the whole mesh path table under rcu_read_lock(); both the table
 * pointer and each path's next_hop pointer are RCU-managed, hence the
 * rcu_dereference() calls. Removing entries during the walk is safe
 * because for_each_mesh_entry() uses the RCU hlist iterator.
 *
 * NOTE(review): mesh_path_del() acquires pathtbl_resize_lock (read,
 * BH-disabling) and a per-bucket spinlock inside this RCU read-side
 * section -- presumably fine since neither lock sleeps; confirm against
 * the path table locking rules.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}
556
592
557
593
/*
 * mesh_path_flush - Delete all mesh paths belonging to one interface
 *
 * @sdata: the mesh interface whose paths are removed
 *
 * Iterates the entire table under rcu_read_lock() and deletes every
 * entry owned by @sdata. The RCU hlist iterator used by
 * for_each_mesh_entry() makes deletion during traversal safe.
 */
void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		/* only flush paths owned by this interface */
		if (mpath->sdata == sdata)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}
570
610
571
611
static void mesh_path_node_reclaim (struct rcu_head * rp )
@@ -589,6 +629,7 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
589
629
*/
590
630
int mesh_path_del (u8 * addr , struct ieee80211_sub_if_data * sdata )
591
631
{
632
+ struct mesh_table * tbl ;
592
633
struct mesh_path * mpath ;
593
634
struct mpath_node * node ;
594
635
struct hlist_head * bucket ;
@@ -597,19 +638,20 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
597
638
int err = 0 ;
598
639
599
640
read_lock_bh (& pathtbl_resize_lock );
600
- hash_idx = mesh_table_hash (addr , sdata , mesh_paths );
601
- bucket = & mesh_paths -> hash_buckets [hash_idx ];
641
+ tbl = resize_dereference_mesh_paths ();
642
+ hash_idx = mesh_table_hash (addr , sdata , tbl );
643
+ bucket = & tbl -> hash_buckets [hash_idx ];
602
644
603
- spin_lock_bh (& mesh_paths -> hashwlock [hash_idx ]);
645
+ spin_lock_bh (& tbl -> hashwlock [hash_idx ]);
604
646
hlist_for_each_entry (node , n , bucket , list ) {
605
647
mpath = node -> mpath ;
606
648
if (mpath -> sdata == sdata &&
607
- memcmp (addr , mpath -> dst , ETH_ALEN ) == 0 ) {
649
+ memcmp (addr , mpath -> dst , ETH_ALEN ) == 0 ) {
608
650
spin_lock_bh (& mpath -> state_lock );
609
651
mpath -> flags |= MESH_PATH_RESOLVING ;
610
652
hlist_del_rcu (& node -> list );
611
653
call_rcu (& node -> rcu , mesh_path_node_reclaim );
612
- atomic_dec (& mesh_paths -> entries );
654
+ atomic_dec (& tbl -> entries );
613
655
spin_unlock_bh (& mpath -> state_lock );
614
656
goto enddel ;
615
657
}
@@ -618,7 +660,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
618
660
err = - ENXIO ;
619
661
enddel :
620
662
mesh_paths_generation ++ ;
621
- spin_unlock_bh (& mesh_paths -> hashwlock [hash_idx ]);
663
+ spin_unlock_bh (& tbl -> hashwlock [hash_idx ]);
622
664
read_unlock_bh (& pathtbl_resize_lock );
623
665
return err ;
624
666
}
@@ -747,52 +789,60 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
747
789
748
790
int mesh_pathtbl_init (void )
749
791
{
750
- mesh_paths = mesh_table_alloc (INIT_PATHS_SIZE_ORDER );
751
- if (!mesh_paths )
792
+ struct mesh_table * tbl_path , * tbl_mpp ;
793
+
794
+ tbl_path = mesh_table_alloc (INIT_PATHS_SIZE_ORDER );
795
+ if (!tbl_path )
752
796
return - ENOMEM ;
753
- mesh_paths -> free_node = & mesh_path_node_free ;
754
- mesh_paths -> copy_node = & mesh_path_node_copy ;
755
- mesh_paths -> mean_chain_len = MEAN_CHAIN_LEN ;
797
+ tbl_path -> free_node = & mesh_path_node_free ;
798
+ tbl_path -> copy_node = & mesh_path_node_copy ;
799
+ tbl_path -> mean_chain_len = MEAN_CHAIN_LEN ;
756
800
757
- mpp_paths = mesh_table_alloc (INIT_PATHS_SIZE_ORDER );
758
- if (!mpp_paths ) {
759
- mesh_table_free (mesh_paths , true);
801
+ tbl_mpp = mesh_table_alloc (INIT_PATHS_SIZE_ORDER );
802
+ if (!tbl_mpp ) {
803
+ mesh_table_free (tbl_path , true);
760
804
return - ENOMEM ;
761
805
}
762
- mpp_paths -> free_node = & mesh_path_node_free ;
763
- mpp_paths -> copy_node = & mesh_path_node_copy ;
764
- mpp_paths -> mean_chain_len = MEAN_CHAIN_LEN ;
806
+ tbl_mpp -> free_node = & mesh_path_node_free ;
807
+ tbl_mpp -> copy_node = & mesh_path_node_copy ;
808
+ tbl_mpp -> mean_chain_len = MEAN_CHAIN_LEN ;
809
+
810
+ /* Need no locking since this is during init */
811
+ RCU_INIT_POINTER (mesh_paths , tbl_path );
812
+ RCU_INIT_POINTER (mpp_paths , tbl_mpp );
765
813
766
814
return 0 ;
767
815
}
768
816
769
817
void mesh_path_expire (struct ieee80211_sub_if_data * sdata )
770
818
{
819
+ struct mesh_table * tbl ;
771
820
struct mesh_path * mpath ;
772
821
struct mpath_node * node ;
773
822
struct hlist_node * p ;
774
823
int i ;
775
824
776
- read_lock_bh (& pathtbl_resize_lock );
777
- for_each_mesh_entry (mesh_paths , p , node , i ) {
825
+ rcu_read_lock ();
826
+ tbl = rcu_dereference (mesh_paths );
827
+ for_each_mesh_entry (tbl , p , node , i ) {
778
828
if (node -> mpath -> sdata != sdata )
779
829
continue ;
780
830
mpath = node -> mpath ;
781
831
spin_lock_bh (& mpath -> state_lock );
782
832
if ((!(mpath -> flags & MESH_PATH_RESOLVING )) &&
783
833
(!(mpath -> flags & MESH_PATH_FIXED )) &&
784
- time_after (jiffies ,
785
- mpath -> exp_time + MESH_PATH_EXPIRE )) {
834
+ time_after (jiffies , mpath -> exp_time + MESH_PATH_EXPIRE )) {
786
835
spin_unlock_bh (& mpath -> state_lock );
787
836
mesh_path_del (mpath -> dst , mpath -> sdata );
788
837
} else
789
838
spin_unlock_bh (& mpath -> state_lock );
790
839
}
791
- read_unlock_bh ( & pathtbl_resize_lock );
840
+ rcu_read_unlock ( );
792
841
}
793
842
794
843
void mesh_pathtbl_unregister (void )
795
844
{
796
- mesh_table_free (mesh_paths , true);
797
- mesh_table_free (mpp_paths , true);
845
+ /* no need for locking during exit path */
846
+ mesh_table_free (rcu_dereference_raw (mesh_paths ), true);
847
+ mesh_table_free (rcu_dereference_raw (mpp_paths ), true);
798
848
}
0 commit comments