@@ -38,8 +38,8 @@
 #define MASK_ARRAY_SIZE_MIN	16
 #define REHASH_INTERVAL		(10 * 60 * HZ)
 
+#define MC_DEFAULT_HASH_ENTRIES	256
 #define MC_HASH_SHIFT		8
-#define MC_HASH_ENTRIES		(1u << MC_HASH_SHIFT)
 #define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
 
 static struct kmem_cache *flow_cache;
@@ -341,15 +341,79 @@ static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
 	}
 }
 
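+/* Free the per-CPU entry array, then the mask_cache structure itself. */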
+static void __mask_cache_destroy(struct mask_cache *mc)
+{
+	free_percpu(mc->mask_cache);
+	kfree(mc);
+}
+
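+/* RCU callback: the retired cache is freed only once all readers are done. */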
+static void mask_cache_rcu_cb(struct rcu_head *rcu)
+{
+	struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);
+
+	__mask_cache_destroy(mc);
+}
+
+static struct mask_cache *tbl_mask_cache_alloc(u32 size)
+{
+	struct mask_cache_entry __percpu *cache = NULL;
+	struct mask_cache *new;
+
+	/* Only allow the size to be 0 or a power of 2, and do not let it
+	 * exceed the percpu allocation size.
+	 */
+	if ((!is_power_of_2(size) && size != 0) ||
+	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
+		return NULL;
+
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return NULL;
+
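+	/* A size of zero is valid and leaves the per-CPU cache disabled. */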
+	new->cache_size = size;
+	if (new->cache_size > 0) {
+		cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry),
+						  new->cache_size),
+				       __alignof__(struct mask_cache_entry));
+		if (!cache) {
+			kfree(new);
+			return NULL;
+		}
+	}
+
+	new->mask_cache = cache;
+	return new;
+}
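+
+/* Install a cache of the requested size; the old one is reclaimed via RCU. */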
+int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
+{
+	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);
+	struct mask_cache *new;
+
+	if (size == mc->cache_size)
+		return 0;
+
+	if ((!is_power_of_2(size) && size != 0) ||
+	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
+		return -EINVAL;
+
+	new = tbl_mask_cache_alloc(size);
+	if (!new)
+		return -ENOMEM;
+
+	rcu_assign_pointer(table->mask_cache, new);
+	call_rcu(&mc->rcu, mask_cache_rcu_cb);
+
+	return 0;
+}
+
 int ovs_flow_tbl_init(struct flow_table *table)
 {
 	struct table_instance *ti, *ufid_ti;
+	struct mask_cache *mc;
 	struct mask_array *ma;
 
-	table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
-					   MC_HASH_ENTRIES,
-					   __alignof__(struct mask_cache_entry));
-	if (!table->mask_cache)
+	mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
+	if (!mc)
 		return -ENOMEM;
 
 	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
@@ -367,6 +431,7 @@ int ovs_flow_tbl_init(struct flow_table *table)
 	rcu_assign_pointer(table->ti, ti);
 	rcu_assign_pointer(table->ufid_ti, ufid_ti);
 	rcu_assign_pointer(table->mask_array, ma);
+	rcu_assign_pointer(table->mask_cache, mc);
 	table->last_rehash = jiffies;
 	table->count = 0;
 	table->ufid_count = 0;
@@ -377,7 +442,7 @@ int ovs_flow_tbl_init(struct flow_table *table)
 free_mask_array:
 	__mask_array_destroy(ma);
 free_mask_cache:
-	free_percpu(table->mask_cache);
+	__mask_cache_destroy(mc);
 	return -ENOMEM;
 }
 
@@ -453,9 +518,11 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
 {
 	struct table_instance *ti = rcu_dereference_raw(table->ti);
 	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
+	struct mask_cache *mc = rcu_dereference_raw(table->mask_cache);
+	struct mask_array *ma = rcu_dereference_raw(table->mask_array);
 
-	free_percpu(table->mask_cache);
-	call_rcu(&table->mask_array->rcu, mask_array_rcu_cb);
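+	/* Readers may still hold references; free both after a grace period. */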
+	call_rcu(&mc->rcu, mask_cache_rcu_cb);
+	call_rcu(&ma->rcu, mask_array_rcu_cb);
 	table_instance_destroy(table, ti, ufid_ti, false);
 }
 
@@ -667,6 +734,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 				   struct mask_array *ma,
 				   const struct sw_flow_key *key,
 				   u32 *n_mask_hit,
+				   u32 *n_cache_hit,
 				   u32 *index)
 {
 	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
@@ -682,6 +750,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 			u64_stats_update_begin(&ma->syncp);
 			usage_counters[*index]++;
 			u64_stats_update_end(&ma->syncp);
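+			/* The mask at the cached index matched; count a cache hit. */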
+			(*n_cache_hit)++;
 			return flow;
 		}
 	}
@@ -719,8 +788,10 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 					  const struct sw_flow_key *key,
 					  u32 skb_hash,
-					  u32 *n_mask_hit)
+					  u32 *n_mask_hit,
+					  u32 *n_cache_hit)
 {
+	struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
 	struct mask_array *ma = rcu_dereference(tbl->mask_array);
 	struct table_instance *ti = rcu_dereference(tbl->ti);
 	struct mask_cache_entry *entries, *ce;
@@ -729,10 +800,13 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 	int seg;
 
 	*n_mask_hit = 0;
-	if (unlikely(!skb_hash)) {
+	*n_cache_hit = 0;
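+	/* Bypass the cache when it is disabled (size 0) or skb_hash is unset. */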
+	if (unlikely(!skb_hash || mc->cache_size == 0)) {
 		u32 mask_index = 0;
+		u32 cache = 0;
 
-		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
+		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
+				   &mask_index);
 	}
 
 	/* Pre and post recirculation flows usually have the same skb_hash
@@ -743,17 +817,17 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 
 	ce = NULL;
 	hash = skb_hash;
-	entries = this_cpu_ptr(tbl->mask_cache);
+	entries = this_cpu_ptr(mc->mask_cache);
 
 	/* Find the cache entry 'ce' to operate on. */
 	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
-		int index = hash & (MC_HASH_ENTRIES - 1);
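+		/* cache_size is a power of two, so this masks to a valid index. */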
+		int index = hash & (mc->cache_size - 1);
 		struct mask_cache_entry *e;
 
 		e = &entries[index];
 		if (e->skb_hash == skb_hash) {
 			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
-					   &e->mask_index);
+					   n_cache_hit, &e->mask_index);
 			if (!flow)
 				e->skb_hash = 0;
 			return flow;
@@ -766,10 +840,12 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 	}
 
 	/* Cache miss, do full lookup. */
-	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
+	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
+			   &ce->mask_index);
 	if (flow)
 		ce->skb_hash = skb_hash;
 
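+	/* This path was a cache miss; don't count the refill as a hit. */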
+	*n_cache_hit = 0;
 	return flow;
 }
 
@@ -779,9 +855,10 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
 	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
 	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
 	u32 __always_unused n_mask_hit;
+	u32 __always_unused n_cache_hit;
 	u32 index = 0;
 
-	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
+	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
 }
 
 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
@@ -858,6 +935,13 @@ int ovs_flow_tbl_num_masks(const struct flow_table *table)
 	return READ_ONCE(ma->count);
 }
 
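+/* Report the currently configured size of the masks cache. */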
+u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
+{
+	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);
+
+	return READ_ONCE(mc->cache_size);
+}
+
 static struct table_instance *table_instance_expand(struct table_instance *ti,
 						    bool ufid)
 {
@@ -1086,8 +1170,8 @@ void ovs_flow_masks_rebalance(struct flow_table *table)
 	for (i = 0; i < masks_entries; i++) {
 		int index = masks_and_count[i].index;
 
-		new->masks[new->count++] =
-			rcu_dereference_ovsl(ma->masks[index]);
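+		/* The mask may have been freed concurrently; copy only live entries. */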
+		if (ovsl_dereference(ma->masks[index]))
+			new->masks[new->count++] = ma->masks[index];
 	}
 
 	rcu_assign_pointer(table->mask_array, new);