--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -38,8 +38,8 @@
 #define MASK_ARRAY_SIZE_MIN	16
 #define REHASH_INTERVAL		(10 * 60 * HZ)
 
+#define MC_DEFAULT_HASH_ENTRIES	256
 #define MC_HASH_SHIFT		8
-#define MC_HASH_ENTRIES		(1u << MC_HASH_SHIFT)
 #define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
 
 static struct kmem_cache *flow_cache;
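Note on this hunk: the new MC_DEFAULT_HASH_ENTRIES (256) equals the removed compile-time MC_HASH_ENTRIES (1u << MC_HASH_SHIFT, with MC_HASH_SHIFT == 8), so the default behavior is unchanged; the constant merely moves from a hard-wired array size to an initial value. A hypothetical compile-time guard, not part of the patch, could pin that relationship down:

```c
/* Hypothetical guard, not in the patch: the lookup path masks the hash
 * with (cache_size - 1), so the default must remain a power of two.
 */
#include <linux/build_bug.h>

static_assert(MC_DEFAULT_HASH_ENTRIES == (1u << MC_HASH_SHIFT));
static_assert((MC_DEFAULT_HASH_ENTRIES & (MC_DEFAULT_HASH_ENTRIES - 1)) == 0);
```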
@@ -341,15 +341,79 @@ static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
 	}
 }
 
+static void __mask_cache_destroy(struct mask_cache *mc)
+{
+	free_percpu(mc->mask_cache);
+	kfree(mc);
+}
+
+static void mask_cache_rcu_cb(struct rcu_head *rcu)
+{
+	struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);
+
+	__mask_cache_destroy(mc);
+}
+
+static struct mask_cache *tbl_mask_cache_alloc(u32 size)
+{
+	struct mask_cache_entry __percpu *cache = NULL;
+	struct mask_cache *new;
+
+	/* Only allow size to be 0, or a power of 2, and does not exceed
+	 * percpu allocation size.
+	 */
+	if ((!is_power_of_2(size) && size != 0) ||
+	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
+		return NULL;
+
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return NULL;
+
+	new->cache_size = size;
+	if (new->cache_size > 0) {
+		cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry),
+						  new->cache_size),
+				       __alignof__(struct mask_cache_entry));
+		if (!cache) {
+			kfree(new);
+			return NULL;
+		}
+	}
+
+	new->mask_cache = cache;
+	return new;
+}
+int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
+{
+	struct mask_cache *mc = rcu_dereference(table->mask_cache);
+	struct mask_cache *new;
+
+	if (size == mc->cache_size)
+		return 0;
+
+	if ((!is_power_of_2(size) && size != 0) ||
+	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
+		return -EINVAL;
+
+	new = tbl_mask_cache_alloc(size);
+	if (!new)
+		return -ENOMEM;
+
+	rcu_assign_pointer(table->mask_cache, new);
+	call_rcu(&mc->rcu, mask_cache_rcu_cb);
+
+	return 0;
+}
+
 int ovs_flow_tbl_init(struct flow_table *table)
 {
 	struct table_instance *ti, *ufid_ti;
+	struct mask_cache *mc;
 	struct mask_array *ma;
 
-	table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
-					   MC_HASH_ENTRIES,
-					   __alignof__(struct mask_cache_entry));
-	if (!table->mask_cache)
+	mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
+	if (!mc)
 		return -ENOMEM;
 
 	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
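A note on the validation shared by tbl_mask_cache_alloc() and ovs_flow_tbl_masks_cache_resize(): a size is accepted only if it is 0 (cache disabled) or a power of two, and only if the resulting per-CPU array fits in PCPU_MIN_UNIT_SIZE (32 KiB in mainline on 4 KiB-page systems). A small userspace sketch of the same check; the 8-byte entry size and the 32 KiB limit below are stand-in assumptions for the kernel values:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for kernel values: PCPU_MIN_UNIT_SIZE comes from
 * include/linux/percpu.h, the entry size from struct mask_cache_entry.
 */
#define PCPU_MIN_UNIT_SIZE	(32 * 1024)
#define MASK_CACHE_ENTRY_SIZE	8

static bool is_power_of_2(uint32_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Mirrors the acceptance test in tbl_mask_cache_alloc(). */
static bool mask_cache_size_valid(uint32_t size)
{
	if (!is_power_of_2(size) && size != 0)
		return false;
	return (uint64_t)size * MASK_CACHE_ENTRY_SIZE <= PCPU_MIN_UNIT_SIZE;
}

int main(void)
{
	const uint32_t sizes[] = { 0, 256, 300, 4096, 1u << 16 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%6u -> %s\n", sizes[i],
		       mask_cache_size_valid(sizes[i]) ? "ok" : "rejected");
	return 0;
}
```

With these stand-in values, 0, 256 and 4096 are accepted while 300 (not a power of two) and 65536 (array too large for one percpu unit) are rejected, matching the -EINVAL path in the resize function.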
@@ -367,6 +431,7 @@ int ovs_flow_tbl_init(struct flow_table *table)
 	rcu_assign_pointer(table->ti, ti);
 	rcu_assign_pointer(table->ufid_ti, ufid_ti);
 	rcu_assign_pointer(table->mask_array, ma);
+	rcu_assign_pointer(table->mask_cache, mc);
 	table->last_rehash = jiffies;
 	table->count = 0;
 	table->ufid_count = 0;
@@ -377,7 +442,7 @@ int ovs_flow_tbl_init(struct flow_table *table)
 free_mask_array:
 	__mask_array_destroy(ma);
 free_mask_cache:
-	free_percpu(table->mask_cache);
+	__mask_cache_destroy(mc);
 	return -ENOMEM;
 }
 
@@ -453,9 +518,11 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
 {
 	struct table_instance *ti = rcu_dereference_raw(table->ti);
 	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
+	struct mask_cache *mc = rcu_dereference(table->mask_cache);
+	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
 
-	free_percpu(table->mask_cache);
-	call_rcu(&table->mask_array->rcu, mask_array_rcu_cb);
+	call_rcu(&mc->rcu, mask_cache_rcu_cb);
+	call_rcu(&ma->rcu, mask_array_rcu_cb);
 	table_instance_destroy(table, ti, ufid_ti, false);
 }
 
@@ -724,6 +791,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 					  u32 *n_mask_hit,
 					  u32 *n_cache_hit)
 {
+	struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
 	struct mask_array *ma = rcu_dereference(tbl->mask_array);
 	struct table_instance *ti = rcu_dereference(tbl->ti);
 	struct mask_cache_entry *entries, *ce;
@@ -733,7 +801,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 
 	*n_mask_hit = 0;
 	*n_cache_hit = 0;
-	if (unlikely(!skb_hash)) {
+	if (unlikely(!skb_hash || mc->cache_size == 0)) {
 		u32 mask_index = 0;
 		u32 cache = 0;
 
@@ -749,11 +817,11 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 
 	ce = NULL;
 	hash = skb_hash;
-	entries = this_cpu_ptr(tbl->mask_cache);
+	entries = this_cpu_ptr(mc->mask_cache);
 
 	/* Find the cache entry 'ce' to operate on. */
 	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
-		int index = hash & (MC_HASH_ENTRIES - 1);
+		int index = hash & (mc->cache_size - 1);
 		struct mask_cache_entry *e;
 
 		e = &entries[index];
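With the size now variable, the lookup indexes the cache with hash & (mc->cache_size - 1) instead of the old compile-time constant; the power-of-two restriction above is what keeps that mask valid. The surrounding loop (truncated in this hunk) shifts the hash right by MC_HASH_SHIFT after each segment, so one 32-bit skb hash yields up to MC_HASH_SEGS = 4 candidate slots. A standalone illustration of the probe sequence:

```c
#include <stdint.h>
#include <stdio.h>

#define MC_HASH_SHIFT	8
#define MC_HASH_SEGS	((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)

/* Print the slots a given skb hash probes for a power-of-two cache
 * size, mimicking the segment walk in ovs_flow_tbl_lookup_stats().
 */
static void show_probes(uint32_t skb_hash, uint32_t cache_size)
{
	uint32_t hash = skb_hash;

	printf("hash 0x%08x, size %4u:", skb_hash, cache_size);
	for (size_t seg = 0; seg < MC_HASH_SEGS; seg++) {
		printf(" 0x%x", hash & (cache_size - 1));
		hash >>= MC_HASH_SHIFT;	/* next 8-bit segment */
	}
	printf("\n");
}

int main(void)
{
	show_probes(0xdeadbeef, 256);	/* -> 0xef 0xbe 0xad 0xde */
	show_probes(0xdeadbeef, 4096);	/* wider indexes, same walk */
	return 0;
}
```

Note the trade-off for sizes above 256: the index then needs more than 8 bits while the per-segment shift stays at 8, so consecutive probes reuse some hash bits and are less independent, but the loop itself needs no change.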
@@ -867,6 +935,13 @@ int ovs_flow_tbl_num_masks(const struct flow_table *table)
 	return READ_ONCE(ma->count);
 }
 
+u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
+{
+	struct mask_cache *mc = rcu_dereference(table->mask_cache);
+
+	return READ_ONCE(mc->cache_size);
+}
+
 static struct table_instance *table_instance_expand(struct table_instance *ti,
 						    bool ufid)
 {
@@ -1095,8 +1170,8 @@ void ovs_flow_masks_rebalance(struct flow_table *table)
 	for (i = 0; i < masks_entries; i++) {
 		int index = masks_and_count[i].index;
 
-		new->masks[new->count++] =
-			rcu_dereference_ovsl(ma->masks[index]);
+		if (ovsl_dereference(ma->masks[index]))
+			new->masks[new->count++] = ma->masks[index];
 	}
 
 	rcu_assign_pointer(table->mask_array, new);
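Neither new entry point is called within this file's diff: ovs_flow_tbl_masks_cache_resize() and ovs_flow_tbl_masks_cache_size() are table API meant to be driven from datapath netlink handling elsewhere in the series, so userspace can tune or query the cache per datapath. A hypothetical caller sketch; the glue function and attribute handling below are assumptions, not shown in this commit:

```c
/* Hypothetical netlink glue, kernel C: apply a userspace-requested
 * masks cache size to a datapath's flow table.
 */
static int dp_set_masks_cache_size(struct flow_table *table,
				   const struct nlattr *attr)
{
	u32 size = nla_get_u32(attr);

	/* Returns -EINVAL unless size is 0 or a power of two that fits
	 * in a percpu unit, -ENOMEM if allocation fails, else 0.
	 */
	return ovs_flow_tbl_masks_cache_resize(table, size);
}
```

Because the resize publishes the new cache with rcu_assign_pointer() and frees the old one through call_rcu(), concurrent lookups never see a torn cache; they finish against whichever mask_cache they dereferenced.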