@@ -269,8 +269,9 @@ static void tree_put_node(struct fs_node *node)
269
269
if (node -> del_sw_func )
270
270
node -> del_sw_func (node );
271
271
up_write_ref_node (parent_node );
272
+ } else {
273
+ kfree (node );
272
274
}
273
- kfree (node );
274
275
node = NULL ;
275
276
}
276
277
if (!node && parent_node )
@@ -389,6 +390,15 @@ static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
389
390
return container_of (ns , struct mlx5_flow_root_namespace , ns );
390
391
}
391
392
393
+ static inline struct mlx5_flow_steering * get_steering (struct fs_node * node )
394
+ {
395
+ struct mlx5_flow_root_namespace * root = find_root (node );
396
+
397
+ if (root )
398
+ return root -> dev -> priv .steering ;
399
+ return NULL ;
400
+ }
401
+
392
402
static inline struct mlx5_core_dev * get_dev (struct fs_node * node )
393
403
{
394
404
struct mlx5_flow_root_namespace * root = find_root (node );
@@ -424,6 +434,7 @@ static void del_sw_flow_table(struct fs_node *node)
424
434
rhltable_destroy (& ft -> fgs_hash );
425
435
fs_get_obj (prio , ft -> node .parent );
426
436
prio -> num_ft -- ;
437
+ kfree (ft );
427
438
}
428
439
429
440
static void del_sw_hw_rule (struct fs_node * node )
@@ -469,6 +480,7 @@ static void del_sw_hw_rule(struct fs_node *node)
469
480
"%s can't del rule fg id=%d fte_index=%d\n" ,
470
481
__func__ , fg -> id , fte -> index );
471
482
}
483
+ kfree (rule );
472
484
}
473
485
474
486
static void del_hw_fte (struct fs_node * node )
@@ -497,6 +509,7 @@ static void del_hw_fte(struct fs_node *node)
497
509
498
510
static void del_sw_fte (struct fs_node * node )
499
511
{
512
+ struct mlx5_flow_steering * steering = get_steering (node );
500
513
struct mlx5_flow_group * fg ;
501
514
struct fs_fte * fte ;
502
515
int err ;
@@ -509,6 +522,7 @@ static void del_sw_fte(struct fs_node *node)
509
522
rhash_fte );
510
523
WARN_ON (err );
511
524
ida_simple_remove (& fg -> fte_allocator , fte -> index - fg -> start_index );
525
+ kmem_cache_free (steering -> ftes_cache , fte );
512
526
}
513
527
514
528
static void del_hw_flow_group (struct fs_node * node )
@@ -529,6 +543,7 @@ static void del_hw_flow_group(struct fs_node *node)
529
543
530
544
static void del_sw_flow_group (struct fs_node * node )
531
545
{
546
+ struct mlx5_flow_steering * steering = get_steering (node );
532
547
struct mlx5_flow_group * fg ;
533
548
struct mlx5_flow_table * ft ;
534
549
int err ;
@@ -544,6 +559,7 @@ static void del_sw_flow_group(struct fs_node *node)
544
559
& fg -> hash ,
545
560
rhash_fg );
546
561
WARN_ON (err );
562
+ kmem_cache_free (steering -> fgs_cache , fg );
547
563
}
548
564
549
565
static int insert_fte (struct mlx5_flow_group * fg , struct fs_fte * fte )
@@ -571,12 +587,14 @@ static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
571
587
return ret ;
572
588
}
573
589
574
- static struct fs_fte * alloc_fte (u32 * match_value ,
590
+ static struct fs_fte * alloc_fte (struct mlx5_flow_table * ft ,
591
+ u32 * match_value ,
575
592
struct mlx5_flow_act * flow_act )
576
593
{
594
+ struct mlx5_flow_steering * steering = get_steering (& ft -> node );
577
595
struct fs_fte * fte ;
578
596
579
- fte = kzalloc ( sizeof ( * fte ) , GFP_KERNEL );
597
+ fte = kmem_cache_zalloc ( steering -> ftes_cache , GFP_KERNEL );
580
598
if (!fte )
581
599
return ERR_PTR (- ENOMEM );
582
600
@@ -592,27 +610,29 @@ static struct fs_fte *alloc_fte(u32 *match_value,
592
610
return fte ;
593
611
}
594
612
595
/* Tear down and free a flow group that was never inserted into (or has
 * been removed from) the table's fgs_hash.
 *
 * The group's FTE hashtable must be destroyed before the group itself is
 * returned to the fgs_cache slab, so the two statements below must stay
 * in this order.  Counterpart of alloc_flow_group().
 */
static void dealloc_flow_group(struct mlx5_flow_steering *steering,
			       struct mlx5_flow_group *fg)
{
	rhashtable_destroy(&fg->ftes_hash);
	kmem_cache_free(steering->fgs_cache, fg);
}
600
619
601
- static struct mlx5_flow_group * alloc_flow_group (u8 match_criteria_enable ,
620
+ static struct mlx5_flow_group * alloc_flow_group (struct mlx5_flow_steering * steering ,
621
+ u8 match_criteria_enable ,
602
622
void * match_criteria ,
603
623
int start_index ,
604
624
int end_index )
605
625
{
606
626
struct mlx5_flow_group * fg ;
607
627
int ret ;
608
628
609
- fg = kzalloc ( sizeof ( * fg ) , GFP_KERNEL );
629
+ fg = kmem_cache_zalloc ( steering -> fgs_cache , GFP_KERNEL );
610
630
if (!fg )
611
631
return ERR_PTR (- ENOMEM );
612
632
613
633
ret = rhashtable_init (& fg -> ftes_hash , & rhash_fte );
614
634
if (ret ) {
615
- kfree ( fg );
635
+ kmem_cache_free ( steering -> fgs_cache , fg );
616
636
return ERR_PTR (ret );
617
637
}
618
638
ida_init (& fg -> fte_allocator );
@@ -633,10 +653,11 @@ static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *f
633
653
int end_index ,
634
654
struct list_head * prev )
635
655
{
656
+ struct mlx5_flow_steering * steering = get_steering (& ft -> node );
636
657
struct mlx5_flow_group * fg ;
637
658
int ret ;
638
659
639
- fg = alloc_flow_group (match_criteria_enable , match_criteria ,
660
+ fg = alloc_flow_group (steering , match_criteria_enable , match_criteria ,
640
661
start_index , end_index );
641
662
if (IS_ERR (fg ))
642
663
return fg ;
@@ -646,7 +667,7 @@ static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *f
646
667
& fg -> hash ,
647
668
rhash_fg );
648
669
if (ret ) {
649
- dealloc_flow_group (fg );
670
+ dealloc_flow_group (steering , fg );
650
671
return ERR_PTR (ret );
651
672
}
652
673
@@ -1569,6 +1590,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
1569
1590
int dest_num ,
1570
1591
int ft_version )
1571
1592
{
1593
+ struct mlx5_flow_steering * steering = get_steering (& ft -> node );
1572
1594
struct mlx5_flow_group * g ;
1573
1595
struct mlx5_flow_handle * rule ;
1574
1596
struct match_list * iter ;
@@ -1577,7 +1599,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
1577
1599
u64 version ;
1578
1600
int err ;
1579
1601
1580
- fte = alloc_fte (spec -> match_value , flow_act );
1602
+ fte = alloc_fte (ft , spec -> match_value , flow_act );
1581
1603
if (IS_ERR (fte ))
1582
1604
return ERR_PTR (- ENOMEM );
1583
1605
@@ -1611,7 +1633,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
1611
1633
flow_act , dest , dest_num , fte_tmp );
1612
1634
up_write_ref_node (& fte_tmp -> node );
1613
1635
tree_put_node (& fte_tmp -> node );
1614
- kfree ( fte );
1636
+ kmem_cache_free ( steering -> ftes_cache , fte );
1615
1637
return rule ;
1616
1638
}
1617
1639
@@ -1653,7 +1675,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
1653
1675
continue ;
1654
1676
list_for_each_entry (iter , match_head , list )
1655
1677
up_write_ref_node (& iter -> g -> node );
1656
- kfree ( fte );
1678
+ kmem_cache_free ( steering -> ftes_cache , fte );
1657
1679
return ERR_PTR (err );
1658
1680
}
1659
1681
@@ -1670,7 +1692,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
1670
1692
out :
1671
1693
list_for_each_entry (iter , match_head , list )
1672
1694
up_write_ref_node (& iter -> g -> node );
1673
- kfree ( fte );
1695
+ kmem_cache_free ( steering -> ftes_cache , fte );
1674
1696
return rule ;
1675
1697
}
1676
1698
@@ -1682,6 +1704,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1682
1704
int dest_num )
1683
1705
1684
1706
{
1707
+ struct mlx5_flow_steering * steering = get_steering (& ft -> node );
1685
1708
struct mlx5_flow_group * g ;
1686
1709
struct mlx5_flow_handle * rule ;
1687
1710
struct match_list_head match_head ;
@@ -1740,15 +1763,15 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1740
1763
if (err )
1741
1764
goto err_release_fg ;
1742
1765
1743
- fte = alloc_fte (spec -> match_value , flow_act );
1766
+ fte = alloc_fte (ft , spec -> match_value , flow_act );
1744
1767
if (IS_ERR (fte )) {
1745
1768
err = PTR_ERR (fte );
1746
1769
goto err_release_fg ;
1747
1770
}
1748
1771
1749
1772
err = insert_fte (g , fte );
1750
1773
if (err ) {
1751
- kfree ( fte );
1774
+ kmem_cache_free ( steering -> ftes_cache , fte );
1752
1775
goto err_release_fg ;
1753
1776
}
1754
1777
@@ -2281,6 +2304,8 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
2281
2304
cleanup_root_ns (steering -> sniffer_rx_root_ns );
2282
2305
cleanup_root_ns (steering -> sniffer_tx_root_ns );
2283
2306
mlx5_cleanup_fc_stats (dev );
2307
+ kmem_cache_destroy (steering -> ftes_cache );
2308
+ kmem_cache_destroy (steering -> fgs_cache );
2284
2309
kfree (steering );
2285
2310
}
2286
2311
@@ -2386,6 +2411,16 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
2386
2411
steering -> dev = dev ;
2387
2412
dev -> priv .steering = steering ;
2388
2413
2414
+ steering -> fgs_cache = kmem_cache_create ("mlx5_fs_fgs" ,
2415
+ sizeof (struct mlx5_flow_group ), 0 ,
2416
+ 0 , NULL );
2417
+ steering -> ftes_cache = kmem_cache_create ("mlx5_fs_ftes" , sizeof (struct fs_fte ), 0 ,
2418
+ 0 , NULL );
2419
+ if (!steering -> ftes_cache || !steering -> fgs_cache ) {
2420
+ err = - ENOMEM ;
2421
+ goto err ;
2422
+ }
2423
+
2389
2424
if ((((MLX5_CAP_GEN (dev , port_type ) == MLX5_CAP_PORT_TYPE_ETH ) &&
2390
2425
(MLX5_CAP_GEN (dev , nic_flow_table ))) ||
2391
2426
((MLX5_CAP_GEN (dev , port_type ) == MLX5_CAP_PORT_TYPE_IB ) &&
0 commit comments