18
18
#include "lib/clock.h"
19
19
#include "diag/fw_tracer.h"
20
20
#include "mlx5_irq.h"
21
+ #include "pci_irq.h"
21
22
#include "devlink.h"
22
23
#include "en_accel/ipsec.h"
23
24
@@ -61,9 +62,7 @@ struct mlx5_eq_table {
61
62
struct mlx5_irq_table * irq_table ;
62
63
struct mlx5_irq * * comp_irqs ;
63
64
struct mlx5_irq * ctrl_irq ;
64
- #ifdef CONFIG_RFS_ACCEL
65
65
struct cpu_rmap * rmap ;
66
- #endif
67
66
};
68
67
69
68
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
@@ -839,7 +838,7 @@ static int comp_irqs_request_pci(struct mlx5_core_dev *dev)
839
838
}
840
839
spread_done :
841
840
rcu_read_unlock ();
842
- ret = mlx5_irqs_request_vectors (dev , cpus , ncomp_eqs , table -> comp_irqs );
841
+ ret = mlx5_irqs_request_vectors (dev , cpus , ncomp_eqs , table -> comp_irqs , & table -> rmap );
843
842
kfree (cpus );
844
843
return ret ;
845
844
}
@@ -888,6 +887,40 @@ static int comp_irqs_request(struct mlx5_core_dev *dev)
888
887
return ret ;
889
888
}
890
889
890
#ifdef CONFIG_RFS_ACCEL
/* alloc_rmap() - allocate the per-device aRFS cpu_rmap sized to the number
 * of completion EQs. The map is filled in later when the completion IRQs
 * are requested (see comp_irqs_request_pci(), which passes &table->rmap to
 * mlx5_irqs_request_vectors()).
 *
 * Returns 0 on success (including the SF case, which gets no rmap) or
 * -ENOMEM on allocation failure.
 */
static int alloc_rmap(struct mlx5_core_dev *mdev)
{
	struct mlx5_eq_table *eq_table = mdev->priv.eq_table;

	/* rmap is a mapping between irq number and queue number.
	 * Each irq can be assigned only to a single rmap.
	 * Since SFs share IRQs, rmap mapping cannot function correctly
	 * for irqs that are shared between different core/netdev RX rings.
	 * Hence we don't allow netdev rmap for SFs.
	 */
	if (mlx5_core_is_sf(mdev))
		return 0;

	eq_table->rmap = alloc_irq_cpu_rmap(eq_table->num_comp_eqs);
	if (!eq_table->rmap)
		return -ENOMEM;
	return 0;
}

/* free_rmap() - release the cpu_rmap allocated by alloc_rmap(), if any.
 *
 * The pointer is cleared after freeing so the function is idempotent:
 * it is reached both from destroy_comp_eqs() and again from the
 * err_irqs_req error path of create_comp_eqs(), and must be a no-op
 * the second time (and for SFs, which never allocate an rmap).
 */
static void free_rmap(struct mlx5_core_dev *mdev)
{
	struct mlx5_eq_table *eq_table = mdev->priv.eq_table;

	if (eq_table->rmap) {
		free_irq_cpu_rmap(eq_table->rmap);
		eq_table->rmap = NULL;
	}
}
#else
/* Without CONFIG_RFS_ACCEL there is no aRFS steering; stub these out. */
static int alloc_rmap(struct mlx5_core_dev *mdev) { return 0; }
static void free_rmap(struct mlx5_core_dev *mdev) {}
#endif
891
924
static void destroy_comp_eqs (struct mlx5_core_dev * dev )
892
925
{
893
926
struct mlx5_eq_table * table = dev -> priv .eq_table ;
@@ -903,6 +936,7 @@ static void destroy_comp_eqs(struct mlx5_core_dev *dev)
903
936
kfree (eq );
904
937
}
905
938
comp_irqs_release (dev );
939
+ free_rmap (dev );
906
940
}
907
941
908
942
static u16 comp_eq_depth_devlink_param_get (struct mlx5_core_dev * dev )
@@ -929,9 +963,16 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
929
963
int err ;
930
964
int i ;
931
965
966
+ err = alloc_rmap (dev );
967
+ if (err )
968
+ return err ;
969
+
932
970
ncomp_eqs = comp_irqs_request (dev );
933
- if (ncomp_eqs < 0 )
934
- return ncomp_eqs ;
971
+ if (ncomp_eqs < 0 ) {
972
+ err = ncomp_eqs ;
973
+ goto err_irqs_req ;
974
+ }
975
+
935
976
INIT_LIST_HEAD (& table -> comp_eqs_list );
936
977
nent = comp_eq_depth_devlink_param_get (dev );
937
978
@@ -976,6 +1017,8 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
976
1017
kfree (eq );
977
1018
clean :
978
1019
destroy_comp_eqs (dev );
1020
+ err_irqs_req :
1021
+ free_rmap (dev );
979
1022
return err ;
980
1023
}
981
1024
@@ -1054,55 +1097,12 @@ struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
1054
1097
return ERR_PTR (- ENOENT );
1055
1098
}
1056
1099
1057
- static void clear_rmap (struct mlx5_core_dev * dev )
1058
- {
1059
- #ifdef CONFIG_RFS_ACCEL
1060
- struct mlx5_eq_table * eq_table = dev -> priv .eq_table ;
1061
-
1062
- free_irq_cpu_rmap (eq_table -> rmap );
1063
- #endif
1064
- }
1065
-
1066
- static int set_rmap (struct mlx5_core_dev * mdev )
1067
- {
1068
- int err = 0 ;
1069
- #ifdef CONFIG_RFS_ACCEL
1070
- struct mlx5_eq_table * eq_table = mdev -> priv .eq_table ;
1071
- int vecidx ;
1072
-
1073
- eq_table -> rmap = alloc_irq_cpu_rmap (eq_table -> num_comp_eqs );
1074
- if (!eq_table -> rmap ) {
1075
- err = - ENOMEM ;
1076
- mlx5_core_err (mdev , "Failed to allocate cpu_rmap. err %d" , err );
1077
- goto err_out ;
1078
- }
1079
-
1080
- for (vecidx = 0 ; vecidx < eq_table -> num_comp_eqs ; vecidx ++ ) {
1081
- err = irq_cpu_rmap_add (eq_table -> rmap ,
1082
- pci_irq_vector (mdev -> pdev , vecidx ));
1083
- if (err ) {
1084
- mlx5_core_err (mdev , "irq_cpu_rmap_add failed. err %d" ,
1085
- err );
1086
- goto err_irq_cpu_rmap_add ;
1087
- }
1088
- }
1089
- return 0 ;
1090
-
1091
- err_irq_cpu_rmap_add :
1092
- clear_rmap (mdev );
1093
- err_out :
1094
- #endif
1095
- return err ;
1096
- }
1097
-
1098
1100
/* This function should only be called after mlx5_cmd_force_teardown_hca.
 *
 * Destroys the IRQ table under the EQ-table lock (synchronizing against
 * create/destroy_async_eq). The aRFS cpu_rmap is not touched here: its
 * lifetime is owned by create_comp_eqs()/destroy_comp_eqs() through
 * alloc_rmap()/free_rmap().
 */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
	mlx5_irq_table_destroy(dev);
	mutex_unlock(&table->lock);
}
@@ -1139,38 +1139,22 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
1139
1139
goto err_async_eqs ;
1140
1140
}
1141
1141
1142
- if (!mlx5_core_is_sf (dev )) {
1143
- /* rmap is a mapping between irq number and queue number.
1144
- * each irq can be assign only to a single rmap.
1145
- * since SFs share IRQs, rmap mapping cannot function correctly
1146
- * for irqs that are shared for different core/netdev RX rings.
1147
- * Hence we don't allow netdev rmap for SFs
1148
- */
1149
- err = set_rmap (dev );
1150
- if (err )
1151
- goto err_rmap ;
1152
- }
1153
-
1154
1142
err = create_comp_eqs (dev );
1155
1143
if (err ) {
1156
1144
mlx5_core_err (dev , "Failed to create completion EQs\n" );
1157
1145
goto err_comp_eqs ;
1158
1146
}
1159
1147
1160
1148
return 0 ;
1149
+
1161
1150
err_comp_eqs :
1162
- if (!mlx5_core_is_sf (dev ))
1163
- clear_rmap (dev );
1164
- err_rmap :
1165
1151
destroy_async_eqs (dev );
1166
1152
err_async_eqs :
1167
1153
return err ;
1168
1154
}
1169
1155
1170
1156
/* Full EQ-table teardown, in reverse order of mlx5_eq_table_create():
 * completion EQs first (destroy_comp_eqs() also releases the completion
 * IRQs and frees the aRFS cpu_rmap via free_rmap()), then the async EQs.
 */
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
	destroy_comp_eqs(dev);
	destroy_async_eqs(dev);
}
0 commit comments