Skip to content

Commit 3354822

Browse files
elic307i authored
and Saeed Mahameed committed
net/mlx5: Use dynamic msix vectors allocation
Current implementation calculates the number and the partitioning of available interrupt vectors and then allocates all the interrupt vectors. Here, whenever dynamic msix allocation is supported, we change this to use msix vectors dynamically so a vector is actually allocated only when needed. The current pool logic is kept in place to take care of partitioning the vectors between the consumers and take care of reference counting. However, the vectors are allocated only when needed. Subsequent patches will make use of this to allocate vectors for VDPA. Signed-off-by: Eli Cohen <[email protected]> Reviewed-by: Shay Drory <[email protected]> Signed-off-by: Saeed Mahameed <[email protected]> Reviewed-by: Jacob Keller <[email protected]>
1 parent b48a0f7 commit 3354822

File tree

6 files changed

+129
-91
lines changed

6 files changed

+129
-91
lines changed

drivers/net/ethernet/mellanox/mlx5/core/eq.c

Lines changed: 49 additions & 65 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@
1818
#include "lib/clock.h"
1919
#include "diag/fw_tracer.h"
2020
#include "mlx5_irq.h"
21+
#include "pci_irq.h"
2122
#include "devlink.h"
2223
#include "en_accel/ipsec.h"
2324

@@ -61,9 +62,7 @@ struct mlx5_eq_table {
6162
struct mlx5_irq_table *irq_table;
6263
struct mlx5_irq **comp_irqs;
6364
struct mlx5_irq *ctrl_irq;
64-
#ifdef CONFIG_RFS_ACCEL
6565
struct cpu_rmap *rmap;
66-
#endif
6766
};
6867

6968
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
@@ -839,7 +838,7 @@ static int comp_irqs_request_pci(struct mlx5_core_dev *dev)
839838
}
840839
spread_done:
841840
rcu_read_unlock();
842-
ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs);
841+
ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs, &table->rmap);
843842
kfree(cpus);
844843
return ret;
845844
}
@@ -888,6 +887,40 @@ static int comp_irqs_request(struct mlx5_core_dev *dev)
888887
return ret;
889888
}
890889

890+
#ifdef CONFIG_RFS_ACCEL
891+
static int alloc_rmap(struct mlx5_core_dev *mdev)
892+
{
893+
struct mlx5_eq_table *eq_table = mdev->priv.eq_table;
894+
895+
/* rmap is a mapping between irq number and queue number.
896+
* Each irq can be assigned only to a single rmap.
897+
* Since SFs share IRQs, rmap mapping cannot function correctly
898+
* for irqs that are shared between different core/netdev RX rings.
899+
* Hence we don't allow netdev rmap for SFs.
900+
*/
901+
if (mlx5_core_is_sf(mdev))
902+
return 0;
903+
904+
eq_table->rmap = alloc_irq_cpu_rmap(eq_table->num_comp_eqs);
905+
if (!eq_table->rmap)
906+
return -ENOMEM;
907+
return 0;
908+
}
909+
910+
static void free_rmap(struct mlx5_core_dev *mdev)
911+
{
912+
struct mlx5_eq_table *eq_table = mdev->priv.eq_table;
913+
914+
if (eq_table->rmap) {
915+
free_irq_cpu_rmap(eq_table->rmap);
916+
eq_table->rmap = NULL;
917+
}
918+
}
919+
#else
920+
static int alloc_rmap(struct mlx5_core_dev *mdev) { return 0; }
921+
static void free_rmap(struct mlx5_core_dev *mdev) {}
922+
#endif
923+
891924
static void destroy_comp_eqs(struct mlx5_core_dev *dev)
892925
{
893926
struct mlx5_eq_table *table = dev->priv.eq_table;
@@ -903,6 +936,7 @@ static void destroy_comp_eqs(struct mlx5_core_dev *dev)
903936
kfree(eq);
904937
}
905938
comp_irqs_release(dev);
939+
free_rmap(dev);
906940
}
907941

908942
static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
@@ -929,9 +963,16 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
929963
int err;
930964
int i;
931965

966+
err = alloc_rmap(dev);
967+
if (err)
968+
return err;
969+
932970
ncomp_eqs = comp_irqs_request(dev);
933-
if (ncomp_eqs < 0)
934-
return ncomp_eqs;
971+
if (ncomp_eqs < 0) {
972+
err = ncomp_eqs;
973+
goto err_irqs_req;
974+
}
975+
935976
INIT_LIST_HEAD(&table->comp_eqs_list);
936977
nent = comp_eq_depth_devlink_param_get(dev);
937978

@@ -976,6 +1017,8 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
9761017
kfree(eq);
9771018
clean:
9781019
destroy_comp_eqs(dev);
1020+
err_irqs_req:
1021+
free_rmap(dev);
9791022
return err;
9801023
}
9811024

@@ -1054,55 +1097,12 @@ struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
10541097
return ERR_PTR(-ENOENT);
10551098
}
10561099

1057-
static void clear_rmap(struct mlx5_core_dev *dev)
1058-
{
1059-
#ifdef CONFIG_RFS_ACCEL
1060-
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
1061-
1062-
free_irq_cpu_rmap(eq_table->rmap);
1063-
#endif
1064-
}
1065-
1066-
static int set_rmap(struct mlx5_core_dev *mdev)
1067-
{
1068-
int err = 0;
1069-
#ifdef CONFIG_RFS_ACCEL
1070-
struct mlx5_eq_table *eq_table = mdev->priv.eq_table;
1071-
int vecidx;
1072-
1073-
eq_table->rmap = alloc_irq_cpu_rmap(eq_table->num_comp_eqs);
1074-
if (!eq_table->rmap) {
1075-
err = -ENOMEM;
1076-
mlx5_core_err(mdev, "Failed to allocate cpu_rmap. err %d", err);
1077-
goto err_out;
1078-
}
1079-
1080-
for (vecidx = 0; vecidx < eq_table->num_comp_eqs; vecidx++) {
1081-
err = irq_cpu_rmap_add(eq_table->rmap,
1082-
pci_irq_vector(mdev->pdev, vecidx));
1083-
if (err) {
1084-
mlx5_core_err(mdev, "irq_cpu_rmap_add failed. err %d",
1085-
err);
1086-
goto err_irq_cpu_rmap_add;
1087-
}
1088-
}
1089-
return 0;
1090-
1091-
err_irq_cpu_rmap_add:
1092-
clear_rmap(mdev);
1093-
err_out:
1094-
#endif
1095-
return err;
1096-
}
1097-
10981100
/* This function should only be called after mlx5_cmd_force_teardown_hca */
10991101
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
11001102
{
11011103
struct mlx5_eq_table *table = dev->priv.eq_table;
11021104

11031105
mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
1104-
if (!mlx5_core_is_sf(dev))
1105-
clear_rmap(dev);
11061106
mlx5_irq_table_destroy(dev);
11071107
mutex_unlock(&table->lock);
11081108
}
@@ -1139,38 +1139,22 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
11391139
goto err_async_eqs;
11401140
}
11411141

1142-
if (!mlx5_core_is_sf(dev)) {
1143-
/* rmap is a mapping between irq number and queue number.
1144-
* each irq can be assign only to a single rmap.
1145-
* since SFs share IRQs, rmap mapping cannot function correctly
1146-
* for irqs that are shared for different core/netdev RX rings.
1147-
* Hence we don't allow netdev rmap for SFs
1148-
*/
1149-
err = set_rmap(dev);
1150-
if (err)
1151-
goto err_rmap;
1152-
}
1153-
11541142
err = create_comp_eqs(dev);
11551143
if (err) {
11561144
mlx5_core_err(dev, "Failed to create completion EQs\n");
11571145
goto err_comp_eqs;
11581146
}
11591147

11601148
return 0;
1149+
11611150
err_comp_eqs:
1162-
if (!mlx5_core_is_sf(dev))
1163-
clear_rmap(dev);
1164-
err_rmap:
11651151
destroy_async_eqs(dev);
11661152
err_async_eqs:
11671153
return err;
11681154
}
11691155

11701156
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
11711157
{
1172-
if (!mlx5_core_is_sf(dev))
1173-
clear_rmap(dev);
11741158
destroy_comp_eqs(dev);
11751159
destroy_async_eqs(dev);
11761160
}

drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,8 @@ irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_de
6565
cpu_get(pool, cpumask_first(&af_desc->mask));
6666
}
6767
return mlx5_irq_alloc(pool, irq_index,
68-
cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc);
68+
cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
69+
NULL);
6970
}
7071

7172
/* Looking for the IRQ with the smallest refcount that fits req_mask.
@@ -205,7 +206,7 @@ int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
205206
* The PF IRQs are already allocated and binded to CPU
206207
* at this point. Hence, only an index is needed.
207208
*/
208-
irq = mlx5_irq_request(dev, i, NULL);
209+
irq = mlx5_irq_request(dev, i, NULL, NULL);
209210
if (IS_ERR(irq))
210211
break;
211212
irqs[i] = irq;

drivers/net/ethernet/mellanox/mlx5/core/main.c

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -46,9 +46,6 @@
4646
#include <linux/kmod.h>
4747
#include <linux/mlx5/mlx5_ifc.h>
4848
#include <linux/mlx5/vport.h>
49-
#ifdef CONFIG_RFS_ACCEL
50-
#include <linux/cpu_rmap.h>
51-
#endif
5249
#include <linux/version.h>
5350
#include <net/devlink.h>
5451
#include "mlx5_core.h"

drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
#define MLX5_COMP_EQS_PER_SF 8
1010

1111
struct mlx5_irq;
12+
struct cpu_rmap;
1213

1314
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
1415
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
@@ -25,9 +26,10 @@ int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
2526
struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev);
2627
void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq);
2728
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
28-
struct irq_affinity_desc *af_desc);
29+
struct irq_affinity_desc *af_desc,
30+
struct cpu_rmap **rmap);
2931
int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
30-
struct mlx5_irq **irqs);
32+
struct mlx5_irq **irqs, struct cpu_rmap **rmap);
3133
void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs);
3234
int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
3335
int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb);

0 commit comments

Comments
 (0)