
Commit 5fe9dec

Eli Cohen authored and Saeed Mahameed committed
IB/mlx5: Use blue flame register allocator in mlx5_ib
Make use of the blue flame registers allocator in mlx5_ib. Since blue flame was not really supported, we remove all the code related to blue flame and let all consumers use the same blue flame register; once blue flame is supported, the code will be added back. As part of this patch we also move the definition of struct mlx5_bf to mlx5_ib.h, as it is only used by mlx5_ib.

Signed-off-by: Eli Cohen <[email protected]>
Reviewed-by: Matan Barak <[email protected]>
Signed-off-by: Leon Romanovsky <[email protected]>
Signed-off-by: Saeed Mahameed <[email protected]>
1 parent 0118717 commit 5fe9dec
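
For orientation, the setup/teardown pattern this patch adds to mlx5_ib_add()/mlx5_ib_remove() is sketched below. The calls (mlx5_get_uars_page, mlx5_alloc_bfreg, mlx5_free_bfreg, mlx5_put_uars_page) and their arguments are copied from the diff; the wrapper name example_bfreg_setup and the -ENOMEM return are illustrative only, and the reading of the two boolean arguments as "write-combining" and "fast path" is ours, not stated in the commit.

/* Illustrative sketch, not verbatim from the patch: acquire the shared
 * UARs page, then reserve one regular and one fast-path blue flame
 * register on it; release everything in reverse order on teardown.
 */
static int example_bfreg_setup(struct mlx5_ib_dev *dev)   /* hypothetical helper */
{
        int err;

        dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
        if (!dev->mdev->priv.uar)
                return -ENOMEM;                 /* assumed error code */

        /* regular doorbell register (booleans as in the diff: false, false) */
        err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
        if (err)
                goto err_uar_page;

        /* fast-path register, used by MLX5_IB_QPT_REG_UMR QPs */
        err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
        if (err)
                goto err_bfreg;

        return 0;

err_bfreg:
        mlx5_free_bfreg(dev->mdev, &dev->bfreg);
err_uar_page:
        mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
        return err;
}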

File tree: 10 files changed (+59, -221 lines)


drivers/infiniband/hw/mlx5/cq.c

Lines changed: 3 additions & 5 deletions
@@ -689,7 +689,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
         struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
         struct mlx5_ib_cq *cq = to_mcq(ibcq);
-        void __iomem *uar_page = mdev->priv.bfregi.uars[0].map;
+        void __iomem *uar_page = mdev->priv.uar->map;
         unsigned long irq_flags;
         int ret = 0;

@@ -704,9 +704,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
         mlx5_cq_arm(&cq->mcq,
                     (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                     MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
-                    uar_page,
-                    MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
-                    to_mcq(ibcq)->mcq.cons_index);
+                    uar_page, to_mcq(ibcq)->mcq.cons_index);

         return ret;
 }
@@ -886,7 +884,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
         MLX5_SET(cqc, cqc, log_page_size,
                  cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

-        *index = dev->mdev->priv.bfregi.uars[0].index;
+        *index = dev->mdev->priv.uar->index;

         return 0;

drivers/infiniband/hw/mlx5/main.c

Lines changed: 25 additions & 3 deletions
@@ -3074,8 +3074,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
         if (mlx5_use_mad_ifc(dev))
                 get_ext_port_caps(dev);

-        MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
-
         if (!mlx5_lag_is_active(mdev))
                 name = "mlx5_%d";
         else
@@ -3251,9 +3249,21 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
         if (err)
                 goto err_odp;

+        dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
+        if (!dev->mdev->priv.uar)
+                goto err_q_cnt;
+
+        err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
+        if (err)
+                goto err_uar_page;
+
+        err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
+        if (err)
+                goto err_bfreg;
+
         err = ib_register_device(&dev->ib_dev, NULL);
         if (err)
-                goto err_q_cnt;
+                goto err_fp_bfreg;

         err = create_umr_res(dev);
         if (err)
@@ -3276,6 +3286,15 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 err_dev:
         ib_unregister_device(&dev->ib_dev);

+err_fp_bfreg:
+        mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+
+err_bfreg:
+        mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+
+err_uar_page:
+        mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
+
 err_q_cnt:
         mlx5_ib_dealloc_q_counters(dev);

@@ -3307,6 +3326,9 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)

         mlx5_remove_netdev_notifier(dev);
         ib_unregister_device(&dev->ib_dev);
+        mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+        mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+        mlx5_put_uars_page(dev->mdev, mdev->priv.uar);
         mlx5_ib_dealloc_q_counters(dev);
         destroy_umrc_res(dev);
         mlx5_ib_odp_remove_one(dev);

drivers/infiniband/hw/mlx5/mlx5_ib.h

Lines changed: 9 additions & 2 deletions
@@ -324,6 +324,12 @@ struct mlx5_ib_raw_packet_qp {
         struct mlx5_ib_rq rq;
 };

+struct mlx5_bf {
+        int                     buf_size;
+        unsigned long           offset;
+        struct mlx5_sq_bfreg   *bfreg;
+};
+
 struct mlx5_ib_qp {
         struct ib_qp            ibqp;
         union {
@@ -349,7 +355,7 @@ struct mlx5_ib_qp {
         int                     wq_sig;
         int                     scat_cqe;
         int                     max_inline_data;
-        struct mlx5_bf         *bf;
+        struct mlx5_bf          bf;
         int                     has_rq;

         /* only for user space QPs. For kernel
@@ -591,7 +597,6 @@ struct mlx5_ib_dev {
         struct ib_device                ib_dev;
         struct mlx5_core_dev            *mdev;
         struct mlx5_roce                roce;
-        MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
         int                             num_ports;
         /* serialize update of capability mask
          */
@@ -621,6 +626,8 @@ struct mlx5_ib_dev {
         struct list_head        qp_list;
         /* Array with num_ports elements */
         struct mlx5_ib_port     *port;
+        struct mlx5_sq_bfreg    bfreg;
+        struct mlx5_sq_bfreg    fp_bfreg;
 };

 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)

drivers/infiniband/hw/mlx5/qp.c

Lines changed: 14 additions & 59 deletions
@@ -909,36 +909,28 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
                             u32 **in, int *inlen,
                             struct mlx5_ib_qp_base *base)
 {
-        enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
-        struct mlx5_bfreg_info *bfregi;
         int uar_index;
         void *qpc;
-        int bfregn;
         int err;

-        bfregi = &dev->mdev->priv.bfregi;
         if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN |
                                         IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
                                         IB_QP_CREATE_IPOIB_UD_LSO |
                                         mlx5_ib_create_qp_sqpn_qp1()))
                 return -EINVAL;

         if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
-                lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
-
-        bfregn = alloc_bfreg(bfregi, lc);
-        if (bfregn < 0) {
-                mlx5_ib_dbg(dev, "\n");
-                return -ENOMEM;
-        }
+                qp->bf.bfreg = &dev->fp_bfreg;
+        else
+                qp->bf.bfreg = &dev->bfreg;

-        qp->bf = &bfregi->bfs[bfregn];
-        uar_index = qp->bf->uar->index;
+        qp->bf.buf_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
+        uar_index = qp->bf.bfreg->index;

         err = calc_sq_size(dev, init_attr, qp);
         if (err < 0) {
                 mlx5_ib_dbg(dev, "err %d\n", err);
-                goto err_bfreg;
+                return err;
         }

         qp->rq.offset = 0;
@@ -948,7 +940,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
         err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf);
         if (err) {
                 mlx5_ib_dbg(dev, "err %d\n", err);
-                goto err_bfreg;
+                return err;
         }

         qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
@@ -1010,9 +1002,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,

 err_buf:
         mlx5_buf_free(dev->mdev, &qp->buf);
-
-err_bfreg:
-        free_bfreg(&dev->mdev->priv.bfregi, bfregn);
         return err;
 }

@@ -1025,7 +1014,6 @@ static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
         kfree(qp->rq.wrid);
         mlx5_db_free(dev->mdev, &qp->db);
         mlx5_buf_free(dev->mdev, &qp->buf);
-        free_bfreg(&dev->mdev->priv.bfregi, qp->bf->bfregn);
 }

 static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
@@ -3744,24 +3732,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
         }
 }

-static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
-                         unsigned bytecnt, struct mlx5_ib_qp *qp)
-{
-        while (bytecnt > 0) {
-                __iowrite64_copy(dst++, src++, 8);
-                __iowrite64_copy(dst++, src++, 8);
-                __iowrite64_copy(dst++, src++, 8);
-                __iowrite64_copy(dst++, src++, 8);
-                __iowrite64_copy(dst++, src++, 8);
-                __iowrite64_copy(dst++, src++, 8);
-                __iowrite64_copy(dst++, src++, 8);
-                __iowrite64_copy(dst++, src++, 8);
-                bytecnt -= 64;
-                if (unlikely(src == qp->sq.qend))
-                        src = mlx5_get_send_wqe(qp, 0);
-        }
-}
-
 static u8 get_fence(u8 fence, struct ib_send_wr *wr)
 {
         if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
@@ -3857,7 +3827,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                 return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);

         qp = to_mqp(ibqp);
-        bf = qp->bf;
+        bf = &qp->bf;
         qend = qp->sq.qend;

         spin_lock_irqsave(&qp->sq.lock, flags);
@@ -4130,28 +4100,13 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                  * we hit doorbell */
                 wmb();

-                if (bf->need_lock)
-                        spin_lock(&bf->lock);
-                else
-                        __acquire(&bf->lock);
-
-                /* TBD enable WC */
-                if (0 && nreq == 1 && bf->bfregn && inl && size > 1 && size <= bf->buf_size / 16) {
-                        mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
-                        /* wc_wmb(); */
-                } else {
-                        mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
-                                     MLX5_GET_DOORBELL_LOCK(&bf->lock32));
-                        /* Make sure doorbells don't leak out of SQ spinlock
-                         * and reach the HCA out of order.
-                         */
-                        mmiowb();
-                }
+                /* currently we support only regular doorbells */
+                mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL);
+                /* Make sure doorbells don't leak out of SQ spinlock
+                 * and reach the HCA out of order.
+                 */
+                mmiowb();
                 bf->offset ^= bf->buf_size;
-                if (bf->need_lock)
-                        spin_unlock(&bf->lock);
-                else
-                        __release(&bf->lock);
         }

         spin_unlock_irqrestore(&qp->sq.lock, flags);
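
Read together, the qp.c hunks reduce the kernel-QP doorbell path to the shape below. This is a condensed, illustrative view assembled from the added lines above, not a verbatim excerpt; the inline comments are our interpretation.

/* In create_kernel_qp(): kernel QPs now borrow one of the two
 * per-device bfregs instead of allocating a private one.
 */
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
        qp->bf.bfreg = &dev->fp_bfreg;          /* fast-path register */
else
        qp->bf.bfreg = &dev->bfreg;             /* shared regular register */
qp->bf.buf_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);

/* In mlx5_ib_post_send(), after the wmb(): only regular doorbells,
 * so no blue flame copy and no per-bfreg lock.
 */
mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL);
mmiowb();
bf->offset ^= bf->buf_size;     /* alternate between the two halves of the register */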

drivers/net/ethernet/mellanox/mlx5/core/en.h

Lines changed: 1 addition & 1 deletion
@@ -832,7 +832,7 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
         struct mlx5_core_cq *mcq;

         mcq = &cq->mcq;
-        mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
+        mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
 }

 static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)

drivers/net/ethernet/mellanox/mlx5/core/main.c

Lines changed: 2 additions & 14 deletions
@@ -913,8 +913,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
                 goto out;
         }

-        MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
-
         err = mlx5_init_cq_table(dev);
         if (err) {
                 dev_err(&pdev->dev, "failed to initialize cq table\n");
@@ -1099,16 +1097,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
                 goto err_disable_msix;
         }

-        err = mlx5_alloc_bfregs(dev, &priv->bfregi);
-        if (err) {
-                dev_err(&pdev->dev, "Failed allocating uuars, aborting\n");
-                goto err_uar_cleanup;
-        }
-
         err = mlx5_start_eqs(dev);
         if (err) {
                 dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
-                goto err_free_uar;
+                goto err_put_uars;
         }

         err = alloc_comp_eqs(dev);
@@ -1174,10 +1166,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 err_stop_eqs:
         mlx5_stop_eqs(dev);

-err_free_uar:
-        mlx5_free_bfregs(dev, &priv->bfregi);
-
-err_uar_cleanup:
+err_put_uars:
         mlx5_put_uars_page(dev, priv->uar);

 err_disable_msix:
@@ -1238,7 +1227,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
         mlx5_irq_clear_affinity_hints(dev);
         free_comp_eqs(dev);
         mlx5_stop_eqs(dev);
-        mlx5_free_bfregs(dev, &priv->bfregi);
         mlx5_put_uars_page(dev, priv->uar);
         mlx5_disable_msix(dev);
         if (cleanup)
