@@ -1574,6 +1574,9 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
 	caps->mtt_ba_pg_sz	= 0;
 	caps->mtt_buf_pg_sz	= 0;
 	caps->mtt_hop_num	= HNS_ROCE_MTT_HOP_NUM;
+	caps->wqe_sq_hop_num	= 2;
+	caps->wqe_sge_hop_num	= 1;
+	caps->wqe_rq_hop_num	= 2;
 	caps->cqe_ba_pg_sz	= 0;
 	caps->cqe_buf_pg_sz	= 0;
 	caps->cqe_hop_num	= HNS_ROCE_CQE_HOP_NUM;
@@ -3021,7 +3024,6 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
 }
 
 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
-				 struct hns_roce_mtt *mtt,
 				 enum ib_qp_state cur_state,
 				 enum ib_qp_state new_state,
 				 struct hns_roce_v2_qp_context *context,
@@ -3517,6 +3519,31 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
 	}
 }
 
+static bool check_wqe_rq_mtt_count(struct hns_roce_dev *hr_dev,
+				   struct hns_roce_qp *hr_qp, int mtt_cnt,
+				   u32 page_size)
+{
+	struct device *dev = hr_dev->dev;
+
+	if (hr_qp->rq.wqe_cnt < 1)
+		return true;
+
+	if (mtt_cnt < 1) {
+		dev_err(dev, "qp(0x%lx) rqwqe buf ba find failed\n",
+			hr_qp->qpn);
+		return false;
+	}
+
+	if (mtt_cnt < MTT_MIN_COUNT &&
+	    (hr_qp->rq.offset + page_size) < hr_qp->buff_size) {
+		dev_err(dev, "qp(0x%lx) next rqwqe buf ba find failed\n",
+			hr_qp->qpn);
+		return false;
+	}
+
+	return true;
+}
+
 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 				 const struct ib_qp_attr *attr, int attr_mask,
 				 struct hns_roce_v2_qp_context *context,
@@ -3526,25 +3553,27 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
 	struct device *dev = hr_dev->dev;
+	u64 mtts[MTT_MIN_COUNT] = { 0 };
 	dma_addr_t dma_handle_3;
 	dma_addr_t dma_handle_2;
-	dma_addr_t dma_handle;
+	u64 wqe_sge_ba;
 	u32 page_size;
 	u8 port_num;
 	u64 *mtts_3;
 	u64 *mtts_2;
-	u64 *mtts;
+	int count;
 	u8 *dmac;
 	u8 *smac;
 	int port;
 
 	/* Search qp buf's mtts */
-	mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
-				   hr_qp->mtt.first_seg, &dma_handle);
-	if (!mtts) {
-		dev_err(dev, "qp buf pa find failed\n");
-		return -EINVAL;
-	}
+	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+	count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
+				  hr_qp->rq.offset / page_size, mtts,
+				  MTT_MIN_COUNT, &wqe_sge_ba);
+	if (!ibqp->srq)
+		if (!check_wqe_rq_mtt_count(hr_dev, hr_qp, count, page_size))
+			return -EINVAL;
 
 	/* Search IRRL's mtts */
 	mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
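
Note for reviewers: the hunk above is the core of this change for the INIT-to-RTR path. The old code looked up one flat MTT table with hns_roce_table_find() and got a single base address back through an out-parameter; the new code asks the mtr interface for up to MTT_MIN_COUNT page addresses of the RQ region (starting at page index hr_qp->rq.offset / page_size) and also receives the base address of the WQE/SGE BA table in wqe_sge_ba. A minimal sketch of that calling pattern follows; find_rq_start_pages() is a hypothetical helper written only for illustration and assumes hns_roce_mtr_find() keeps the signature used in this diff.

/*
 * Hypothetical helper, for illustration only: fetch the first two RQ
 * buffer page addresses plus the BA-table base, mirroring the lookup
 * done in modify_qp_init_to_rtr() above.  Relies on the driver-internal
 * types and on the hns_roce_mtr_find() signature seen in this patch;
 * "pages" must have room for MTT_MIN_COUNT entries.
 */
static int find_rq_start_pages(struct hns_roce_dev *hr_dev,
			       struct hns_roce_qp *hr_qp,
			       u64 *pages, u64 *bt_ba)
{
	u32 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
	int count;

	/* rq.offset is a byte offset into the QP buffer; convert to pages */
	count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
				  hr_qp->rq.offset / page_size,
				  pages, MTT_MIN_COUNT, bt_ba);

	/* count reports how many page addresses were actually copied out */
	return count < 1 ? -EINVAL : count;
}
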
@@ -3568,7 +3597,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 	}
 
 	dmac = (u8 *)attr->ah_attr.roce.dmac;
-	context->wqe_sge_ba = (u32)(dma_handle >> 3);
+	context->wqe_sge_ba = (u32)(wqe_sge_ba >> 3);
 	qpc_mask->wqe_sge_ba = 0;
 
 	/*
@@ -3578,39 +3607,40 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 	 * 0 at the same time, else set them to 0x1.
 	 */
 	roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
-		       V2_QPC_BYTE_12_WQE_SGE_BA_S, dma_handle >> (32 + 3));
+		       V2_QPC_BYTE_12_WQE_SGE_BA_S, wqe_sge_ba >> (32 + 3));
 	roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
 		       V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
 
 	roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
 		       V2_QPC_BYTE_12_SQ_HOP_NUM_S,
-		       hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
-		       0 : hr_dev->caps.mtt_hop_num);
+		       hr_dev->caps.wqe_sq_hop_num == HNS_ROCE_HOP_NUM_0 ?
+		       0 : hr_dev->caps.wqe_sq_hop_num);
 	roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
 		       V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
 
 	roce_set_field(context->byte_20_smac_sgid_idx,
 		       V2_QPC_BYTE_20_SGE_HOP_NUM_M,
 		       V2_QPC_BYTE_20_SGE_HOP_NUM_S,
-		       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
-		       hr_dev->caps.mtt_hop_num : 0);
+		       ((ibqp->qp_type == IB_QPT_GSI) ||
+		       hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
+		       hr_dev->caps.wqe_sge_hop_num : 0);
 	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
 		       V2_QPC_BYTE_20_SGE_HOP_NUM_M,
 		       V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
 
 	roce_set_field(context->byte_20_smac_sgid_idx,
 		       V2_QPC_BYTE_20_RQ_HOP_NUM_M,
 		       V2_QPC_BYTE_20_RQ_HOP_NUM_S,
-		       hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
-		       0 : hr_dev->caps.mtt_hop_num);
+		       hr_dev->caps.wqe_rq_hop_num == HNS_ROCE_HOP_NUM_0 ?
+		       0 : hr_dev->caps.wqe_rq_hop_num);
 	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
 		       V2_QPC_BYTE_20_RQ_HOP_NUM_M,
 		       V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
 
 	roce_set_field(context->byte_16_buf_ba_pg_sz,
 		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
 		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
-		       hr_dev->caps.mtt_ba_pg_sz + PG_SHIFT_OFFSET);
+		       hr_qp->wqe_bt_pg_shift + PG_SHIFT_OFFSET);
 	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
 		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
 		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
@@ -3623,29 +3653,24 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
 		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
 
-	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
-	context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size]
-				    >> PAGE_ADDR_SHIFT);
+	context->rq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
 	qpc_mask->rq_cur_blk_addr = 0;
 
 	roce_set_field(context->byte_92_srq_info,
 		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
 		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
-		       mtts[hr_qp->rq.offset / page_size]
-		       >> (32 + PAGE_ADDR_SHIFT));
+		       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
 	roce_set_field(qpc_mask->byte_92_srq_info,
 		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
 		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
 
-	context->rq_nxt_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size + 1]
-				    >> PAGE_ADDR_SHIFT);
+	context->rq_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
 	qpc_mask->rq_nxt_blk_addr = 0;
 
 	roce_set_field(context->byte_104_rq_sge,
 		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
 		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
-		       mtts[hr_qp->rq.offset / page_size + 1]
-		       >> (32 + PAGE_ADDR_SHIFT));
+		       mtts[1] >> (32 + PAGE_ADDR_SHIFT));
 	roce_set_field(qpc_mask->byte_104_rq_sge,
 		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
 		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
@@ -3773,18 +3798,30 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
 	struct device *dev = hr_dev->dev;
-	dma_addr_t dma_handle;
+	u64 sge_cur_blk = 0;
+	u64 sq_cur_blk = 0;
 	u32 page_size;
-	u64 *mtts;
+	int count;
 
 	/* Search qp buf's mtts */
-	mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
-				   hr_qp->mtt.first_seg, &dma_handle);
-	if (!mtts) {
-		dev_err(dev, "qp buf pa find failed\n");
+	count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
+	if (count < 1) {
+		dev_err(dev, "qp(0x%lx) buf pa find failed\n", hr_qp->qpn);
 		return -EINVAL;
 	}
 
+	if (hr_qp->sge.offset) {
+		page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+		count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
+					  hr_qp->sge.offset / page_size,
+					  &sge_cur_blk, 1, NULL);
+		if (count < 1) {
+			dev_err(dev, "qp(0x%lx) sge pa find failed\n",
+				hr_qp->qpn);
+			return -EINVAL;
+		}
+	}
+
 	/* Not support alternate path and path migration */
 	if ((attr_mask & IB_QP_ALT_PATH) ||
 	    (attr_mask & IB_QP_PATH_MIG_STATE)) {
@@ -3798,38 +3835,37 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
 	 * we should set all bits of the relevant fields in context mask to
 	 * 0 at the same time, else set them to 0x1.
 	 */
-	context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
+	context->sq_cur_blk_addr = (u32)(sq_cur_blk >> PAGE_ADDR_SHIFT);
 	roce_set_field(context->byte_168_irrl_idx,
 		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
 		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
-		       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
+		       sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
 	qpc_mask->sq_cur_blk_addr = 0;
 	roce_set_field(qpc_mask->byte_168_irrl_idx,
 		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
 		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
 
-	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
 	context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI) ||
 		       hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
-		       ((u32)(mtts[hr_qp->sge.offset / page_size] >>
+		       ((u32)(sge_cur_blk >>
 		       PAGE_ADDR_SHIFT)) : 0;
 	roce_set_field(context->byte_184_irrl_idx,
 		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
 		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
 		       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs >
 		       HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
-		       (mtts[hr_qp->sge.offset / page_size] >>
+		       (sge_cur_blk >>
 		       (32 + PAGE_ADDR_SHIFT)) : 0);
 	qpc_mask->sq_cur_sge_blk_addr = 0;
 	roce_set_field(qpc_mask->byte_184_irrl_idx,
 		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
 		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
 
-	context->rx_sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
+	context->rx_sq_cur_blk_addr = (u32)(sq_cur_blk >> PAGE_ADDR_SHIFT);
 	roce_set_field(context->byte_232_irrl_sge,
 		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
 		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
-		       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
+		       sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
 	qpc_mask->rx_sq_cur_blk_addr = 0;
 	roce_set_field(qpc_mask->byte_232_irrl_sge,
 		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
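
The renamed variables in the two hunks above (mtts[0]/mtts[1], sq_cur_blk, sge_cur_blk) all feed the same split that the QPC layout requires: a 64-bit, page-aligned block address is stored as a 32-bit low word (the address shifted right by PAGE_ADDR_SHIFT) directly in the context structure, while the remaining high bits (above 32 + PAGE_ADDR_SHIFT) go into a bit field written through roce_set_field(). The sketch below only makes that arithmetic explicit; qpc_addr_lo()/qpc_addr_hi() are hypothetical helpers, not part of this patch, which inlines the expressions instead.

/*
 * Illustration only: splitting a 64-bit block address into the low and
 * high QPC fields.  PAGE_ADDR_SHIFT is the driver's page-address shift.
 */
static inline u32 qpc_addr_lo(u64 blk_addr)
{
	/* drop the page-offset bits, keep the next 32 bits */
	return (u32)(blk_addr >> PAGE_ADDR_SHIFT);
}

static inline u32 qpc_addr_hi(u64 blk_addr)
{
	/* whatever remains above those 32 bits */
	return (u32)(blk_addr >> (32 + PAGE_ADDR_SHIFT));
}

For example, context->sq_cur_blk_addr corresponds to qpc_addr_lo(sq_cur_blk), and the SQ_CUR_BLK_ADDR field written via roce_set_field() corresponds to qpc_addr_hi(sq_cur_blk).
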
@@ -4230,7 +4266,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
 		       V2_QPC_BYTE_60_QP_ST_S, 0);
 
 	/* SW pass context to HW */
-	ret = hns_roce_v2_qp_modify(hr_dev, &hr_qp->mtt, cur_state, new_state,
+	ret = hns_roce_v2_qp_modify(hr_dev, cur_state, new_state,
 				    context, hr_qp);
 	if (ret) {
 		dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);