@@ -67,8 +67,6 @@ static u32 mask_generation(u32 a)
 #define TID_RDMA_DESTQP_FLOW_SHIFT 11
 #define TID_RDMA_DESTQP_FLOW_MASK 0x1f
 
-#define TID_FLOW_SW_PSN BIT(0)
-
 #define TID_OPFN_QP_CTXT_MASK 0xff
 #define TID_OPFN_QP_CTXT_SHIFT 56
 #define TID_OPFN_QP_KDETH_MASK 0xff
@@ -777,7 +775,6 @@ int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
 	rcd->flows[fs->index].generation = fs->generation;
 	fs->generation = kern_setup_hw_flow(rcd, fs->index);
 	fs->psn = 0;
-	fs->flags = 0;
 	dequeue_tid_waiter(rcd, &rcd->flow_queue, qp);
 	/* get head before dropping lock */
 	fqp = first_qp(rcd, &rcd->flow_queue);
@@ -1808,6 +1805,7 @@ u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
 			goto done;
 
 		hfi1_kern_clear_hw_flow(req->rcd, qp);
+		qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
 		req->state = TID_REQUEST_ACTIVE;
 	}
 
@@ -2476,8 +2474,13 @@ void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet)
 
 	flow = &req->flows[req->clear_tail];
 	/* When header suppression is disabled */
-	if (cmp_psn(ipsn, flow->flow_state.ib_lpsn))
+	if (cmp_psn(ipsn, flow->flow_state.ib_lpsn)) {
+		if (cmp_psn(kpsn, flow->flow_state.r_next_psn))
+			goto ack_done;
+		flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
 		goto ack_done;
+	}
+	flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
 	req->ack_pending--;
 	priv->pending_tid_r_segs--;
 	qp->s_num_rd_atomic--;
@@ -2519,6 +2522,7 @@ void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet)
 	     req->comp_seg == req->cur_seg) ||
 	    priv->tid_r_comp == priv->tid_r_reqs) {
 		hfi1_kern_clear_hw_flow(priv->rcd, qp);
+		priv->s_flags &= ~HFI1_R_TID_SW_PSN;
 		if (req->state == TID_REQUEST_SYNC)
 			req->state = TID_REQUEST_ACTIVE;
 	}
@@ -2768,9 +2772,9 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
 			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
 			return ret;
 		}
-		if (priv->flow_state.flags & TID_FLOW_SW_PSN) {
+		if (priv->s_flags & HFI1_R_TID_SW_PSN) {
 			diff = cmp_psn(psn,
-				       priv->flow_state.r_next_psn);
+				       flow->flow_state.r_next_psn);
 			if (diff > 0) {
 				if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
 					restart_tid_rdma_read_req(rcd,
@@ -2806,14 +2810,15 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
 					qp->r_flags &=
 						~RVT_R_RDMAR_SEQ;
 			}
-			priv->flow_state.r_next_psn++;
+			flow->flow_state.r_next_psn =
+				mask_psn(psn + 1);
 		} else {
 			u32 last_psn;
 
 			last_psn = read_r_next_psn(dd, rcd->ctxt,
 						   flow->idx);
-			priv->flow_state.r_next_psn = last_psn;
-			priv->flow_state.flags |= TID_FLOW_SW_PSN;
+			flow->flow_state.r_next_psn = last_psn;
+			priv->s_flags |= HFI1_R_TID_SW_PSN;
 			/*
 			 * If no request has been restarted yet,
 			 * restart the current one.
@@ -2878,6 +2883,7 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
 	struct rvt_ack_entry *e;
 	struct tid_rdma_request *req;
 	struct tid_rdma_flow *flow;
+	int diff = 0;
 
 	trace_hfi1_msg_handle_kdeth_eflags(NULL, "Kdeth error: rhf ",
 					   packet->rhf);
@@ -2977,10 +2983,12 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
 			 * mismatch could be due to packets that were
 			 * already in flight.
 			 */
-			if (psn != flow->flow_state.r_next_psn) {
-				psn = flow->flow_state.r_next_psn;
+			diff = cmp_psn(psn,
+				       flow->flow_state.r_next_psn);
+			if (diff > 0)
 				goto nak_psn;
-			}
+			else if (diff < 0)
+				break;
 
 			qpriv->s_nak_state = 0;
 			/*
@@ -2991,8 +2999,10 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
 			if (psn == full_flow_psn(flow,
 						 flow->flow_state.lpsn))
 				ret = false;
+			flow->flow_state.r_next_psn =
+				mask_psn(psn + 1);
 			qpriv->r_next_psn_kdeth =
-				++flow->flow_state.r_next_psn;
+				flow->flow_state.r_next_psn;
 		}
 		break;
 
@@ -3497,8 +3507,10 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
 		if (qpriv->r_tid_alloc == qpriv->r_tid_head) {
 			/* If all data has been received, clear the flow */
 			if (qpriv->flow_state.index < RXE_NUM_TID_FLOWS &&
-			    !qpriv->alloc_w_segs)
+			    !qpriv->alloc_w_segs) {
 				hfi1_kern_clear_hw_flow(rcd, qp);
+				qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
+			}
 			break;
 		}
 
@@ -3524,8 +3536,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
 		if (qpriv->sync_pt && !qpriv->alloc_w_segs) {
 			hfi1_kern_clear_hw_flow(rcd, qp);
 			qpriv->sync_pt = false;
-			if (qpriv->s_flags & HFI1_R_TID_SW_PSN)
-				qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
+			qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
 		}
 
 		/* Allocate flow if we don't have one */
@@ -4299,7 +4310,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
 	if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.lpsn))) {
 		if (cmp_psn(psn, flow->flow_state.r_next_psn))
 			goto send_nak;
-		flow->flow_state.r_next_psn++;
+		flow->flow_state.r_next_psn = mask_psn(psn + 1);
 		goto exit;
 	}
 	flow->flow_state.r_next_psn = mask_psn(psn + 1);
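For reference, the PSN helpers these hunks lean on behave roughly as below. This is a minimal standalone sketch assuming the usual 24-bit PSN arithmetic; the names mirror the driver's mask_psn() and cmp_psn(), but the constants and bodies here are paraphrased for illustration, not copied from the tree.

/*
 * Sketch of the PSN helpers used above, assuming 24-bit PSNs.
 * Illustrative only; not the driver's exact definitions.
 */
#include <stdint.h>

#define PSN_MASK  0xFFFFFF	/* PSNs occupy the low 24 bits */
#define PSN_SHIFT 8		/* 32 - 24 */

/*
 * Wrap a PSN into 24 bits.  This is why the patch writes
 * r_next_psn = mask_psn(psn + 1) rather than r_next_psn++,
 * which could walk past the 24-bit wrap point.
 */
static inline uint32_t mask_psn(uint32_t psn)
{
	return psn & PSN_MASK;
}

/*
 * Wraparound-aware comparison: negative if a is older than b,
 * zero if equal, positive if a is newer.  The sign is what lets
 * the KDETH error path drop stale duplicates (diff < 0) instead
 * of NAKing them along with genuine gaps (diff > 0), which the
 * old equality test could not distinguish.
 */
static inline int32_t cmp_psn(uint32_t a, uint32_t b)
{
	return (int32_t)((a - b) << PSN_SHIFT) >> PSN_SHIFT;
}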