@@ -98,13 +98,21 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
 	case OP(RDMA_READ_RESPONSE_LAST):
 	case OP(RDMA_READ_RESPONSE_ONLY):
 	case OP(ATOMIC_ACKNOWLEDGE):
-		qp->s_ack_state = OP(ACKNOWLEDGE);
+		/*
+		 * We can increment the tail pointer now that the last
+		 * response has been sent instead of only being
+		 * constructed.
+		 */
+		if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
+			qp->s_tail_ack_queue = 0;
 		/* FALLTHROUGH */
+	case OP(SEND_ONLY):
 	case OP(ACKNOWLEDGE):
 		/* Check for no next entry in the queue. */
 		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
 			if (qp->s_flags & IPATH_S_ACK_PENDING)
 				goto normal;
+			qp->s_ack_state = OP(ACKNOWLEDGE);
 			goto bail;
 		}
 
@@ -117,12 +125,8 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
 			if (len > pmtu) {
 				len = pmtu;
 				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
-			} else {
+			} else
 				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
-				if (++qp->s_tail_ack_queue >
-				    IPATH_MAX_RDMA_ATOMIC)
-					qp->s_tail_ack_queue = 0;
-			}
 			ohdr->u.aeth = ipath_compute_aeth(qp);
 			hwords++;
 			qp->s_ack_rdma_psn = e->psn;
@@ -139,8 +143,6 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
 				cpu_to_be32(e->atomic_data);
 			hwords += sizeof(ohdr->u.at) / sizeof(u32);
 			bth2 = e->psn;
-			if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
-				qp->s_tail_ack_queue = 0;
 		}
 		bth0 = qp->s_ack_state << 24;
 		break;
@@ -156,8 +158,6 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
 			ohdr->u.aeth = ipath_compute_aeth(qp);
 			hwords++;
 			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
-			if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
-				qp->s_tail_ack_queue = 0;
 		}
 		bth0 = qp->s_ack_state << 24;
 		bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
@@ -171,7 +171,7 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
 		 * the ACK before setting s_ack_state to ACKNOWLEDGE
 		 * (see above).
 		 */
-		qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
+		qp->s_ack_state = OP(SEND_ONLY);
 		qp->s_flags &= ~IPATH_S_ACK_PENDING;
 		qp->s_cur_sge = NULL;
 		if (qp->s_nak_state)
@@ -223,7 +223,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
 	/* Sending responses has higher priority over sending requests. */
 	if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
 	     (qp->s_flags & IPATH_S_ACK_PENDING) ||
-	     qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE) &&
+	     qp->s_ack_state != OP(ACKNOWLEDGE)) &&
 	    ipath_make_rc_ack(qp, ohdr, pmtu, bth0p, bth2p))
 		goto done;
 
@@ -585,7 +585,9 @@ static void send_rc_ack(struct ipath_qp *qp)
 	unsigned long flags;
 
 	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
-	if (qp->r_head_ack_queue != qp->s_tail_ack_queue)
+	if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
+	    (qp->s_flags & IPATH_S_ACK_PENDING) ||
+	    qp->s_ack_state != OP(ACKNOWLEDGE))
 		goto queue_ack;
 
 	/* Construct the header. */
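
The patch defers retiring an ack-queue entry (bumping s_tail_ack_queue) until the response has actually been sent rather than merely constructed, and repurposes OP(SEND_ONLY), which can never be a real response state, as the "plain ACK in flight" marker so the dequeue logic in the ATOMIC_ACKNOWLEDGE case is not triggered by it. Below is a minimal standalone sketch of the ring-index handling being moved around here; the names and queue size are illustrative stand-ins, not the driver's actual definitions.

	#include <stdio.h>

	#define MAX_RDMA_ATOMIC 4	/* stand-in for IPATH_MAX_RDMA_ATOMIC */

	/* The ack queue has MAX_RDMA_ATOMIC + 1 slots; head == tail means empty. */
	static unsigned int head, tail;

	/* Retire the tail entry only after its response has gone out on the
	 * wire, mirroring the increment the patch moves out of the
	 * construction path. */
	static void retire_tail(void)
	{
		if (++tail > MAX_RDMA_ATOMIC)	/* wrap the ring index */
			tail = 0;
	}

	int main(void)
	{
		head = 2;	/* pretend two responses are queued */
		while (tail != head) {
			printf("send response from slot %u\n", tail);
			retire_tail();
		}
		printf("queue empty (head == tail)\n");
		return 0;
	}

The same deferred increment is why send_rc_ack() now also checks IPATH_S_ACK_PENDING and s_ack_state: an entry still at the tail, or an ACK still in flight, means an immediate ACK must be queued instead of sent directly.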