@@ -92,11 +92,11 @@ static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
 	return &sq->dma_fifo[i & sq->dma_fifo_mask];
 }
 
-static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
 {
 	int i;
 
-	for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
+	for (i = 0; i < num_dma; i++) {
 		struct mlx5e_sq_dma *last_pushed_dma =
 			mlx5e_dma_get(sq, --sq->dma_fifo_pc);
 
@@ -139,19 +139,28 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
 	return MLX5E_MIN_INLINE;
 }
 
-static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
+static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
+					    unsigned int *skb_len,
+					    unsigned int len)
+{
+	*skb_len  -= len;
+	*skb_data += len;
+}
+
+static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
+				     unsigned char **skb_data,
+				     unsigned int *skb_len)
 {
 	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
 	int cpy1_sz = 2 * ETH_ALEN;
 	int cpy2_sz = ihs - cpy1_sz;
 
-	skb_copy_from_linear_data(skb, vhdr, cpy1_sz);
-	skb_pull_inline(skb, cpy1_sz);
+	memcpy(vhdr, *skb_data, cpy1_sz);
+	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
 	vhdr->h_vlan_proto = skb->vlan_proto;
 	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
-	skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto,
-				  cpy2_sz);
-	skb_pull_inline(skb, cpy2_sz);
+	memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
+	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
 }
 
 static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
@@ -160,11 +169,14 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 
 	u16 pi = sq->pc & wq->sz_m1;
 	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+	struct mlx5e_tx_wqe_info *wi = &sq->wqe_info[pi];
 
 	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
 	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
 	struct mlx5_wqe_data_seg *dseg;
 
+	unsigned char *skb_data = skb->data;
+	unsigned int skb_len = skb->len;
 	u8  opcode = MLX5_OPCODE_SEND;
 	dma_addr_t dma_addr = 0;
 	bool bf = false;
@@ -192,25 +204,25 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		opcode = MLX5_OPCODE_LSO;
 		ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
 		payload_len = skb->len - ihs;
-		MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len +
-					(skb_shinfo(skb)->gso_segs - 1) * ihs;
+		wi->num_bytes = skb->len +
+				(skb_shinfo(skb)->gso_segs - 1) * ihs;
 		sq->stats.tso_packets++;
 		sq->stats.tso_bytes += payload_len;
 	} else {
 		bf = sq->bf_budget &&
 		     !skb->xmit_more &&
 		     !skb_shinfo(skb)->nr_frags;
 		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
-		MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
-							ETH_ZLEN);
+		wi->num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
 	}
 
 	if (skb_vlan_tag_present(skb)) {
-		mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs);
+		mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
+				  &skb_len);
 		ihs += VLAN_HLEN;
 	} else {
-		skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
-		skb_pull_inline(skb, ihs);
+		memcpy(eseg->inline_hdr_start, skb_data, ihs);
+		mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
 	}
 
 	eseg->inline_hdr_sz = cpu_to_be16(ihs);
@@ -220,11 +232,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 			       MLX5_SEND_WQE_DS);
 	dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
 
-	MLX5E_TX_SKB_CB(skb)->num_dma = 0;
+	wi->num_dma = 0;
 
-	headlen = skb_headlen(skb);
+	headlen = skb_len - skb->data_len;
 	if (headlen) {
-		dma_addr = dma_map_single(sq->pdev, skb->data, headlen,
+		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
 					  DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
 			goto dma_unmap_wqe_err;
@@ -234,7 +246,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		dseg->byte_count = cpu_to_be32(headlen);
 
 		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
-		MLX5E_TX_SKB_CB(skb)->num_dma++;
+		wi->num_dma++;
 
 		dseg++;
 	}
@@ -253,23 +265,22 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		dseg->byte_count = cpu_to_be32(fsz);
 
 		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
-		MLX5E_TX_SKB_CB(skb)->num_dma++;
+		wi->num_dma++;
 
 		dseg++;
 	}
 
-	ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma;
+	ds_cnt += wi->num_dma;
 
 	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
 	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
 
 	sq->skb[pi] = skb;
 
-	MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt,
-							MLX5_SEND_WQEBB_NUM_DS);
-	sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+	wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
+	sq->pc += wi->num_wqebbs;
 
-	netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);
+	netdev_tx_sent_queue(sq->txq, wi->num_bytes);
 
 	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
 		netif_tx_stop_queue(sq->txq);
@@ -280,7 +291,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		int bf_sz = 0;
 
 		if (bf && sq->uar_bf_map)
-			bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3;
+			bf_sz = wi->num_wqebbs << 3;
 
 		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
 		mlx5e_tx_notify_hw(sq, wqe, bf_sz);
@@ -297,7 +308,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 
 dma_unmap_wqe_err:
 	sq->stats.dropped++;
-	mlx5e_dma_unmap_wqe_err(sq, skb);
+	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
 
 	dev_kfree_skb_any(skb);
 
@@ -352,6 +363,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
 
 		do {
+			struct mlx5e_tx_wqe_info *wi;
 			struct sk_buff *skb;
 			u16 ci;
 			int j;
@@ -360,23 +372,24 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
 
 			ci = sqcc & sq->wq.sz_m1;
 			skb = sq->skb[ci];
+			wi = &sq->wqe_info[ci];
 
 			if (unlikely(!skb)) { /* nop */
 				sq->stats.nop++;
 				sqcc++;
 				continue;
 			}
 
-			for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
+			for (j = 0; j < wi->num_dma; j++) {
 				struct mlx5e_sq_dma *dma =
 					mlx5e_dma_get(sq, dma_fifo_cc++);
 
 				mlx5e_tx_dma_unmap(sq->pdev, dma);
 			}
 
 			npkts++;
-			nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes;
-			sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+			nbytes += wi->num_bytes;
+			sqcc += wi->num_wqebbs;
 			dev_kfree_skb(skb);
 		} while (!last_wqe);
 	}
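
Note on the hunks above: the per-packet bookkeeping (num_bytes, num_dma, num_wqebbs) moves out of the skb control block, where the removed MLX5E_TX_SKB_CB() macro used to cast skb->cb to a private struct, and into a per-slot wqe_info array on the SQ, indexed by the same producer/consumer index as sq->skb[]. The struct itself and the sq->wqe_info field are defined outside this file (in the driver's en.h) and are not part of this diff; the sketch below is only an assumption of what the entry looks like, with field widths inferred from how the fields are used here (num_dma is passed as a u8 to mlx5e_dma_unmap_wqe_err()).

/* Assumed shape of the per-WQE bookkeeping entry referenced above as
 * sq->wqe_info[pi] / sq->wqe_info[ci]; the real definition lives in en.h
 * and may differ in field order or width.
 */
struct mlx5e_tx_wqe_info {
	u32 num_bytes;   /* bytes reported to BQL via netdev_tx_sent_queue() */
	u8  num_wqebbs;  /* WQE basic blocks consumed; advances sq->pc / sqcc */
	u8  num_dma;     /* dma_fifo entries pushed for this skb */
};

Together with the new mlx5e_tx_skb_pull_inline() helper, which advances local skb_data/skb_len copies instead of calling skb_pull_inline(), the TX path no longer writes into the skb at all: neither its cb area nor its data/len fields are modified between transmit and completion.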