Skip to content

Commit 34802a4

Browse files
Achiad Shochat authored and davem330 (David S. Miller) committed
net/mlx5e: Do not modify the TX SKB
If the SKB is cloned, or has an elevated users count, someone else can be looking at it at the same time.

Signed-off-by: Achiad Shochat <[email protected]>
Signed-off-by: Saeed Mahameed <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 33c1529 commit 34802a4

File tree

3 files changed

+49
-34
lines changed

3 files changed

+49
-34
lines changed

drivers/net/ethernet/mellanox/mlx5/core/en.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -328,14 +328,12 @@ struct mlx5e_rq {
328328
struct mlx5e_priv *priv;
329329
} ____cacheline_aligned_in_smp;
330330

331-
struct mlx5e_tx_skb_cb {
331+
struct mlx5e_tx_wqe_info {
332332
u32 num_bytes;
333333
u8 num_wqebbs;
334334
u8 num_dma;
335335
};
336336

337-
#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb)
338-
339337
enum mlx5e_dma_map_type {
340338
MLX5E_DMA_MAP_SINGLE,
341339
MLX5E_DMA_MAP_PAGE
@@ -371,6 +369,7 @@ struct mlx5e_sq {
371369
/* pointers to per packet info: write@xmit, read@completion */
372370
struct sk_buff **skb;
373371
struct mlx5e_sq_dma *dma_fifo;
372+
struct mlx5e_tx_wqe_info *wqe_info;
374373

375374
/* read only */
376375
struct mlx5_wq_cyc wq;

drivers/net/ethernet/mellanox/mlx5/core/en_main.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -507,6 +507,7 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
507507

508508
static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
509509
{
510+
kfree(sq->wqe_info);
510511
kfree(sq->dma_fifo);
511512
kfree(sq->skb);
512513
}
@@ -519,8 +520,10 @@ static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
519520
sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
520521
sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
521522
numa);
523+
sq->wqe_info = kzalloc_node(wq_sz * sizeof(*sq->wqe_info), GFP_KERNEL,
524+
numa);
522525

523-
if (!sq->skb || !sq->dma_fifo) {
526+
if (!sq->skb || !sq->dma_fifo || !sq->wqe_info) {
524527
mlx5e_free_sq_db(sq);
525528
return -ENOMEM;
526529
}

drivers/net/ethernet/mellanox/mlx5/core/en_tx.c

Lines changed: 43 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -92,11 +92,11 @@ static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
9292
return &sq->dma_fifo[i & sq->dma_fifo_mask];
9393
}
9494

95-
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
95+
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
9696
{
9797
int i;
9898

99-
for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
99+
for (i = 0; i < num_dma; i++) {
100100
struct mlx5e_sq_dma *last_pushed_dma =
101101
mlx5e_dma_get(sq, --sq->dma_fifo_pc);
102102

@@ -139,19 +139,28 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
139139
return MLX5E_MIN_INLINE;
140140
}
141141

142-
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
142+
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
143+
unsigned int *skb_len,
144+
unsigned int len)
145+
{
146+
*skb_len -= len;
147+
*skb_data += len;
148+
}
149+
150+
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
151+
unsigned char **skb_data,
152+
unsigned int *skb_len)
143153
{
144154
struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
145155
int cpy1_sz = 2 * ETH_ALEN;
146156
int cpy2_sz = ihs - cpy1_sz;
147157

148-
skb_copy_from_linear_data(skb, vhdr, cpy1_sz);
149-
skb_pull_inline(skb, cpy1_sz);
158+
memcpy(vhdr, *skb_data, cpy1_sz);
159+
mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
150160
vhdr->h_vlan_proto = skb->vlan_proto;
151161
vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
152-
skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto,
153-
cpy2_sz);
154-
skb_pull_inline(skb, cpy2_sz);
162+
memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
163+
mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
155164
}
156165

157166
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
@@ -160,11 +169,14 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
160169

161170
u16 pi = sq->pc & wq->sz_m1;
162171
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
172+
struct mlx5e_tx_wqe_info *wi = &sq->wqe_info[pi];
163173

164174
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
165175
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
166176
struct mlx5_wqe_data_seg *dseg;
167177

178+
unsigned char *skb_data = skb->data;
179+
unsigned int skb_len = skb->len;
168180
u8 opcode = MLX5_OPCODE_SEND;
169181
dma_addr_t dma_addr = 0;
170182
bool bf = false;
@@ -192,25 +204,25 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
192204
opcode = MLX5_OPCODE_LSO;
193205
ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
194206
payload_len = skb->len - ihs;
195-
MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len +
196-
(skb_shinfo(skb)->gso_segs - 1) * ihs;
207+
wi->num_bytes = skb->len +
208+
(skb_shinfo(skb)->gso_segs - 1) * ihs;
197209
sq->stats.tso_packets++;
198210
sq->stats.tso_bytes += payload_len;
199211
} else {
200212
bf = sq->bf_budget &&
201213
!skb->xmit_more &&
202214
!skb_shinfo(skb)->nr_frags;
203215
ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
204-
MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
205-
ETH_ZLEN);
216+
wi->num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
206217
}
207218

208219
if (skb_vlan_tag_present(skb)) {
209-
mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs);
220+
mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
221+
&skb_len);
210222
ihs += VLAN_HLEN;
211223
} else {
212-
skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
213-
skb_pull_inline(skb, ihs);
224+
memcpy(eseg->inline_hdr_start, skb_data, ihs);
225+
mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
214226
}
215227

216228
eseg->inline_hdr_sz = cpu_to_be16(ihs);
@@ -220,11 +232,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
220232
MLX5_SEND_WQE_DS);
221233
dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
222234

223-
MLX5E_TX_SKB_CB(skb)->num_dma = 0;
235+
wi->num_dma = 0;
224236

225-
headlen = skb_headlen(skb);
237+
headlen = skb_len - skb->data_len;
226238
if (headlen) {
227-
dma_addr = dma_map_single(sq->pdev, skb->data, headlen,
239+
dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
228240
DMA_TO_DEVICE);
229241
if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
230242
goto dma_unmap_wqe_err;
@@ -234,7 +246,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
234246
dseg->byte_count = cpu_to_be32(headlen);
235247

236248
mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
237-
MLX5E_TX_SKB_CB(skb)->num_dma++;
249+
wi->num_dma++;
238250

239251
dseg++;
240252
}
@@ -253,23 +265,22 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
253265
dseg->byte_count = cpu_to_be32(fsz);
254266

255267
mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
256-
MLX5E_TX_SKB_CB(skb)->num_dma++;
268+
wi->num_dma++;
257269

258270
dseg++;
259271
}
260272

261-
ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma;
273+
ds_cnt += wi->num_dma;
262274

263275
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
264276
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
265277

266278
sq->skb[pi] = skb;
267279

268-
MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt,
269-
MLX5_SEND_WQEBB_NUM_DS);
270-
sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
280+
wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
281+
sq->pc += wi->num_wqebbs;
271282

272-
netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);
283+
netdev_tx_sent_queue(sq->txq, wi->num_bytes);
273284

274285
if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
275286
netif_tx_stop_queue(sq->txq);
@@ -280,7 +291,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
280291
int bf_sz = 0;
281292

282293
if (bf && sq->uar_bf_map)
283-
bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3;
294+
bf_sz = wi->num_wqebbs << 3;
284295

285296
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
286297
mlx5e_tx_notify_hw(sq, wqe, bf_sz);
@@ -297,7 +308,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
297308

298309
dma_unmap_wqe_err:
299310
sq->stats.dropped++;
300-
mlx5e_dma_unmap_wqe_err(sq, skb);
311+
mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
301312

302313
dev_kfree_skb_any(skb);
303314

@@ -352,6 +363,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
352363
wqe_counter = be16_to_cpu(cqe->wqe_counter);
353364

354365
do {
366+
struct mlx5e_tx_wqe_info *wi;
355367
struct sk_buff *skb;
356368
u16 ci;
357369
int j;
@@ -360,23 +372,24 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
360372

361373
ci = sqcc & sq->wq.sz_m1;
362374
skb = sq->skb[ci];
375+
wi = &sq->wqe_info[ci];
363376

364377
if (unlikely(!skb)) { /* nop */
365378
sq->stats.nop++;
366379
sqcc++;
367380
continue;
368381
}
369382

370-
for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
383+
for (j = 0; j < wi->num_dma; j++) {
371384
struct mlx5e_sq_dma *dma =
372385
mlx5e_dma_get(sq, dma_fifo_cc++);
373386

374387
mlx5e_tx_dma_unmap(sq->pdev, dma);
375388
}
376389

377390
npkts++;
378-
nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes;
379-
sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
391+
nbytes += wi->num_bytes;
392+
sqcc += wi->num_wqebbs;
380393
dev_kfree_skb(skb);
381394
} while (!last_wqe);
382395
}

0 commit comments

Comments (0)