@@ -171,6 +171,14 @@ static void igc_get_hw_control(struct igc_adapter *adapter)
171
171
ctrl_ext | IGC_CTRL_EXT_DRV_LOAD );
172
172
}
173
173
174
/**
 * igc_unmap_tx_buffer - release a Tx buffer's DMA mapping
 * @dev: device the mapping was created against (tx_ring->dev at call sites)
 * @buf: Tx buffer whose recorded mapping (dma addr + len) is torn down
 *
 * Unmaps the DMA region recorded in @buf and zeroes the stored length so
 * the same mapping cannot be released twice by later cleanup passes.
 *
 * NOTE(review): the visible callers use this helper for both skb-header
 * mappings (previously dma_unmap_single) and paged fragments (previously
 * dma_unmap_page); this relies on single/page unmap being interchangeable
 * at unmap time — confirm against the kernel DMA-API documentation.
 */
static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, dma),
			 dma_unmap_len(buf, len), DMA_TO_DEVICE);

	/* len == 0 marks the buffer as unmapped; callers gate on
	 * dma_unmap_len(buf, len) before invoking this helper.
	 */
	dma_unmap_len_set(buf, len, 0);
}
181
+
174
182
/**
175
183
* igc_clean_tx_ring - Free Tx Buffers
176
184
* @tx_ring: ring to be cleaned
@@ -188,11 +196,7 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
188
196
else
189
197
dev_kfree_skb_any (tx_buffer -> skb );
190
198
191
- /* unmap skb header data */
192
- dma_unmap_single (tx_ring -> dev ,
193
- dma_unmap_addr (tx_buffer , dma ),
194
- dma_unmap_len (tx_buffer , len ),
195
- DMA_TO_DEVICE );
199
+ igc_unmap_tx_buffer (tx_ring -> dev , tx_buffer );
196
200
197
201
/* check for eop_desc to determine the end of the packet */
198
202
eop_desc = tx_buffer -> next_to_watch ;
@@ -211,10 +215,7 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
211
215
212
216
/* unmap any remaining paged data */
213
217
if (dma_unmap_len (tx_buffer , len ))
214
- dma_unmap_page (tx_ring -> dev ,
215
- dma_unmap_addr (tx_buffer , dma ),
216
- dma_unmap_len (tx_buffer , len ),
217
- DMA_TO_DEVICE );
218
+ igc_unmap_tx_buffer (tx_ring -> dev , tx_buffer );
218
219
}
219
220
220
221
/* move us one more past the eop_desc for start of next pkt */
@@ -1219,23 +1220,15 @@ static int igc_tx_map(struct igc_ring *tx_ring,
1219
1220
/* clear dma mappings for failed tx_buffer_info map */
1220
1221
while (tx_buffer != first ) {
1221
1222
if (dma_unmap_len (tx_buffer , len ))
1222
- dma_unmap_page (tx_ring -> dev ,
1223
- dma_unmap_addr (tx_buffer , dma ),
1224
- dma_unmap_len (tx_buffer , len ),
1225
- DMA_TO_DEVICE );
1226
- dma_unmap_len_set (tx_buffer , len , 0 );
1223
+ igc_unmap_tx_buffer (tx_ring -> dev , tx_buffer );
1227
1224
1228
1225
if (i -- == 0 )
1229
1226
i += tx_ring -> count ;
1230
1227
tx_buffer = & tx_ring -> tx_buffer_info [i ];
1231
1228
}
1232
1229
1233
1230
if (dma_unmap_len (tx_buffer , len ))
1234
- dma_unmap_single (tx_ring -> dev ,
1235
- dma_unmap_addr (tx_buffer , dma ),
1236
- dma_unmap_len (tx_buffer , len ),
1237
- DMA_TO_DEVICE );
1238
- dma_unmap_len_set (tx_buffer , len , 0 );
1231
+ igc_unmap_tx_buffer (tx_ring -> dev , tx_buffer );
1239
1232
1240
1233
dev_kfree_skb_any (tx_buffer -> skb );
1241
1234
tx_buffer -> skb = NULL ;
@@ -2317,14 +2310,7 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
2317
2310
else
2318
2311
napi_consume_skb (tx_buffer -> skb , napi_budget );
2319
2312
2320
- /* unmap skb header data */
2321
- dma_unmap_single (tx_ring -> dev ,
2322
- dma_unmap_addr (tx_buffer , dma ),
2323
- dma_unmap_len (tx_buffer , len ),
2324
- DMA_TO_DEVICE );
2325
-
2326
- /* clear tx_buffer data */
2327
- dma_unmap_len_set (tx_buffer , len , 0 );
2313
+ igc_unmap_tx_buffer (tx_ring -> dev , tx_buffer );
2328
2314
2329
2315
/* clear last DMA location and unmap remaining buffers */
2330
2316
while (tx_desc != eop_desc ) {
@@ -2338,13 +2324,8 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
2338
2324
}
2339
2325
2340
2326
/* unmap any remaining paged data */
2341
- if (dma_unmap_len (tx_buffer , len )) {
2342
- dma_unmap_page (tx_ring -> dev ,
2343
- dma_unmap_addr (tx_buffer , dma ),
2344
- dma_unmap_len (tx_buffer , len ),
2345
- DMA_TO_DEVICE );
2346
- dma_unmap_len_set (tx_buffer , len , 0 );
2347
- }
2327
+ if (dma_unmap_len (tx_buffer , len ))
2328
+ igc_unmap_tx_buffer (tx_ring -> dev , tx_buffer );
2348
2329
}
2349
2330
2350
2331
/* move us one more past the eop_desc for start of next pkt */
0 commit comments