@@ -57,72 +57,6 @@ u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
         return efx_tx_get_copy_buffer(tx_queue, buffer);
 }
 
-void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
-                        struct efx_tx_buffer *buffer,
-                        unsigned int *pkts_compl,
-                        unsigned int *bytes_compl)
-{
-        if (buffer->unmap_len) {
-                struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
-                dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
-                if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
-                        dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
-                                         DMA_TO_DEVICE);
-                else
-                        dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
-                                       DMA_TO_DEVICE);
-                buffer->unmap_len = 0;
-        }
-
-        if (buffer->flags & EFX_TX_BUF_SKB) {
-                struct sk_buff *skb = (struct sk_buff *)buffer->skb;
-
-                EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
-                (*pkts_compl)++;
-                (*bytes_compl) += skb->len;
-                if (tx_queue->timestamping &&
-                    (tx_queue->completed_timestamp_major ||
-                     tx_queue->completed_timestamp_minor)) {
-                        struct skb_shared_hwtstamps hwtstamp;
-
-                        hwtstamp.hwtstamp =
-                                efx_ptp_nic_to_kernel_time(tx_queue);
-                        skb_tstamp_tx(skb, &hwtstamp);
-
-                        tx_queue->completed_timestamp_major = 0;
-                        tx_queue->completed_timestamp_minor = 0;
-                }
-                dev_consume_skb_any((struct sk_buff *)buffer->skb);
-                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
-                           "TX queue %d transmission id %x complete\n",
-                           tx_queue->queue, tx_queue->read_count);
-        } else if (buffer->flags & EFX_TX_BUF_XDP) {
-                xdp_return_frame_rx_napi(buffer->xdpf);
-        }
-
-        buffer->len = 0;
-        buffer->flags = 0;
-}
-
-unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
-{
-        /* Header and payload descriptor for each output segment, plus
-         * one for every input fragment boundary within a segment
-         */
-        unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
-
-        /* Possibly one more per segment for option descriptors */
-        if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
-                max_descs += EFX_TSO_MAX_SEGS;
-
-        /* Possibly more for PCIe page boundaries within input fragments */
-        if (PAGE_SIZE > EFX_PAGE_SIZE)
-                max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
-                                   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
-
-        return max_descs;
-}
-
 static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
 {
         /* We need to consider both queues that the net core sees as one */
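For readers checking the descriptor-budget reasoning in the removed efx_tx_max_skb_descs(), the arithmetic can be reproduced in isolation. The user-space sketch below mirrors that calculation under assumed stand-in values for EFX_TSO_MAX_SEGS, MAX_SKB_FRAGS, GSO_MAX_SIZE, EFX_PAGE_SIZE and PAGE_SIZE; the real values come from the driver and kernel headers and may differ.

/* Standalone sketch of the worst-case descriptor count per TSO skb,
 * mirroring the removed efx_tx_max_skb_descs().  All constants below are
 * illustrative stand-ins, not the driver's authoritative values.
 */
#include <stdio.h>

#define TSO_MAX_SEGS    100u    /* stand-in for EFX_TSO_MAX_SEGS */
#define SKB_MAX_FRAGS   17u     /* stand-in for MAX_SKB_FRAGS */
#define GSO_MAX_BYTES   65536u  /* stand-in for GSO_MAX_SIZE */
#define NIC_PAGE_SIZE   4096u   /* stand-in for EFX_PAGE_SIZE */
#define HOST_PAGE_SIZE  4096u   /* stand-in for PAGE_SIZE */

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int max_skb_descs(int has_option_descs)
{
        /* Header + payload descriptor per output segment, plus one for
         * every input fragment boundary within a segment.
         */
        unsigned int max_descs = TSO_MAX_SEGS * 2 + SKB_MAX_FRAGS;

        /* Newer NIC revisions may add one option descriptor per segment. */
        if (has_option_descs)
                max_descs += TSO_MAX_SEGS;

        /* Extra descriptors if host pages exceed the NIC page size, so
         * input fragments can straddle NIC page boundaries.
         */
        if (HOST_PAGE_SIZE > NIC_PAGE_SIZE) {
                unsigned int crossings = DIV_ROUND_UP(GSO_MAX_BYTES, NIC_PAGE_SIZE);

                max_descs += crossings > SKB_MAX_FRAGS ? crossings : SKB_MAX_FRAGS;
        }

        return max_descs;
}

int main(void)
{
        /* With the stand-in values this prints 317: 200 + 17 + 100. */
        printf("worst-case descriptors per TSO skb: %u\n", max_skb_descs(1));
        return 0;
}

Broadly, this is the headroom the stop/wake logic in efx_tx_maybe_stop_queue() needs to see in the ring before another TSO skb can be accepted.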
@@ -334,107 +268,6 @@ static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
 }
 #endif /* EFX_USE_PIO */
 
-struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
-                                       dma_addr_t dma_addr,
-                                       size_t len)
-{
-        const struct efx_nic_type *nic_type = tx_queue->efx->type;
-        struct efx_tx_buffer *buffer;
-        unsigned int dma_len;
-
-        /* Map the fragment taking account of NIC-dependent DMA limits. */
-        do {
-                buffer = efx_tx_queue_get_insert_buffer(tx_queue);
-                dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
-
-                buffer->len = dma_len;
-                buffer->dma_addr = dma_addr;
-                buffer->flags = EFX_TX_BUF_CONT;
-                len -= dma_len;
-                dma_addr += dma_len;
-                ++tx_queue->insert_count;
-        } while (len);
-
-        return buffer;
-}
-
-/* Map all data from an SKB for DMA and create descriptors on the queue.
- */
-int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
-                    unsigned int segment_count)
-{
-        struct efx_nic *efx = tx_queue->efx;
-        struct device *dma_dev = &efx->pci_dev->dev;
-        unsigned int frag_index, nr_frags;
-        dma_addr_t dma_addr, unmap_addr;
-        unsigned short dma_flags;
-        size_t len, unmap_len;
-
-        nr_frags = skb_shinfo(skb)->nr_frags;
-        frag_index = 0;
-
-        /* Map header data. */
-        len = skb_headlen(skb);
-        dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
-        dma_flags = EFX_TX_BUF_MAP_SINGLE;
-        unmap_len = len;
-        unmap_addr = dma_addr;
-
-        if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
-                return -EIO;
-
-        if (segment_count) {
-                /* For TSO we need to put the header in to a separate
-                 * descriptor. Map this separately if necessary.
-                 */
-                size_t header_len = skb_transport_header(skb) - skb->data +
-                                (tcp_hdr(skb)->doff << 2u);
-
-                if (header_len != len) {
-                        tx_queue->tso_long_headers++;
-                        efx_tx_map_chunk(tx_queue, dma_addr, header_len);
-                        len -= header_len;
-                        dma_addr += header_len;
-                }
-        }
-
-        /* Add descriptors for each fragment. */
-        do {
-                struct efx_tx_buffer *buffer;
-                skb_frag_t *fragment;
-
-                buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
-
-                /* The final descriptor for a fragment is responsible for
-                 * unmapping the whole fragment.
-                 */
-                buffer->flags = EFX_TX_BUF_CONT | dma_flags;
-                buffer->unmap_len = unmap_len;
-                buffer->dma_offset = buffer->dma_addr - unmap_addr;
-
-                if (frag_index >= nr_frags) {
-                        /* Store SKB details with the final buffer for
-                         * the completion.
-                         */
-                        buffer->skb = skb;
-                        buffer->flags = EFX_TX_BUF_SKB | dma_flags;
-                        return 0;
-                }
-
-                /* Move on to the next fragment. */
-                fragment = &skb_shinfo(skb)->frags[frag_index++];
-                len = skb_frag_size(fragment);
-                dma_addr = skb_frag_dma_map(dma_dev, fragment,
-                                            0, len, DMA_TO_DEVICE);
-                dma_flags = 0;
-                unmap_len = len;
-                unmap_addr = dma_addr;
-
-                if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
-                        return -EIO;
-        } while (1);
-}
-
 /* Remove buffers put into a tx_queue for the current packet.
  * None of the buffers must have an skb attached.
  */
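The removed efx_tx_map_chunk() splits one DMA-mapped range into however many descriptors the NIC's per-descriptor length limit demands, and efx_tx_map_data() drives it once for the skb head (or TSO header) and once per page fragment. Below is a minimal user-space sketch of that chunking loop; DESC_LEN_MAX is an assumed flat limit standing in for nic_type->tx_limit_len(), which in the driver can also depend on the DMA address.

/* Sketch of the chunking loop in the removed efx_tx_map_chunk(): split a
 * mapped range into descriptors no longer than a per-descriptor limit.
 * DESC_LEN_MAX is an assumed, flat stand-in for nic_type->tx_limit_len().
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define DESC_LEN_MAX 16384u     /* assumed per-descriptor DMA length limit */

static unsigned int map_chunk(uint64_t dma_addr, size_t len)
{
        unsigned int ndescs = 0;

        do {
                size_t dma_len = len < DESC_LEN_MAX ? len : DESC_LEN_MAX;

                /* In the driver this fills an efx_tx_buffer and bumps
                 * tx_queue->insert_count; here we just report the split.
                 */
                printf("desc %u: addr 0x%llx len %zu\n", ndescs,
                       (unsigned long long)dma_addr, dma_len);
                len -= dma_len;
                dma_addr += dma_len;
                ndescs++;
        } while (len);

        return ndescs;
}

int main(void)
{
        /* A 40000-byte fragment needs three descriptors at this limit. */
        printf("total descriptors: %u\n", map_chunk(0x100000, 40000));
        return 0;
}

The unmap bookkeeping is omitted here; in the driver the last descriptor of each fragment carries unmap_len and dma_offset so that efx_dequeue_buffer() can unmap the whole fragment on completion.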
@@ -877,131 +710,3 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
                 }
         }
 }
-
-static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
-{
-        return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EFX_TX_CB_ORDER);
-}
-
-int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
-{
-        struct efx_nic *efx = tx_queue->efx;
-        unsigned int entries;
-        int rc;
-
-        /* Create the smallest power-of-two aligned ring */
-        entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
-        EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
-        tx_queue->ptr_mask = entries - 1;
-
-        netif_dbg(efx, probe, efx->net_dev,
-                  "creating TX queue %d size %#x mask %#x\n",
-                  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
-
-        /* Allocate software ring */
-        tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
-                                   GFP_KERNEL);
-        if (!tx_queue->buffer)
-                return -ENOMEM;
-
-        tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
-                                    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
-        if (!tx_queue->cb_page) {
-                rc = -ENOMEM;
-                goto fail1;
-        }
-
-        /* Allocate hardware ring */
-        rc = efx_nic_probe_tx(tx_queue);
-        if (rc)
-                goto fail2;
-
-        return 0;
-
-fail2:
-        kfree(tx_queue->cb_page);
-        tx_queue->cb_page = NULL;
-fail1:
-        kfree(tx_queue->buffer);
-        tx_queue->buffer = NULL;
-        return rc;
-}
-
-void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
-{
-        struct efx_nic *efx = tx_queue->efx;
-
-        netif_dbg(efx, drv, efx->net_dev,
-                  "initialising TX queue %d\n", tx_queue->queue);
-
-        tx_queue->insert_count = 0;
-        tx_queue->write_count = 0;
-        tx_queue->packet_write_count = 0;
-        tx_queue->old_write_count = 0;
-        tx_queue->read_count = 0;
-        tx_queue->old_read_count = 0;
-        tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
-        tx_queue->xmit_more_available = false;
-        tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
-                                  tx_queue->channel == efx_ptp_channel(efx));
-        tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
-        tx_queue->completed_timestamp_major = 0;
-        tx_queue->completed_timestamp_minor = 0;
-
-        tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
-
-        /* Set up default function pointers. These may get replaced by
-         * efx_nic_init_tx() based off NIC/queue capabilities.
-         */
-        tx_queue->handle_tso = efx_enqueue_skb_tso;
-
-        /* Set up TX descriptor ring */
-        efx_nic_init_tx(tx_queue);
-
-        tx_queue->initialised = true;
-}
-
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
-{
-        struct efx_tx_buffer *buffer;
-
-        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
-                  "shutting down TX queue %d\n", tx_queue->queue);
-
-        if (!tx_queue->buffer)
-                return;
-
-        /* Free any buffers left in the ring */
-        while (tx_queue->read_count != tx_queue->write_count) {
-                unsigned int pkts_compl = 0, bytes_compl = 0;
-                buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
-                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-
-                ++tx_queue->read_count;
-        }
-        tx_queue->xmit_more_available = false;
-        netdev_tx_reset_queue(tx_queue->core_txq);
-}
-
-void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
-{
-        int i;
-
-        if (!tx_queue->buffer)
-                return;
-
-        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
-                  "destroying TX queue %d\n", tx_queue->queue);
-        efx_nic_remove_tx(tx_queue);
-
-        if (tx_queue->cb_page) {
-                for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
-                        efx_nic_free_buffer(tx_queue->efx,
-                                            &tx_queue->cb_page[i]);
-                kfree(tx_queue->cb_page);
-                tx_queue->cb_page = NULL;
-        }
-
-        kfree(tx_queue->buffer);
-        tx_queue->buffer = NULL;
-}