@@ -289,6 +289,16 @@ static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
 	return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
 }
 
+static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep)
+{
+	int entries;
+
+	entries = ((const char *)fep->dirty_tx -
+			(const char *)fep->cur_tx) / fep->bufdesc_size - 1;
+
+	return entries > 0 ? entries : entries + fep->tx_ring_size;
+}
+
 static void *swap_buffer(void *bufaddr, int len)
 {
 	int i;
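The new helper counts free descriptors by taking the signed distance from cur_tx back to dirty_tx, reserving one slot, and folding negative results back into the ring. A minimal user-space sketch of that wrap-around arithmetic is shown below; free_txdesc(), the indices, and the ring size are illustrative stand-ins for the driver's pointer math, not part of the patch.

#include <stdio.h>

/* Mirrors fec_enet_get_free_txdesc_num()'s arithmetic, with descriptor
 * indices standing in for (dirty_tx - cur_tx) / bufdesc_size.
 */
static int free_txdesc(int dirty_idx, int cur_idx, int ring_size)
{
	int entries = dirty_idx - cur_idx - 1;

	/* zero or negative means the distance wrapped past the ring end */
	return entries > 0 ? entries : entries + ring_size;
}

int main(void)
{
	printf("%d\n", free_txdesc(2, 5, 8));	/* producer ahead of cleaner: 4 */
	printf("%d\n", free_txdesc(5, 5, 8));	/* just reclaimed: 7 (one slot reserved) */
	return 0;
}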
@@ -316,103 +326,203 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
 	return 0;
 }
 
-static int txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
+static void
+fec_enet_submit_work(struct bufdesc *bdp, struct fec_enet_private *fep)
+{
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
+	struct bufdesc *bdp_pre;
+
+	bdp_pre = fec_enet_get_prevdesc(bdp, fep);
+	if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
+	    !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
+		fep->delay_work.trig_tx = true;
+		schedule_delayed_work(&(fep->delay_work.delay_work),
+					msecs_to_jiffies(1));
+	}
+}
+
+static int
+fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
 				platform_get_device_id(fep->pdev);
-	struct bufdesc *bdp, *bdp_pre;
-	void *bufaddr;
-	unsigned short status;
+	struct bufdesc *bdp = fep->cur_tx;
+	struct bufdesc_ex *ebdp;
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	int frag, frag_len;
+	unsigned short status;
+	unsigned int estatus = 0;
+	skb_frag_t *this_frag;
 	unsigned int index;
+	void *bufaddr;
+	int i;
 
-	/* Fill in a Tx ring entry */
+	for (frag = 0; frag < nr_frags; frag++) {
+		this_frag = &skb_shinfo(skb)->frags[frag];
+		bdp = fec_enet_get_nextdesc(bdp, fep);
+		ebdp = (struct bufdesc_ex *)bdp;
+
+		status = bdp->cbd_sc;
+		status &= ~BD_ENET_TX_STATS;
+		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+		frag_len = skb_shinfo(skb)->frags[frag].size;
+
+		/* Handle the last BD specially */
+		if (frag == nr_frags - 1) {
+			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+			if (fep->bufdesc_ex) {
+				estatus |= BD_ENET_TX_INT;
+				if (unlikely(skb_shinfo(skb)->tx_flags &
+					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
+					estatus |= BD_ENET_TX_TS;
+			}
+		}
+
+		if (fep->bufdesc_ex) {
+			if (skb->ip_summed == CHECKSUM_PARTIAL)
+				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+			ebdp->cbd_bdu = 0;
+			ebdp->cbd_esc = estatus;
+		}
+
+		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
+
+		index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+		if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+			id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+			memcpy(fep->tx_bounce[index], bufaddr, frag_len);
+			bufaddr = fep->tx_bounce[index];
+
+			if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+				swap_buffer(bufaddr, frag_len);
+		}
+
+		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
+						frag_len, DMA_TO_DEVICE);
+		if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+			dev_kfree_skb_any(skb);
+			if (net_ratelimit())
+				netdev_err(ndev, "Tx DMA memory map failed\n");
+			goto dma_mapping_error;
+		}
+
+		bdp->cbd_datlen = frag_len;
+		bdp->cbd_sc = status;
+	}
+
+	fep->cur_tx = bdp;
+
+	return 0;
+
+dma_mapping_error:
 	bdp = fep->cur_tx;
+	for (i = 0; i < frag; i++) {
+		bdp = fec_enet_get_nextdesc(bdp, fep);
+		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+				bdp->cbd_datlen, DMA_TO_DEVICE);
+	}
+	return NETDEV_TX_OK;
+}
 
-	status = bdp->cbd_sc;
+static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	struct bufdesc *bdp, *last_bdp;
+	void *bufaddr;
+	unsigned short status;
+	unsigned short buflen;
+	unsigned int estatus = 0;
+	unsigned int index;
+	int ret;
 
 	/* Protocol checksum off-load for TCP and UDP. */
 	if (fec_enet_clear_csum(skb, ndev)) {
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 
-	/* Clear all of the status flags */
+	/* Fill in a Tx ring entry */
+	bdp = fep->cur_tx;
+	status = bdp->cbd_sc;
 	status &= ~BD_ENET_TX_STATS;
 
 	/* Set buffer length and buffer pointer */
 	bufaddr = skb->data;
-	bdp->cbd_datlen = skb->len;
+	buflen = skb_headlen(skb);
 
 	index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
-
-	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
-		memcpy(fep->tx_bounce[index], skb->data, skb->len);
+	if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+		id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+		memcpy(fep->tx_bounce[index], skb->data, buflen);
 		bufaddr = fep->tx_bounce[index];
-	}
 
-	/*
-	 * Some design made an incorrect assumption on endian mode of
-	 * the system that it's running on. As the result, driver has to
-	 * swap every frame going to and coming from the controller.
-	 */
-	if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
-		swap_buffer(bufaddr, skb->len);
-
-	/* Save skb pointer */
-	fep->tx_skbuff[index] = skb;
+		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+			swap_buffer(bufaddr, buflen);
+	}
 
 	/* Push the data cache so the CPM does not get stale memory
 	 * data.
 	 */
 	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
-			skb->len, DMA_TO_DEVICE);
+					buflen, DMA_TO_DEVICE);
 	if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
-		bdp->cbd_bufaddr = 0;
-		fep->tx_skbuff[index] = NULL;
 		dev_kfree_skb_any(skb);
 		if (net_ratelimit())
 			netdev_err(ndev, "Tx DMA memory map failed\n");
 		return NETDEV_TX_OK;
 	}
 
+	if (nr_frags) {
+		ret = fec_enet_txq_submit_frag_skb(skb, ndev);
+		if (ret)
+			return ret;
+	} else {
+		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+		if (fep->bufdesc_ex) {
+			estatus = BD_ENET_TX_INT;
+			if (unlikely(skb_shinfo(skb)->tx_flags &
+				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
+				estatus |= BD_ENET_TX_TS;
+		}
+	}
+
 	if (fep->bufdesc_ex) {
 
 		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-		ebdp->cbd_bdu = 0;
+
 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
-			fep->hwts_tx_en)) {
-			ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
+			fep->hwts_tx_en))
 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-		} else {
-			ebdp->cbd_esc = BD_ENET_TX_INT;
 
-			/* Enable protocol checksum flags
-			 * We do not bother with the IP Checksum bits as they
-			 * are done by the kernel
-			 */
-			if (skb->ip_summed == CHECKSUM_PARTIAL)
-				ebdp->cbd_esc |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
-		}
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+
+		ebdp->cbd_bdu = 0;
+		ebdp->cbd_esc = estatus;
 	}
 
+	last_bdp = fep->cur_tx;
+	index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep);
+	/* Save skb pointer */
+	fep->tx_skbuff[index] = skb;
+
+	bdp->cbd_datlen = buflen;
+
 	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
 	 * it's the last BD of the frame, and to put the CRC on the end.
 	 */
-	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
-			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
+	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
 	bdp->cbd_sc = status;
 
-	bdp_pre = fec_enet_get_prevdesc(bdp, fep);
-	if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
-	    !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
-		fep->delay_work.trig_tx = true;
-		schedule_delayed_work(&(fep->delay_work.delay_work),
-					msecs_to_jiffies(1));
-	}
+	fec_enet_submit_work(bdp, fep);
 
 	/* If this was the last BD in the ring, start at the beginning again. */
-	bdp = fec_enet_get_nextdesc(bdp, fep);
+	bdp = fec_enet_get_nextdesc(last_bdp, fep);
 
 	skb_tx_timestamp(skb);
 
@@ -421,7 +531,7 @@ static int txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 	/* Trigger transmission start */
 	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 
-	return NETDEV_TX_OK;
+	return 0;
 }
 
 static netdev_tx_t
@@ -430,6 +540,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct bufdesc *bdp;
 	unsigned short status;
+	int entries_free;
 	int ret;
 
 	/* Fill in a Tx ring entry */
@@ -441,15 +552,17 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		/* Ooops.  All transmit buffers are full.  Bail out.
 		 * This should not happen, since ndev->tbusy should be set.
 		 */
-		netdev_err(ndev, "tx queue full!\n");
+		if (net_ratelimit())
+			netdev_err(ndev, "tx queue full!\n");
 		return NETDEV_TX_BUSY;
 	}
 
-	ret = txq_submit_skb(skb, ndev);
-	if (ret == -EBUSY)
-		return NETDEV_TX_BUSY;
+	ret = fec_enet_txq_submit_skb(skb, ndev);
+	if (ret)
+		return ret;
 
-	if (fep->cur_tx == fep->dirty_tx)
+	entries_free = fec_enet_get_free_txdesc_num(fep);
+	if (entries_free < MAX_SKB_FRAGS + 1)
 		netif_stop_queue(ndev);
 
 	return NETDEV_TX_OK;
@@ -770,6 +883,7 @@ fec_enet_tx(struct net_device *ndev)
 	unsigned short status;
 	struct sk_buff *skb;
 	int index = 0;
+	int entries;
 
 	fep = netdev_priv(ndev);
 	bdp = fep->dirty_tx;
@@ -786,9 +900,13 @@ fec_enet_tx(struct net_device *ndev)
 		index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
 
 		skb = fep->tx_skbuff[index];
-		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len,
+		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, bdp->cbd_datlen,
 				DMA_TO_DEVICE);
 		bdp->cbd_bufaddr = 0;
+		if (!skb) {
+			bdp = fec_enet_get_nextdesc(bdp, fep);
+			continue;
+		}
 
 		/* Check for errors. */
 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -807,7 +925,7 @@ fec_enet_tx(struct net_device *ndev)
 				ndev->stats.tx_carrier_errors++;
 		} else {
 			ndev->stats.tx_packets++;
-			ndev->stats.tx_bytes += bdp->cbd_datlen;
+			ndev->stats.tx_bytes += skb->len;
 		}
 
 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
@@ -844,15 +962,13 @@ fec_enet_tx(struct net_device *ndev)
 
 		/* Since we have freed up a buffer, the ring is no longer full
 		 */
-		if (fep->dirty_tx != fep->cur_tx) {
-			if (netif_queue_stopped(ndev))
-				netif_wake_queue(ndev);
-		}
+		entries = fec_enet_get_free_txdesc_num(fep);
+		if (entries >= MAX_SKB_FRAGS + 1 && netif_queue_stopped(ndev))
+			netif_wake_queue(ndev);
 	}
 	return;
 }
 
-
 /* During a receive, the cur_rx points to the current incoming buffer.
  * When we update through the ring, if the next incoming buffer has
  * not been given to the system, we just set the empty indicator,
@@ -2095,7 +2211,7 @@ static int fec_enet_init(struct net_device *ndev)
 	if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
 		/* enable hw accelerator */
 		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
-				| NETIF_F_RXCSUM);
+				| NETIF_F_RXCSUM | NETIF_F_SG);
 		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
 	}
 