@@ -32,7 +32,9 @@
 #include <linux/of_gpio.h>
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
-
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
 #include "macb.h"
 
 #define MACB_RX_BUFFER_SIZE	128
@@ -60,10 +62,13 @@
 				 | MACB_BIT(TXERR))
 #define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 
-#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
-#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
+/* Max length of transmit frame must be a multiple of 8 bytes */
+#define MACB_TX_LEN_ALIGN	8
+#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
+#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
 
 #define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
+#define MACB_NETIF_LSO		(NETIF_F_TSO | NETIF_F_UFO)
 
 #define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 #define MACB_WOL_ENABLED		(0x1 << 1)
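
The masking added to the two MAX_TX_LEN macros rounds the frame-length field's raw maximum down to a multiple of MACB_TX_LEN_ALIGN. A worked example, assuming the field widths MACB_TX_FRMLEN_SIZE == 11 and GEM_TX_FRMLEN_SIZE == 14 as defined in macb.h:

    /* MACB: ((1 << 11) - 1) = 2047;  2047 & ~7 = 2040   */
    /* GEM:  ((1 << 14) - 1) = 16383; 16383 & ~7 = 16376 */
    /* The raw field maximum is odd, so clearing the low three bits is
     * what enforces the "multiple of 8 bytes" rule stated above. */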
@@ -1223,22 +1228,36 @@ static void macb_poll_controller(struct net_device *dev)
 
 static unsigned int macb_tx_map(struct macb *bp,
				struct macb_queue *queue,
-				struct sk_buff *skb)
+				struct sk_buff *skb,
+				unsigned int hdrlen)
 {
	dma_addr_t mapping;
	unsigned int len, entry, i, tx_head = queue->tx_head;
	struct macb_tx_skb *tx_skb = NULL;
	struct macb_dma_desc *desc;
	unsigned int offset, size, count = 0;
	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
-	unsigned int eof = 1;
-	u32 ctrl;
+	unsigned int eof = 1, mss_mfs = 0;
+	u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
+
+	/* LSO */
+	if (skb_shinfo(skb)->gso_size != 0) {
+		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+			/* UDP - UFO */
+			lso_ctrl = MACB_LSO_UFO_ENABLE;
+		else
+			/* TCP - TSO */
+			lso_ctrl = MACB_LSO_TSO_ENABLE;
+	}
 
	/* First, map non-paged data */
	len = skb_headlen(skb);
+
+	/* first buffer length */
+	size = hdrlen;
+
	offset = 0;
	while (len) {
-		size = min(len, bp->max_tx_length);
		entry = macb_tx_ring_wrap(bp, tx_head);
		tx_skb = &queue->tx_skb[entry];
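
With size seeded to hdrlen and the min() recomputation moved to the bottom of the loop (next hunk), the first descriptor now carries only the protocol headers; payload follows in separate, max_tx_length-sized buffers. A standalone sketch of the resulting split, using a hypothetical helper name and a plain ternary in place of the kernel's min():

    #include <stdio.h>

    /* Model of the reworked loop: how many buffers the linear part of
     * the skb splits into when the first buffer is capped at hdrlen. */
    static unsigned int linear_buf_count(unsigned int head_len,
                                         unsigned int hdrlen,
                                         unsigned int max_tx_len)
    {
            unsigned int len = head_len;
            unsigned int size = hdrlen;     /* first buffer: headers only */
            unsigned int count = 0;

            while (len) {
                    len -= size;
                    count++;
                    /* recomputed at the loop tail, as in the patch */
                    size = len < max_tx_len ? len : max_tx_len;
            }
            return count;
    }

    int main(void)
    {
            /* 54 header bytes + 1500 payload bytes, GEM max 16376 */
            printf("%u\n", linear_buf_count(1554, 54, 16376)); /* prints 2 */
            return 0;
    }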
@@ -1258,6 +1277,8 @@ static unsigned int macb_tx_map(struct macb *bp,
		offset += size;
		count++;
		tx_head++;
+
+		size = min(len, bp->max_tx_length);
	}
 
	/* Then, map paged data from fragments */
@@ -1311,6 +1332,21 @@ static unsigned int macb_tx_map(struct macb *bp,
	desc = &queue->tx_ring[entry];
	desc->ctrl = ctrl;
 
+	if (lso_ctrl) {
+		if (lso_ctrl == MACB_LSO_UFO_ENABLE)
+			/* include header and FCS in value given to h/w */
+			mss_mfs = skb_shinfo(skb)->gso_size +
+				  skb_transport_offset(skb) +
+				  ETH_FCS_LEN;
+		else /* TSO */ {
+			mss_mfs = skb_shinfo(skb)->gso_size;
+			/* TCP Sequence Number Source Select
+			 * can be set only for TSO
+			 */
+			seq_ctrl = 0;
+		}
+	}
+
	do {
		i--;
		entry = macb_tx_ring_wrap(bp, i);
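
The MSS/MFS value programmed into the descriptors differs by mode: for UFO it is the full on-wire fragment size, for TSO the MSS alone. Illustrative numbers, assuming 14-byte Ethernet and 20-byte IPv4 headers:

    /* UFO: mss_mfs = gso_size 1472 + skb_transport_offset() 34 + ETH_FCS_LEN 4 = 1510 */
    /* TSO: mss_mfs = gso_size alone, e.g. 1448 for a common TCP MSS */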
@@ -1325,6 +1361,16 @@ static unsigned int macb_tx_map(struct macb *bp,
		if (unlikely(entry == (bp->tx_ring_size - 1)))
			ctrl |= MACB_BIT(TX_WRAP);
 
+		/* First descriptor is header descriptor */
+		if (i == queue->tx_head) {
+			ctrl |= MACB_BF(TX_LSO, lso_ctrl);
+			ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
+		} else
+			/* Only set MSS/MFS on payload descriptors
+			 * (second or later descriptor)
+			 */
+			ctrl |= MACB_BF(MSS_MFS, mss_mfs);
+
		/* Set TX buffer descriptor */
		macb_set_addr(desc, tx_skb->mapping);
		/* desc->addr must be visible to hardware before clearing
@@ -1350,6 +1396,43 @@ static unsigned int macb_tx_map(struct macb *bp,
	return 0;
 }
 
+static netdev_features_t macb_features_check(struct sk_buff *skb,
+					     struct net_device *dev,
+					     netdev_features_t features)
+{
+	unsigned int nr_frags, f;
+	unsigned int hdrlen;
+
+	/* Validate LSO compatibility */
+
+	/* there is only one buffer */
+	if (!skb_is_nonlinear(skb))
+		return features;
+
+	/* length of header */
+	hdrlen = skb_transport_offset(skb);
+	if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+		hdrlen += tcp_hdrlen(skb);
+
+	/* For LSO:
+	 * When software supplies two or more payload buffers all payload buffers
+	 * apart from the last must be a multiple of 8 bytes in size.
+	 */
+	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
+		return features & ~MACB_NETIF_LSO;
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	/* No need to check last fragment */
+	nr_frags--;
+	for (f = 0; f < nr_frags; f++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
+			return features & ~MACB_NETIF_LSO;
+	}
+	return features;
+}
+
 static inline int macb_clear_csum(struct sk_buff *skb)
 {
	/* no change for packets without checksum offloading */
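
macb_features_check() is consulted per packet, so an skb that violates the 8-byte rule only loses NETIF_F_TSO/NETIF_F_UFO for itself and falls back to software GSO. The arithmetic on a hypothetical failing case:

    /* Linear part of 1055 bytes = 54 header + 1001 payload:
     * IS_ALIGNED(1055 - 54, 8) is false (1001 % 8 == 1), so the function
     * returns features & ~MACB_NETIF_LSO and the stack segments this
     * skb in software instead. */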
@@ -1374,7 +1457,28 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue = &bp->queues[queue_index];
	unsigned long flags;
-	unsigned int count, nr_frags, frag_size, f;
+	unsigned int desc_cnt, nr_frags, frag_size, f;
+	unsigned int hdrlen;
+	bool is_lso, is_udp = 0;
+
+	is_lso = (skb_shinfo(skb)->gso_size != 0);
+
+	if (is_lso) {
+		is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);
+
+		/* length of headers */
+		if (is_udp)
+			/* only queue eth + ip headers separately for UDP */
+			hdrlen = skb_transport_offset(skb);
+		else
+			hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		if (skb_headlen(skb) < hdrlen) {
+			netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
+			/* if this is required, would need to copy to single buffer */
+			return NETDEV_TX_BUSY;
+		}
+	} else
+		hdrlen = min(skb_headlen(skb), bp->max_tx_length);
 
 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
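
The header length handed to macb_tx_map() depends on the offload: for UFO the UDP header stays with the payload (only the Ethernet and IP headers are queued separately and replicated per fragment), while TSO also replicates the TCP header per segment. With the usual 14-byte Ethernet and 20-byte IPv4 headers:

    /* UDP (UFO): hdrlen = skb_transport_offset() = 14 + 20 = 34 */
    /* TCP (TSO): hdrlen = 34 + tcp_hdrlen() = 34 + 20 = 54 (no TCP options) */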
@@ -1389,18 +1493,22 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
	 * socket buffer: skb fragments of jumbo frames may need to be
	 * split into many buffer descriptors.
	 */
-	count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
+	if (is_lso && (skb_headlen(skb) > hdrlen))
+		/* extra header descriptor if also payload in first buffer */
+		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
+	else
+		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
-		count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
+		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
	}
 
	spin_lock_irqsave(&bp->lock, flags);
 
	/* This is a hard error, log it. */
	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
-		       bp->tx_ring_size) < count) {
+		       bp->tx_ring_size) < desc_cnt) {
		netif_stop_subqueue(dev, queue_index);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
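
Worked descriptor budget for an LSO skb whose linear part holds headers plus payload (illustrative values):

    /* skb_headlen() 1554, hdrlen 54, max_tx_length 16376:
     * desc_cnt = DIV_ROUND_UP(1554 - 54, 16376) + 1 = 2 (header + payload),
     * plus DIV_ROUND_UP(frag_size, 16376) for each page fragment. */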
@@ -1414,7 +1522,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
	}
 
	/* Map socket buffer for DMA transfer */
-	if (!macb_tx_map(bp, queue, skb)) {
+	if (!macb_tx_map(bp, queue, skb, hdrlen)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}
@@ -2354,6 +2462,7 @@ static const struct net_device_ops macb_netdev_ops = {
	.ndo_poll_controller	= macb_poll_controller,
 #endif
	.ndo_set_features	= macb_set_features,
+	.ndo_features_check	= macb_features_check,
 };
 
 /* Configure peripheral capabilities according to device tree
@@ -2560,6 +2669,11 @@ static int macb_init(struct platform_device *pdev)
 
	/* Set features */
	dev->hw_features = NETIF_F_SG;
+
+	/* Check LSO capability */
+	if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
+		dev->hw_features |= MACB_NETIF_LSO;
+
	/* Checksum offload is only available on gem with packet buffer */
	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
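
Because the offload bits are gated on the PBUF_LSO field of the DCFG6 register, TSO/UFO are advertised only on hardware whose packet buffer supports LSO; on such systems the new features appear in "ethtool -k" output and can be toggled with "ethtool -K".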