 #include <uapi/linux/ppp_defs.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <net/tso.h>
 
 /* RX Fifo Registers */
 #define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
@@ -1010,6 +1011,10 @@ struct mvpp2_txq_pcpu {
 
 	/* Index of the TX DMA descriptor to be cleaned up */
 	int txq_get_index;
+
+	/* DMA buffer for TSO headers */
+	char *tso_headers;
+	dma_addr_t tso_headers_dma;
 };
 
 struct mvpp2_tx_queue {
@@ -5284,15 +5289,14 @@ static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
 
 /* Allocate and initialize descriptors for aggr TXQ */
 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
-			       struct mvpp2_tx_queue *aggr_txq,
-			       int desc_num, int cpu,
+			       struct mvpp2_tx_queue *aggr_txq, int cpu,
 			       struct mvpp2 *priv)
 {
 	u32 txq_dma;
 
 	/* Allocate memory for TX descriptors */
 	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
-				desc_num * MVPP2_DESC_ALIGNED_SIZE,
+				MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
 				&aggr_txq->descs_dma, GFP_KERNEL);
 	if (!aggr_txq->descs)
 		return -ENOMEM;
@@ -5313,7 +5317,8 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
 			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
 
 	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
-	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
+	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
+		    MVPP2_AGGR_TXQ_SIZE);
 
 	return 0;
 }
@@ -5494,13 +5499,26 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 		txq_pcpu->reserved_num = 0;
 		txq_pcpu->txq_put_index = 0;
 		txq_pcpu->txq_get_index = 0;
+
+		txq_pcpu->tso_headers =
+			dma_alloc_coherent(port->dev->dev.parent,
+					   MVPP2_AGGR_TXQ_SIZE * TSO_HEADER_SIZE,
+					   &txq_pcpu->tso_headers_dma,
+					   GFP_KERNEL);
+		if (!txq_pcpu->tso_headers)
+			goto cleanup;
 	}
 
 	return 0;
 cleanup:
 	for_each_present_cpu(cpu) {
 		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
 		kfree(txq_pcpu->buffs);
+
+		dma_free_coherent(port->dev->dev.parent,
+				  MVPP2_AGGR_TXQ_SIZE * TSO_HEADER_SIZE,
+				  txq_pcpu->tso_headers,
+				  txq_pcpu->tso_headers_dma);
 	}
 
 	dma_free_coherent(port->dev->dev.parent,
@@ -5520,6 +5538,11 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
 	for_each_present_cpu(cpu) {
 		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
 		kfree(txq_pcpu->buffs);
+
+		dma_free_coherent(port->dev->dev.parent,
+				  MVPP2_AGGR_TXQ_SIZE * TSO_HEADER_SIZE,
+				  txq_pcpu->tso_headers,
+				  txq_pcpu->tso_headers_dma);
 	}
 
 	if (txq->descs)
@@ -6049,6 +6072,123 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
 	return -ENOMEM;
 }
 
+static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
+				     struct net_device *dev,
+				     struct mvpp2_tx_queue *txq,
+				     struct mvpp2_tx_queue *aggr_txq,
+				     struct mvpp2_txq_pcpu *txq_pcpu,
+				     int hdr_sz)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+	dma_addr_t addr;
+
+	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+	mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
+
+	addr = txq_pcpu->tso_headers_dma +
+	       txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
+	mvpp2_txdesc_offset_set(port, tx_desc, addr & MVPP2_TX_DESC_ALIGN);
+	mvpp2_txdesc_dma_addr_set(port, tx_desc, addr & ~MVPP2_TX_DESC_ALIGN);
+
+	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
+					    MVPP2_TXD_F_DESC |
+					    MVPP2_TXD_PADDING_DISABLE);
+	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
+}
+
+static inline int mvpp2_tso_put_data(struct sk_buff *skb,
+				     struct net_device *dev, struct tso_t *tso,
+				     struct mvpp2_tx_queue *txq,
+				     struct mvpp2_tx_queue *aggr_txq,
+				     struct mvpp2_txq_pcpu *txq_pcpu,
+				     int sz, bool left, bool last)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+	dma_addr_t buf_dma_addr;
+
+	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+	mvpp2_txdesc_size_set(port, tx_desc, sz);
+
+	buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
+				      DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
+		mvpp2_txq_desc_put(txq);
+		return -ENOMEM;
+	}
+
+	mvpp2_txdesc_offset_set(port, tx_desc,
+				buf_dma_addr & MVPP2_TX_DESC_ALIGN);
+	mvpp2_txdesc_dma_addr_set(port, tx_desc,
+				  buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
+
+	if (!left) {
+		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
+		if (last) {
+			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
+			return 0;
+		}
+	} else {
+		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
+	}
+
+	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
+	return 0;
+}
+
+static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
+			struct mvpp2_tx_queue *txq,
+			struct mvpp2_tx_queue *aggr_txq,
+			struct mvpp2_txq_pcpu *txq_pcpu)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+	struct tso_t tso;
+	int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	int i, len, descs = 0;
+
+	/* Check number of available descriptors */
+	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
+				      tso_count_descs(skb)) ||
+	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
+					     tso_count_descs(skb)))
+		return 0;
+
+	tso_start(skb, &tso);
+	len = skb->len - hdr_sz;
+	while (len > 0) {
+		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
+		char *hdr = txq_pcpu->tso_headers +
+			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
+
+		len -= left;
+		descs++;
+
+		tso_build_hdr(skb, hdr, &tso, left, len == 0);
+		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);
+
+		while (left > 0) {
+			int sz = min_t(int, tso.size, left);
+			left -= sz;
+			descs++;
+
+			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
+					       txq_pcpu, sz, left, len == 0))
+				goto release;
+			tso_build_data(skb, &tso, sz);
+		}
+	}
+
+	return descs;
+
+release:
+	for (i = descs - 1; i >= 0; i--) {
+		struct mvpp2_tx_desc *tx_desc = txq->descs + i;
+		tx_desc_unmap_put(port, txq, tx_desc);
+	}
+	return 0;
+}
+
 /* Main tx processing */
 static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
 {
@@ -6066,6 +6206,10 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
 	txq_pcpu = this_cpu_ptr(txq->pcpu);
 	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
 
+	if (skb_is_gso(skb)) {
+		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
+		goto out;
+	}
 	frags = skb_shinfo(skb)->nr_frags + 1;
 
 	/* Check number of available descriptors */
@@ -6115,22 +6259,21 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
-	txq_pcpu->reserved_num -= frags;
-	txq_pcpu->count += frags;
-	aggr_txq->count += frags;
-
-	/* Enable transmit */
-	wmb();
-	mvpp2_aggr_txq_pend_desc_add(port, frags);
-
-	if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
-		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
-
-		netif_tx_stop_queue(nq);
-	}
 out:
 	if (frags > 0) {
 		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
+		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+
+		txq_pcpu->reserved_num -= frags;
+		txq_pcpu->count += frags;
+		aggr_txq->count += frags;
+
+		/* Enable transmit */
+		wmb();
+		mvpp2_aggr_txq_pend_desc_add(port, frags);
+
+		if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1)
+			netif_tx_stop_queue(nq);
 
 		u64_stats_update_begin(&stats->syncp);
 		stats->tx_packets++;
@@ -7255,7 +7398,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 		}
 	}
 
-	features = NETIF_F_SG | NETIF_F_IP_CSUM;
+	features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
 	dev->features = features | NETIF_F_RXCSUM;
 	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
 	dev->vlan_features |= features;
@@ -7445,8 +7588,7 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
 	for_each_present_cpu(i) {
 		priv->aggr_txqs[i].id = i;
 		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
-		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
-					  MVPP2_AGGR_TXQ_SIZE, i, priv);
+		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
 		if (err < 0)
 			return err;
 	}