 #include <linux/net_tstamp.h>
 #include <linux/phylink.h>
 #include <linux/udp.h>
+#include <linux/bpf_trace.h>
 #include <net/pkt_cls.h>
 #include "stmmac_ptp.h"
 #include "stmmac.h"
+#include "stmmac_xdp.h"
 #include <linux/reset.h>
 #include <linux/of_mdio.h>
 #include "dwmac1000.h"
@@ -67,6 +69,9 @@ MODULE_PARM_DESC(phyaddr, "Physical device address");
 #define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
 #define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)
 
+#define STMMAC_XDP_PASS		0
+#define STMMAC_XDP_CONSUMED	BIT(0)
+
 static int flow_ctrl = FLOW_AUTO;
 module_param(flow_ctrl, int, 0644);
 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
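Aside (not part of the patch): these verdict flags are smuggled through the sk_buff pointer returned by stmmac_xdp_run_prog() using the ERR_PTR()/PTR_ERR() convention, so the RX path keeps a single skb variable. STMMAC_XDP_PASS (0) encodes to ERR_PTR(0) == NULL, so the normal "build an skb" path runs; a non-zero verdict produces an error pointer the caller decodes back into a bit mask. A minimal sketch of the round trip, using only identifiers that appear in this diff:

	skb = ERR_PTR(-STMMAC_XDP_CONSUMED);		/* encode verdict as an error pointer */
	if (IS_ERR(skb)) {
		unsigned int xdp_res = -PTR_ERR(skb);	/* decode back into the verdict mask */

		if (xdp_res & STMMAC_XDP_CONSUMED)
			priv->dev->stats.rx_dropped++;	/* drop/recycle path */
	}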
@@ -1384,6 +1389,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
 	if (!buf->page)
 		return -ENOMEM;
+	buf->page_offset = stmmac_rx_offset(priv);
 
 	if (priv->sph) {
 		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
@@ -1397,7 +1403,8 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
 	}
 
-	buf->addr = page_pool_get_dma_addr(buf->page);
+	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
+
 	stmmac_set_desc_addr(priv, p, buf->addr);
 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
 		stmmac_init_desc3(priv, p);
@@ -1503,7 +1510,8 @@ static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
 				if (!buf->page)
 					goto err_reinit_rx_buffers;
 
-				buf->addr = page_pool_get_dma_addr(buf->page);
+				buf->addr = page_pool_get_dma_addr(buf->page) +
+					    buf->page_offset;
 			}
 
 			if (priv->sph && !buf->sec_page) {
@@ -1821,6 +1829,7 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
  */
 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 {
+	bool xdp_prog = stmmac_xdp_is_enabled(priv);
 	u32 rx_count = priv->plat->rx_queues_to_use;
 	int ret = -ENOMEM;
 	u32 queue;
@@ -1834,13 +1843,15 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 		rx_q->queue_index = queue;
 		rx_q->priv_data = priv;
 
-		pp_params.flags = PP_FLAG_DMA_MAP;
+		pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
 		pp_params.pool_size = priv->dma_rx_size;
 		num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
 		pp_params.order = ilog2(num_pages);
 		pp_params.nid = dev_to_node(priv->device);
 		pp_params.dev = priv->device;
-		pp_params.dma_dir = DMA_FROM_DEVICE;
+		pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+		pp_params.offset = stmmac_rx_offset(priv);
+		pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
 
 		rx_q->page_pool = page_pool_create(&pp_params);
 		if (IS_ERR(rx_q->page_pool)) {
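Aside (not part of the patch): setting PP_FLAG_DMA_SYNC_DEV together with pp_params.offset and pp_params.max_len lets page_pool perform the device-direction DMA sync on recycle, which is why the explicit dma_sync_single_for_device() calls disappear from stmmac_rx_refill() further down in this diff. The helpers stmmac_rx_offset() and STMMAC_MAX_RX_BUF_SIZE() are added to stmmac.h by the same series and are not shown here; the bodies below are an assumption inferred from how they are used above, not the real definitions:

	/* Rough sketch (assumption): reserve XDP headroom in front of the
	 * frame when a program is attached, and cap how much of the buffer
	 * page_pool must sync back to the device.
	 */
	static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
	{
		return stmmac_xdp_is_enabled(priv) ? XDP_PACKET_HEADROOM : 0;
	}

	#define STMMAC_MAX_RX_BUF_SIZE(num)	(((num) * PAGE_SIZE) - XDP_PACKET_HEADROOM)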
@@ -3268,7 +3279,7 @@ static int stmmac_request_irq(struct net_device *dev)
  *  0 on success and an appropriate (-)ve integer as defined in errno.h
  *  file on failure.
  */
-static int stmmac_open(struct net_device *dev)
+int stmmac_open(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 	int bfsize = 0;
@@ -3391,7 +3402,7 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
  * Description:
  * This is the stop entry point of the driver.
  */
-static int stmmac_release(struct net_device *dev)
+int stmmac_release(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 	u32 chan;
@@ -4064,11 +4075,9 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 {
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-	int len, dirty = stmmac_rx_dirty(priv, queue);
+	int dirty = stmmac_rx_dirty(priv, queue);
 	unsigned int entry = rx_q->dirty_rx;
 
-	len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
-
 	while (dirty-- > 0) {
 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
 		struct dma_desc *p;
@@ -4091,18 +4100,9 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 				break;
 
 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
-
-			dma_sync_single_for_device(priv->device, buf->sec_addr,
-						   len, DMA_FROM_DEVICE);
 		}
 
-		buf->addr = page_pool_get_dma_addr(buf->page);
-
-		/* Sync whole allocation to device. This will invalidate old
-		 * data.
-		 */
-		dma_sync_single_for_device(priv->device, buf->addr, len,
-					   DMA_FROM_DEVICE);
+		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
 
 		stmmac_set_desc_addr(priv, p, buf->addr);
 		if (priv->sph)
@@ -4181,6 +4181,42 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
 	return plen - len;
 }
 
+static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
+					   struct xdp_buff *xdp)
+{
+	struct bpf_prog *prog;
+	int res;
+	u32 act;
+
+	rcu_read_lock();
+
+	prog = READ_ONCE(priv->xdp_prog);
+	if (!prog) {
+		res = STMMAC_XDP_PASS;
+		goto unlock;
+	}
+
+	act = bpf_prog_run_xdp(prog, xdp);
+	switch (act) {
+	case XDP_PASS:
+		res = STMMAC_XDP_PASS;
+		break;
+	default:
+		bpf_warn_invalid_xdp_action(act);
+		fallthrough;
+	case XDP_ABORTED:
+		trace_xdp_exception(priv->dev, prog, act);
+		fallthrough;
+	case XDP_DROP:
+		res = STMMAC_XDP_CONSUMED;
+		break;
+	}
+
+unlock:
+	rcu_read_unlock();
+	return ERR_PTR(-res);
+}
+
 /**
  * stmmac_rx - manage the receive process
  * @priv: driver private structure
@@ -4196,8 +4232,14 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 	unsigned int count = 0, error = 0, len = 0;
 	int status = 0, coe = priv->hw->rx_csum;
 	unsigned int next_entry = rx_q->cur_rx;
+	enum dma_data_direction dma_dir;
 	unsigned int desc_size;
 	struct sk_buff *skb = NULL;
+	struct xdp_buff xdp;
+	int buf_sz;
+
+	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
+	buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
 
 	if (netif_msg_rx_status(priv)) {
 		void *rx_head;
@@ -4315,27 +4357,64 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 		}
 
 		if (!skb) {
+			dma_sync_single_for_cpu(priv->device, buf->addr,
+						buf1_len, dma_dir);
+
+			xdp.data = page_address(buf->page) + buf->page_offset;
+			xdp.data_end = xdp.data + buf1_len;
+			xdp.data_hard_start = page_address(buf->page);
+			xdp_set_data_meta_invalid(&xdp);
+			xdp.frame_sz = buf_sz;
+
+			skb = stmmac_xdp_run_prog(priv, &xdp);
+
+			/* For Not XDP_PASS verdict */
+			if (IS_ERR(skb)) {
+				unsigned int xdp_res = -PTR_ERR(skb);
+
+				if (xdp_res & STMMAC_XDP_CONSUMED) {
+					page_pool_recycle_direct(rx_q->page_pool,
+								 buf->page);
+					buf->page = NULL;
+					priv->dev->stats.rx_dropped++;
+
+					/* Clear skb as it was set as
+					 * status by XDP program.
+					 */
+					skb = NULL;
+
+					if (unlikely((status & rx_not_ls)))
+						goto read_again;
+
+					count++;
+					continue;
+				}
+			}
+		}
+
+		if (!skb) {
+			/* XDP program may expand or reduce tail */
+			buf1_len = xdp.data_end - xdp.data;
+
 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
 			if (!skb) {
 				priv->dev->stats.rx_dropped++;
 				count++;
 				goto drain_data;
 			}
 
-			dma_sync_single_for_cpu(priv->device, buf->addr,
-						buf1_len, DMA_FROM_DEVICE);
-			skb_copy_to_linear_data(skb, page_address(buf->page),
-						buf1_len);
+			/* XDP program may adjust header */
+			skb_copy_to_linear_data(skb, xdp.data, buf1_len);
 			skb_put(skb, buf1_len);
 
 			/* Data payload copied into SKB, page ready for recycle */
 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
 			buf->page = NULL;
 		} else if (buf1_len) {
 			dma_sync_single_for_cpu(priv->device, buf->addr,
-						buf1_len, DMA_FROM_DEVICE);
+						buf1_len, dma_dir);
 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-					buf->page, 0, buf1_len,
+					buf->page, buf->page_offset, buf1_len,
 					priv->dma_buf_sz);
 
 			/* Data payload appended into SKB */
@@ -4345,7 +4424,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 
 		if (buf2_len) {
 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
-						buf2_len, DMA_FROM_DEVICE);
+						buf2_len, dma_dir);
 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 					buf->sec_page, 0, buf2_len,
 					priv->dma_buf_sz);
@@ -4503,6 +4582,11 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
 		return -EBUSY;
 	}
 
+	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
+		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
+		return -EINVAL;
+	}
+
 	new_mtu = STMMAC_ALIGN(new_mtu);
 
 	/* If condition true, FIFO is too small or MTU too large */
@@ -4564,6 +4648,7 @@ static int stmmac_set_features(struct net_device *netdev,
 	stmmac_rx_ipc(priv, priv->hw);
 
 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+
 	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
 
@@ -5299,6 +5384,18 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
 	return ret;
 }
 
+static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	switch (bpf->command) {
+	case XDP_SETUP_PROG:
+		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
 static const struct net_device_ops stmmac_netdev_ops = {
 	.ndo_open = stmmac_open,
 	.ndo_start_xmit = stmmac_xmit,
@@ -5317,6 +5414,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
 	.ndo_set_mac_address = stmmac_set_mac_address,
 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
+	.ndo_bpf = stmmac_bpf,
 };
 
 static void stmmac_reset_subtask(struct stmmac_priv *priv)
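Aside (not part of the hunks shown): the .ndo_bpf hook, the allocation changes and the MTU check above rely on a small stmmac_xdp helper file introduced by the same series ("stmmac_xdp.h"/stmmac_xdp.c) that is not part of this diff. The sketch below is an assumption inferred from how the helpers are called here, not a copy of the real file: stmmac_xdp_is_enabled() presumably just tests priv->xdp_prog, and stmmac_xdp_set_prog() swaps the program, bouncing the interface through stmmac_release()/stmmac_open() when needed -- which is why those two entry points lose their static qualifier in this patch.

	/* Sketch only (assumed bodies, driver headers assumed included). */
	static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
	{
		return !!priv->xdp_prog;
	}

	int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
				struct netlink_ext_ack *extack)
	{
		struct net_device *dev = priv->dev;
		struct bpf_prog *old_prog;
		bool need_update;

		if (prog && dev->mtu > ETH_DATA_LEN) {
			/* Mirrors the stmmac_change_mtu() check above */
			NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported by XDP");
			return -EOPNOTSUPP;
		}

		need_update = netif_running(dev) && !!priv->xdp_prog != !!prog;
		if (need_update)
			stmmac_release(dev);	/* uses the now non-static entry point */

		old_prog = xchg(&priv->xdp_prog, prog);
		if (old_prog)
			bpf_prog_put(old_prog);

		if (need_update)
			stmmac_open(dev);	/* re-creates the page_pool with XDP headroom */

		return 0;
	}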