@@ -156,12 +156,13 @@
 #define AM65_CPSW_CPPI_TX_PKT_TYPE	0x7
 
 /* XDP */
-#define AM65_CPSW_XDP_CONSUMED		2
-#define AM65_CPSW_XDP_REDIRECT		1
+#define AM65_CPSW_XDP_CONSUMED		BIT(1)
+#define AM65_CPSW_XDP_REDIRECT		BIT(0)
 #define AM65_CPSW_XDP_PASS		0
 
 /* Include headroom compatible with both skb and xdpf */
-#define AM65_CPSW_HEADROOM (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
+#define AM65_CPSW_HEADROOM_NA (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
+#define AM65_CPSW_HEADROOM ALIGN(AM65_CPSW_HEADROOM_NA, sizeof(long))
 
 static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
				       const u8 *dev_addr)
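
Note on the headroom macros above: AM65_CPSW_HEADROOM_NA keeps the skb/xdpf-compatible value, and AM65_CPSW_HEADROOM rounds it up to a long-word boundary with ALIGN(). A minimal sketch of the arithmetic, assuming typical kernel constants (NET_SKB_PAD 64, XDP_PACKET_HEADROOM 256, NET_IP_ALIGN 2, 64-bit long); the EXAMPLE_* names are illustrative only, not from the driver:

	#include <linux/kernel.h>	/* max(), ALIGN() */

	#define EXAMPLE_HEADROOM_NA	(max(64, 256) + 2)		/* 258 bytes, not long-aligned */
	#define EXAMPLE_HEADROOM	ALIGN(EXAMPLE_HEADROOM_NA, 8)	/* rounded up to 264 bytes */
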
@@ -933,7 +934,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
 	host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
 	if (unlikely(!host_desc)) {
 		ndev->stats.tx_dropped++;
-		return -ENOMEM;
+		return AM65_CPSW_XDP_CONSUMED;	/* drop */
 	}
 
 	am65_cpsw_nuss_set_buf_type(tx_chn, host_desc, buf_type);
@@ -942,7 +943,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
 				 pkt_len, DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(tx_chn->dma_dev, dma_buf))) {
 		ndev->stats.tx_dropped++;
-		ret = -ENOMEM;
+		ret = AM65_CPSW_XDP_CONSUMED;	/* drop */
 		goto pool_free;
 	}
 
@@ -977,6 +978,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
 		/* Inform BQL */
 		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
 		ndev->stats.tx_errors++;
+		ret = AM65_CPSW_XDP_CONSUMED;	/* drop */
 		goto dma_unmap;
 	}
 
@@ -996,14 +998,17 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
 			     int desc_idx, int cpu, int *len)
 {
 	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+	struct am65_cpsw_ndev_priv *ndev_priv;
 	struct net_device *ndev = port->ndev;
+	struct am65_cpsw_ndev_stats *stats;
 	int ret = AM65_CPSW_XDP_CONSUMED;
 	struct am65_cpsw_tx_chn *tx_chn;
 	struct netdev_queue *netif_txq;
 	struct xdp_frame *xdpf;
 	struct bpf_prog *prog;
 	struct page *page;
 	u32 act;
+	int err;
 
 	prog = READ_ONCE(port->xdp_prog);
 	if (!prog)
@@ -1013,6 +1018,9 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
 	/* XDP prog might have changed packet data and boundaries */
 	*len = xdp->data_end - xdp->data;
 
+	ndev_priv = netdev_priv(ndev);
+	stats = this_cpu_ptr(ndev_priv->stats);
+
 	switch (act) {
 	case XDP_PASS:
 		ret = AM65_CPSW_XDP_PASS;
@@ -1023,31 +1031,36 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
 
 		xdpf = xdp_convert_buff_to_frame(xdp);
 		if (unlikely(!xdpf))
-			break;
+			goto drop;
 
 		__netif_tx_lock(netif_txq, cpu);
-		ret = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
+		err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
 					     AM65_CPSW_TX_BUF_TYPE_XDP_TX);
 		__netif_tx_unlock(netif_txq);
-		if (ret)
-			break;
+		if (err)
+			goto drop;
 
-		ndev->stats.rx_bytes += *len;
-		ndev->stats.rx_packets++;
+		u64_stats_update_begin(&stats->syncp);
+		stats->rx_bytes += *len;
+		stats->rx_packets++;
+		u64_stats_update_end(&stats->syncp);
 		ret = AM65_CPSW_XDP_CONSUMED;
 		goto out;
 	case XDP_REDIRECT:
 		if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
-			break;
+			goto drop;
 
-		ndev->stats.rx_bytes += *len;
-		ndev->stats.rx_packets++;
+		u64_stats_update_begin(&stats->syncp);
+		stats->rx_bytes += *len;
+		stats->rx_packets++;
+		u64_stats_update_end(&stats->syncp);
 		ret = AM65_CPSW_XDP_REDIRECT;
 		goto out;
 	default:
 		bpf_warn_invalid_xdp_action(ndev, prog, act);
 		fallthrough;
 	case XDP_ABORTED:
+drop:
 		trace_xdp_exception(ndev, prog, act);
 		fallthrough;
 	case XDP_DROP:
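
The hunk above switches the XDP_TX/XDP_REDIRECT accounting from ndev->stats to the driver's per-CPU stats guarded by a u64_stats_sync seqcount. For context, a reader-side sketch, assuming the stats structure has the plain u64 fields used by the writer above (the example_ function name and the struct layout here are assumptions, not code from the patch):

	#include <linux/types.h>
	#include <linux/u64_stats_sync.h>

	struct am65_cpsw_ndev_stats {		/* assumed layout */
		u64 tx_packets;
		u64 tx_bytes;
		u64 rx_packets;
		u64 rx_bytes;
		struct u64_stats_sync syncp;
	};

	static void example_read_rx_stats(struct am65_cpsw_ndev_stats *stats,
					  u64 *bytes, u64 *packets)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			*bytes = stats->rx_bytes;
			*packets = stats->rx_packets;
		} while (u64_stats_fetch_retry(&stats->syncp, start));
	}

The begin/retry pair lets 32-bit machines read the two 64-bit counters consistently without taking a lock on the hot path.
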
@@ -1056,7 +1069,6 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
 
 	page = virt_to_head_page(xdp->data);
 	am65_cpsw_put_page(rx_chn, page, true, desc_idx);
-
 out:
 	return ret;
 }
@@ -1095,7 +1107,7 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
 }
 
 static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
-				     u32 flow_idx, int cpu)
+				     u32 flow_idx, int cpu, int *xdp_state)
 {
 	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
 	u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
@@ -1114,6 +1126,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
 	void **swdata;
 	u32 *psdata;
 
+	*xdp_state = AM65_CPSW_XDP_PASS;
 	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
 	if (ret) {
 		if (ret != -ENODATA)
@@ -1161,15 +1174,13 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
 	}
 
 	if (port->xdp_prog) {
-		xdp_init_buff(&xdp, AM65_CPSW_MAX_PACKET_SIZE, &port->xdp_rxq);
-
-		xdp_prepare_buff(&xdp, page_addr, skb_headroom(skb),
+		xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq);
+		xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
 				 pkt_len, false);
-
-		ret = am65_cpsw_run_xdp(common, port, &xdp, desc_idx,
-					cpu, &pkt_len);
-		if (ret != AM65_CPSW_XDP_PASS)
-			return ret;
+		*xdp_state = am65_cpsw_run_xdp(common, port, &xdp, desc_idx,
+					       cpu, &pkt_len);
+		if (*xdp_state != AM65_CPSW_XDP_PASS)
+			goto allocate;
 
 		/* Compute additional headroom to be reserved */
 		headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
@@ -1193,9 +1204,13 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
 	stats->rx_bytes += pkt_len;
 	u64_stats_update_end(&stats->syncp);
 
+allocate:
 	new_page = page_pool_dev_alloc_pages(rx_chn->page_pool);
-	if (unlikely(!new_page))
+	if (unlikely(!new_page)) {
+		dev_err(dev, "page alloc failed\n");
 		return -ENOMEM;
+	}
+
 	rx_chn->pages[desc_idx] = new_page;
 
 	if (netif_dormant(ndev)) {
@@ -1229,29 +1244,29 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
 	struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
 	int flow = AM65_CPSW_MAX_RX_FLOWS;
 	int cpu = smp_processor_id();
-	bool xdp_redirect = false;
+	int xdp_state_or = 0;
 	int cur_budget, ret;
+	int xdp_state;
 	int num_rx = 0;
 
 	/* process every flow */
 	while (flow--) {
 		cur_budget = budget - num_rx;
 
 		while (cur_budget--) {
-			ret = am65_cpsw_nuss_rx_packets(common, flow, cpu);
-			if (ret) {
-				if (ret == AM65_CPSW_XDP_REDIRECT)
-					xdp_redirect = true;
+			ret = am65_cpsw_nuss_rx_packets(common, flow, cpu,
+							&xdp_state);
+			xdp_state_or |= xdp_state;
+			if (ret)
 				break;
-			}
 			num_rx++;
 		}
 
 		if (num_rx >= budget)
 			break;
 	}
 
-	if (xdp_redirect)
+	if (xdp_state_or & AM65_CPSW_XDP_REDIRECT)
 		xdp_do_flush();
 
 	dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
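
With the verdict macros defined as bit flags, the poll loop above can OR every per-packet state into xdp_state_or and decide once per NAPI poll whether any frame was redirected, instead of tracking a separate bool. A self-contained sketch of that aggregation pattern (the example_ helper is hypothetical, not part of the patch):

	#include <linux/bits.h>

	#define AM65_CPSW_XDP_CONSUMED	BIT(1)
	#define AM65_CPSW_XDP_REDIRECT	BIT(0)
	#define AM65_CPSW_XDP_PASS	0

	static bool example_batch_needs_flush(const int *xdp_state, int n)
	{
		int xdp_state_or = 0;
		int i;

		/* Accumulate every per-packet verdict seen in this batch. */
		for (i = 0; i < n; i++)
			xdp_state_or |= xdp_state[i];

		/* One xdp_do_flush() per poll is enough if any packet was redirected. */
		return xdp_state_or & AM65_CPSW_XDP_REDIRECT;
	}
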
@@ -1918,12 +1933,13 @@ static int am65_cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
 static int am65_cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
 				  struct xdp_frame **frames, u32 flags)
 {
+	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
 	struct am65_cpsw_tx_chn *tx_chn;
 	struct netdev_queue *netif_txq;
 	int cpu = smp_processor_id();
 	int i, nxmit = 0;
 
-	tx_chn = &am65_ndev_to_common(ndev)->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES];
+	tx_chn = &common->tx_chns[cpu % common->tx_ch_num];
 	netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
 
 	__netif_tx_lock(netif_txq, cpu);
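
In the hunk above the transmit channel is chosen with cpu % common->tx_ch_num, i.e. modulo the number of channels actually configured rather than the compile-time AM65_CPSW_MAX_TX_QUEUES, which presumably keeps the index within the configured tx_chns[] range when fewer queues are in use. A tiny illustration of the difference (hypothetical helper and numbers, not driver code):

	static inline int example_pick_tx_chn(int cpu, int tx_ch_num)
	{
		/* e.g. cpu 5 with tx_ch_num 2 selects channel 1, while
		 * cpu 5 % 8 would give 5, beyond the two configured channels.
		 */
		return cpu % tx_ch_num;
	}
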