@@ -434,10 +434,13 @@ static int dpaa2_switch_port_carrier_state_sync(struct net_device *netdev)
 	WARN_ONCE(state.up > 1, "Garbage read into link_state");
 
 	if (state.up != port_priv->link_state) {
-		if (state.up)
+		if (state.up) {
 			netif_carrier_on(netdev);
-		else
+			netif_tx_start_all_queues(netdev);
+		} else {
 			netif_carrier_off(netdev);
+			netif_tx_stop_all_queues(netdev);
+		}
 
 		port_priv->link_state = state.up;
 	}
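
The hunk above ties the qdisc Tx queues to the carrier state: now that the port can really transmit, the stack must stop handing the driver skbs while the link is down. A minimal sketch of the same pattern, using a hypothetical example_priv with a cached link flag (the real driver keeps it in port_priv->link_state):

static void example_link_sync(struct net_device *ndev, bool up)
{
	struct example_priv *priv = netdev_priv(ndev); /* hypothetical priv */

	if (up == priv->link_up)
		return;

	if (up) {
		/* Report carrier and let the stack queue Tx packets */
		netif_carrier_on(ndev);
		netif_tx_start_all_queues(ndev);
	} else {
		/* Link lost: stop accepting skbs from the stack */
		netif_carrier_off(ndev);
		netif_tx_stop_all_queues(ndev);
	}
	priv->link_up = up;
}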
@@ -491,9 +494,6 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
 	struct ethsw_core *ethsw = port_priv->ethsw_data;
 	int err;
 
-	/* No need to allow Tx as control interface is disabled */
-	netif_tx_stop_all_queues(netdev);
-
 	/* Explicitly set carrier off, otherwise
 	 * netif_carrier_ok() will return true and cause 'ip link show'
 	 * to report the LOWER_UP flag, even though the link
@@ -547,15 +547,6 @@ static int dpaa2_switch_port_stop(struct net_device *netdev)
 	return 0;
 }
 
-static netdev_tx_t dpaa2_switch_port_dropframe(struct sk_buff *skb,
-					       struct net_device *netdev)
-{
-	/* we don't support I/O for now, drop the frame */
-	dev_kfree_skb_any(skb);
-
-	return NETDEV_TX_OK;
-}
-
 static int dpaa2_switch_port_parent_id(struct net_device *dev,
 				       struct netdev_phys_item_id *ppid)
 {
@@ -772,6 +763,115 @@ static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw,
 	dev_kfree_skb(skb);
 }
 
+static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw,
+					struct sk_buff *skb,
+					struct dpaa2_fd *fd)
+{
+	struct device *dev = ethsw->dev;
+	struct sk_buff **skbh;
+	dma_addr_t addr;
+	u8 *buff_start;
+	void *hwa;
+
+	buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET -
+			       DPAA2_SWITCH_TX_BUF_ALIGN,
+			       DPAA2_SWITCH_TX_BUF_ALIGN);
+
+	/* Clear FAS to have consistent values for TX confirmation. It is
+	 * located in the first 8 bytes of the buffer's hardware annotation
+	 * area
+	 */
+	hwa = buff_start + DPAA2_SWITCH_SWA_SIZE;
+	memset(hwa, 0, 8);
+
+	/* Store a backpointer to the skb at the beginning of the buffer
+	 * (in the private data area) such that we can release it
+	 * on Tx confirm
+	 */
+	skbh = (struct sk_buff **)buff_start;
+	*skbh = skb;
+
+	addr = dma_map_single(dev, buff_start,
+			      skb_tail_pointer(skb) - buff_start,
+			      DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, addr)))
+		return -ENOMEM;
+
+	/* Setup the FD fields */
+	memset(fd, 0, sizeof(*fd));
+
+	dpaa2_fd_set_addr(fd, addr);
+	dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start));
+	dpaa2_fd_set_len(fd, skb->len);
+	dpaa2_fd_set_format(fd, dpaa2_fd_single);
+
+	return 0;
+}
+
+static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb,
+					struct net_device *net_dev)
+{
+	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
+	struct ethsw_core *ethsw = port_priv->ethsw_data;
+	int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES;
+	struct dpaa2_fd fd;
+	int err;
+
+	if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) {
+		struct sk_buff *ns;
+
+		ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM);
+		if (unlikely(!ns)) {
+			net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name);
+			goto err_free_skb;
+		}
+		dev_consume_skb_any(skb);
+		skb = ns;
+	}
+
+	/* We'll be holding a back-reference to the skb until Tx confirmation */
+	skb = skb_unshare(skb, GFP_ATOMIC);
+	if (unlikely(!skb)) {
+		/* skb_unshare() has already freed the skb */
+		net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name);
+		goto err_exit;
+	}
+
+	/* At this stage, we do not support non-linear skbs so just try to
+	 * linearize the skb and if that's not working, just drop the packet.
+	 */
+	err = skb_linearize(skb);
+	if (err) {
+		net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err);
+		goto err_free_skb;
+	}
+
+	err = dpaa2_switch_build_single_fd(ethsw, skb, &fd);
+	if (unlikely(err)) {
+		net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err);
+		goto err_free_skb;
+	}
+
+	do {
+		err = dpaa2_io_service_enqueue_qd(NULL,
+						  port_priv->tx_qdid,
+						  8, 0, &fd);
+		retries--;
+	} while (err == -EBUSY && retries);
+
+	if (unlikely(err < 0)) {
+		dpaa2_switch_free_fd(ethsw, &fd);
+		goto err_exit;
+	}
+
+	return NETDEV_TX_OK;
+
+err_free_skb:
+	dev_kfree_skb(skb);
+err_exit:
+	return NETDEV_TX_OK;
+}
+
 static const struct net_device_ops dpaa2_switch_port_ops = {
 	.ndo_open		= dpaa2_switch_port_open,
 	.ndo_stop		= dpaa2_switch_port_stop,
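
dpaa2_switch_build_single_fd() lays the frame out so that an aligned buffer start sits in front of skb->data, with room for the software annotation (the skb backpointer) and the hardware annotation (whose first 8 bytes, the FAS, get cleared). Subtracting one extra DPAA2_SWITCH_TX_BUF_ALIGN before rounding up guarantees the aligned pointer never lands past skb->data - DPAA2_SWITCH_TX_DATA_OFFSET. A standalone sketch of that arithmetic; the constant values below are assumptions for illustration only, the real ones live in the driver's header:

#include <stdint.h>
#include <stdio.h>

#define TX_BUF_ALIGN	64u	/* assumed value, for illustration */
#define SWA_SIZE	64u	/* assumed value, for illustration */
#define TX_DATA_OFFSET	96u	/* assumed value, for illustration */

/* Same rounding as the kernel's PTR_ALIGN(): round up to a power of two */
static uintptr_t ptr_align(uintptr_t p, uintptr_t a)
{
	return (p + a - 1) & ~(a - 1);
}

int main(void)
{
	uintptr_t data = 0x1000 + 130;	/* stand-in for skb->data */
	uintptr_t buff_start;

	buff_start = ptr_align(data - TX_DATA_OFFSET - TX_BUF_ALIGN,
			       TX_BUF_ALIGN);

	/* buff_start is aligned and leaves at least TX_DATA_OFFSET bytes
	 * ahead of the frame: the skb backpointer goes at buff_start and
	 * the hardware annotation at buff_start + SWA_SIZE.
	 */
	printf("buff_start=%#lx data offset=%lu\n",
	       (unsigned long)buff_start,
	       (unsigned long)(data - buff_start));
	return 0;
}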
@@ -783,7 +883,7 @@ static const struct net_device_ops dpaa2_switch_port_ops = {
 	.ndo_get_offload_stats	= dpaa2_switch_port_get_offload_stats,
 	.ndo_fdb_dump		= dpaa2_switch_port_fdb_dump,
 
-	.ndo_start_xmit		= dpaa2_switch_port_dropframe,
+	.ndo_start_xmit		= dpaa2_switch_port_tx,
 	.ndo_get_port_parent_id	= dpaa2_switch_port_parent_id,
 	.ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name,
 };
@@ -1436,6 +1536,12 @@ static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw,
 	return skb;
 }
 
+static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq,
+				 const struct dpaa2_fd *fd)
+{
+	dpaa2_switch_free_fd(fq->ethsw, fd);
+}
+
 static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq,
 			    const struct dpaa2_fd *fd)
 {
@@ -1813,7 +1919,10 @@ static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq)
 			continue;
 		}
 
-		dpaa2_switch_rx(fq, dpaa2_dq_fd(dq));
+		if (fq->type == DPSW_QUEUE_RX)
+			dpaa2_switch_rx(fq, dpaa2_dq_fd(dq));
+		else
+			dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq));
 		cleaned++;
 
 	} while (!is_last);
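
With Tx confirmations now arriving on their own frame queue, the dequeue loop demuxes on the queue type: Rx frames become skbs, while Tx-confirmation frames only need their buffer unmapped and freed. An alternative shape for the same demux is a per-queue consume callback installed at FQ setup time, roughly as the sibling dpaa2-eth driver does; a hedged sketch with hypothetical names:

struct example_fq {
	/* Installed once at setup: points at the Rx or Tx-conf handler */
	void (*consume)(struct example_fq *fq, const struct dpaa2_fd *fd);
	struct ethsw_core *ethsw;
};

/* In the dequeue loop, the branch on fq->type becomes a single call */
static void example_consume(struct example_fq *fq, const struct dpaa2_fd *fd)
{
	fq->consume(fq, fd);
}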
@@ -2111,8 +2220,19 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
 		.flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID,
 	};
 	struct net_device *netdev = port_priv->netdev;
+	struct ethsw_core *ethsw = port_priv->ethsw_data;
+	struct dpsw_if_attr dpsw_if_attr;
 	int err;
 
+	/* Get the Tx queue for this specific port */
+	err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+				     port_priv->idx, &dpsw_if_attr);
+	if (err) {
+		netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err);
+		return err;
+	}
+	port_priv->tx_qdid = dpsw_if_attr.qdid;
+
 	/* We need to add VLAN 1 as the PVID on this port until it is under a
 	 * bridge since the DPAA2 switch is not able to handle the traffic in a
 	 * VLAN unaware fashion
@@ -2230,6 +2350,8 @@ static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
 	port_netdev->netdev_ops = &dpaa2_switch_port_ops;
 	port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops;
 
+	port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM;
+
 	/* Set MTU limits */
 	port_netdev->min_mtu = ETH_MIN_MTU;
 	port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
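
Advertising DPAA2_SWITCH_NEEDED_HEADROOM on the netdev lets the core reserve that much room when allocating Tx skbs (needed_headroom feeds into LL_RESERVED_SPACE()), so the skb_realloc_headroom() fallback in dpaa2_switch_port_tx() stays a rarely taken slow path. A hedged illustration of the check that fallback guards, not the driver's code:

static bool example_headroom_ok(const struct sk_buff *skb,
				const struct net_device *dev)
{
	/* True when the skb already has the headroom the device asked for */
	return skb_headroom(skb) >= dev->needed_headroom;
}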