@@ -502,6 +502,7 @@ static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
 				      struct page *page,
 				      bool allow_direct);
 static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);
+static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
 
 static void am65_cpsw_destroy_rxq(struct am65_cpsw_common *common, int id)
 {
@@ -656,6 +657,76 @@ static int am65_cpsw_create_rxqs(struct am65_cpsw_common *common)
 	return ret;
 }
 
+static void am65_cpsw_destroy_txq(struct am65_cpsw_common *common, int id)
+{
+	struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[id];
+
+	napi_disable(&tx_chn->napi_tx);
+	hrtimer_cancel(&tx_chn->tx_hrtimer);
+	k3_udma_glue_reset_tx_chn(tx_chn->tx_chn, tx_chn,
+				  am65_cpsw_nuss_tx_cleanup);
+	k3_udma_glue_disable_tx_chn(tx_chn->tx_chn);
+}
+
+static void am65_cpsw_destroy_txqs(struct am65_cpsw_common *common)
+{
+	struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
+	int id;
+
+	/* shutdown tx channels */
+	atomic_set(&common->tdown_cnt, common->tx_ch_num);
+	/* ensure new tdown_cnt value is visible */
+	smp_mb__after_atomic();
+	reinit_completion(&common->tdown_complete);
+
+	for (id = 0; id < common->tx_ch_num; id++)
+		k3_udma_glue_tdown_tx_chn(tx_chn[id].tx_chn, false);
+
+	id = wait_for_completion_timeout(&common->tdown_complete,
+					 msecs_to_jiffies(1000));
+	if (!id)
+		dev_err(common->dev, "tx teardown timeout\n");
+
+	for (id = common->tx_ch_num - 1; id >= 0; id--)
+		am65_cpsw_destroy_txq(common, id);
+}
+
+static int am65_cpsw_create_txq(struct am65_cpsw_common *common, int id)
+{
+	struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[id];
+	int ret;
+
+	ret = k3_udma_glue_enable_tx_chn(tx_chn->tx_chn);
+	if (ret)
+		return ret;
+
+	napi_enable(&tx_chn->napi_tx);
+
+	return 0;
+}
+
+static int am65_cpsw_create_txqs(struct am65_cpsw_common *common)
+{
+	int id, ret;
+
+	for (id = 0; id < common->tx_ch_num; id++) {
+		ret = am65_cpsw_create_txq(common, id);
+		if (ret) {
+			dev_err(common->dev, "couldn't create txq %d: %d\n",
+				id, ret);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	for (--id; id >= 0; id--)
+		am65_cpsw_destroy_txq(common, id);
+
+	return ret;
+}
+
 static int am65_cpsw_nuss_desc_idx(struct k3_cppi_desc_pool *desc_pool,
 				   void *desc,
 				   unsigned char dsize_log2)
@@ -790,9 +861,8 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
 static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
 {
 	struct am65_cpsw_host *host_p = am65_common_get_host(common);
-	struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
-	int port_idx, ret, tx;
 	u32 val, port_mask;
+	int port_idx, ret;
 
 	if (common->usage_count)
 		return 0;
@@ -856,67 +926,28 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
 	if (ret)
 		return ret;
 
-	for (tx = 0; tx < common->tx_ch_num; tx++) {
-		ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
-		if (ret) {
-			dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
-				tx, ret);
-			tx--;
-			goto fail_tx;
-		}
-		napi_enable(&tx_chn[tx].napi_tx);
-	}
+	ret = am65_cpsw_create_txqs(common);
+	if (ret)
+		goto cleanup_rx;
 
 	dev_dbg(common->dev, "cpsw_nuss started\n");
 	return 0;
 
-fail_tx:
-	while (tx >= 0) {
-		napi_disable(&tx_chn[tx].napi_tx);
-		k3_udma_glue_disable_tx_chn(tx_chn[tx].tx_chn);
-		tx--;
-	}
-
+cleanup_rx:
 	am65_cpsw_destroy_rxqs(common);
 
 	return ret;
 }
 
 static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
 {
-	struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
-	int i;
-
 	if (common->usage_count != 1)
 		return 0;
 
 	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
 			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
 
-	/* shutdown tx channels */
-	atomic_set(&common->tdown_cnt, common->tx_ch_num);
-	/* ensure new tdown_cnt value is visible */
-	smp_mb__after_atomic();
-	reinit_completion(&common->tdown_complete);
-
-	for (i = 0; i < common->tx_ch_num; i++)
-		k3_udma_glue_tdown_tx_chn(tx_chn[i].tx_chn, false);
-
-	i = wait_for_completion_timeout(&common->tdown_complete,
-					msecs_to_jiffies(1000));
-	if (!i)
-		dev_err(common->dev, "tx timeout\n");
-	for (i = 0; i < common->tx_ch_num; i++) {
-		napi_disable(&tx_chn[i].napi_tx);
-		hrtimer_cancel(&tx_chn[i].tx_hrtimer);
-	}
-
-	for (i = 0; i < common->tx_ch_num; i++) {
-		k3_udma_glue_reset_tx_chn(tx_chn[i].tx_chn, &tx_chn[i],
-					  am65_cpsw_nuss_tx_cleanup);
-		k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn);
-	}
-
+	am65_cpsw_destroy_txqs(common);
 	am65_cpsw_destroy_rxqs(common);
 	cpsw_ale_stop(common->ale);
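For context on the teardown handshake that the new am65_cpsw_destroy_txqs() factors out (set an atomic counter to the channel count, issue a barrier, wait on a completion with a 1 s timeout, then tear channels down in reverse order), here is a minimal userspace C sketch of the same pattern. It is illustrative only: every name is hypothetical, and the C11/pthread primitives stand in for the kernel's atomic_t, smp_mb__after_atomic() and struct completion; in the driver the completion is presumably signalled from the TX completion path once tdown_cnt reaches zero.

/* Build with: gcc -pthread teardown_sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define NUM_CHANNELS 4

static atomic_int tdown_cnt;				/* analogue of common->tdown_cnt */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t tdown_complete = PTHREAD_COND_INITIALIZER;
static int tdown_done;					/* analogue of the completion's done flag */

/* Each "channel" finishes its teardown work, then decrements the shared
 * counter; whichever thread takes it to zero signals the completion. */
static void *channel_teardown(void *arg)
{
	(void)arg;
	/* ... per-channel drain/cleanup would happen here ... */
	if (atomic_fetch_sub(&tdown_cnt, 1) == 1) {	/* we were the last one */
		pthread_mutex_lock(&lock);
		tdown_done = 1;
		pthread_cond_signal(&tdown_complete);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t threads[NUM_CHANNELS];
	struct timespec deadline;
	int i, ret = 0;

	/* Mirrors atomic_set() + reinit_completion() in destroy_txqs(). */
	atomic_store(&tdown_cnt, NUM_CHANNELS);
	tdown_done = 0;

	/* Mirrors kicking off k3_udma_glue_tdown_tx_chn() per channel. */
	for (i = 0; i < NUM_CHANNELS; i++)
		pthread_create(&threads[i], NULL, channel_teardown, NULL);

	/* Mirrors wait_for_completion_timeout(..., msecs_to_jiffies(1000)). */
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;
	pthread_mutex_lock(&lock);
	while (!tdown_done && ret == 0)
		ret = pthread_cond_timedwait(&tdown_complete, &lock, &deadline);
	pthread_mutex_unlock(&lock);

	if (!tdown_done)
		fprintf(stderr, "tx teardown timeout\n");

	for (i = 0; i < NUM_CHANNELS; i++)
		pthread_join(threads[i], NULL);

	return 0;
}

The design point the sketch captures is why the patch waits once for all channels rather than per channel: teardown is kicked off for every channel first, then a single bounded wait covers them all, and the per-channel destroy loop afterwards runs unconditionally in reverse order so a timeout still leaves every channel disabled.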