@@ -2797,10 +2797,8 @@ static void mvneta_rx_reset(struct mvneta_port *pp)
 
 /* Rx/Tx queue initialization/cleanup methods */
 
-/* Create a specified RX queue */
-static int mvneta_rxq_init(struct mvneta_port *pp,
-			   struct mvneta_rx_queue *rxq)
-
+static int mvneta_rxq_sw_init(struct mvneta_port *pp,
+			      struct mvneta_rx_queue *rxq)
 {
 	rxq->size = pp->rx_ring_size;
 
@@ -2813,6 +2811,12 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
 
 	rxq->last_desc = rxq->size - 1;
 
+	return 0;
+}
+
+static void mvneta_rxq_hw_init(struct mvneta_port *pp,
+			       struct mvneta_rx_queue *rxq)
+{
 	/* Set Rx descriptors queue starting address */
 	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
 	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
@@ -2836,6 +2840,20 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
 		mvneta_rxq_short_pool_set(pp, rxq);
 		mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
 	}
+}
+
+/* Create a specified RX queue */
+static int mvneta_rxq_init(struct mvneta_port *pp,
+			   struct mvneta_rx_queue *rxq)
+
+{
+	int ret;
+
+	ret = mvneta_rxq_sw_init(pp, rxq);
+	if (ret < 0)
+		return ret;
+
+	mvneta_rxq_hw_init(pp, rxq);
 
 	return 0;
 }
@@ -2858,9 +2876,8 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
 	rxq->descs_phys = 0;
 }
 
-/* Create and initialize a tx queue */
-static int mvneta_txq_init(struct mvneta_port *pp,
-			   struct mvneta_tx_queue *txq)
+static int mvneta_txq_sw_init(struct mvneta_port *pp,
+			      struct mvneta_tx_queue *txq)
 {
 	int cpu;
 
@@ -2873,7 +2890,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
 	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
 
-
 	/* Allocate memory for TX descriptors */
 	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
 					txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2883,14 +2899,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 
 	txq->last_desc = txq->size - 1;
 
-	/* Set maximum bandwidth for enabled TXQs */
-	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
-	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
-
-	/* Set Tx descriptors queue starting address */
-	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
-	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
-
 	txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
 				    GFP_KERNEL);
 	if (!txq->tx_skb) {
@@ -2911,7 +2919,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 				  txq->descs, txq->descs_phys);
 		return -ENOMEM;
 	}
-	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
 
 	/* Setup XPS mapping */
 	if (txq_number > 1)
@@ -2924,9 +2931,38 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 	return 0;
 }
 
+static void mvneta_txq_hw_init(struct mvneta_port *pp,
+			       struct mvneta_tx_queue *txq)
+{
+	/* Set maximum bandwidth for enabled TXQs */
+	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
+	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
+
+	/* Set Tx descriptors queue starting address */
+	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
+	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
+
+	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
+}
+
+/* Create and initialize a tx queue */
+static int mvneta_txq_init(struct mvneta_port *pp,
+			   struct mvneta_tx_queue *txq)
+{
+	int ret;
+
+	ret = mvneta_txq_sw_init(pp, txq);
+	if (ret < 0)
+		return ret;
+
+	mvneta_txq_hw_init(pp, txq);
+
+	return 0;
+}
+
 /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/
-static void mvneta_txq_deinit(struct mvneta_port *pp,
-			      struct mvneta_tx_queue *txq)
+static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
+				 struct mvneta_tx_queue *txq)
 {
 	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
 
@@ -2947,7 +2983,11 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
 	txq->last_desc = 0;
 	txq->next_desc_to_proc = 0;
 	txq->descs_phys = 0;
+}
 
+static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
+				 struct mvneta_tx_queue *txq)
+{
 	/* Set minimum bandwidth for disabled TXQs */
 	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
 	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
@@ -2957,6 +2997,13 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
 	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
 }
 
+static void mvneta_txq_deinit(struct mvneta_port *pp,
+			      struct mvneta_tx_queue *txq)
+{
+	mvneta_txq_sw_deinit(pp, txq);
+	mvneta_txq_hw_deinit(pp, txq);
+}
+
 /* Cleanup all Tx queues */
 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
 {
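
Taken together, the hunks above split each queue setup routine into a software half (descriptor-ring and bookkeeping allocation) and a hardware half (register programming), with the original entry points now composing the two. Below is a minimal standalone sketch of that pattern; every name in it (demo_queue, demo_reg_write, the register indices) is hypothetical, with malloc() standing in for dma_alloc_coherent() and an array standing in for the controller's register file. The usual payoff of such a split is a path like suspend/resume or a controller reset, where DMA memory contents survive but registers lose state, so only the hardware half needs to be rerun.

/*
 * Hypothetical sketch of the SW/HW init split; not mvneta code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_REG_BASE 0 /* hypothetical register indices */
#define DEMO_REG_SIZE 1

static unsigned long demo_regs[2]; /* stand-in for memory-mapped registers */

struct demo_queue {
	int size;
	void *descs; /* stand-in for a DMA-coherent descriptor ring */
};

static void demo_reg_write(int reg, unsigned long val)
{
	demo_regs[reg] = val; /* a real driver would use mvreg_write() */
}

/* SW half: allocate everything that persists across a power transition. */
static int demo_queue_sw_init(struct demo_queue *q, int size)
{
	q->size = size;
	q->descs = calloc(size, 32); /* stand-in for dma_alloc_coherent() */
	return q->descs ? 0 : -1;
}

/* HW half: registers lose their contents, so this can be rerun alone. */
static void demo_queue_hw_init(const struct demo_queue *q)
{
	demo_reg_write(DEMO_REG_BASE, (unsigned long)q->descs);
	demo_reg_write(DEMO_REG_SIZE, (unsigned long)q->size);
}

/* Entry point composes the two halves, mirroring mvneta_rxq/txq_init(). */
static int demo_queue_init(struct demo_queue *q, int size)
{
	if (demo_queue_sw_init(q, size) < 0)
		return -1;
	demo_queue_hw_init(q);
	return 0;
}

int main(void)
{
	struct demo_queue q;

	if (demo_queue_init(&q, 128) < 0)
		return 1;
	printf("after init:   base=%#lx size=%lu\n", demo_regs[0], demo_regs[1]);

	/* Simulated suspend/resume: registers are wiped, memory survives. */
	memset(demo_regs, 0, sizeof(demo_regs));
	demo_queue_hw_init(&q); /* resume reruns only the HW half */
	printf("after resume: base=%#lx size=%lu\n", demo_regs[0], demo_regs[1]);

	free(q.descs);
	return 0;
}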