@@ -30,163 +30,63 @@ static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
-static int mlx5e_alloc_trap_rq(struct mlx5e_priv *priv, struct mlx5e_rq_param *rqp,
-			       struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
-			       struct mlx5e_ch_stats *ch_stats,
-			       struct mlx5e_rq *rq)
+static void mlx5e_free_trap_rq(struct mlx5e_rq *rq)
 {
-	void *rqc_wq = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	struct page_pool_params pp_params = {};
-	int node = dev_to_node(mdev->device);
-	u32 pool_size;
-	int wq_sz;
-	int err;
-	int i;
-
-	rqp->wq.db_numa_node = node;
-
-	rq->wq_type = params->rq_wq_type;
-	rq->pdev = mdev->device;
-	rq->netdev = priv->netdev;
-	rq->mdev = mdev;
-	rq->priv = priv;
-	rq->stats = stats;
-	rq->clock = &mdev->clock;
-	rq->tstamp = &priv->tstamp;
-	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
-
-	xdp_rxq_info_unused(&rq->xdp_rxq);
-
-	rq->buff.map_dir = DMA_FROM_DEVICE;
-	rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, NULL);
-	pool_size = 1 << params->log_rq_mtu_frames;
-
-	err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl);
-	if (err)
-		return err;
-
-	rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
-
-	wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
-
-	rq->wqe.info = rqp->frags_info;
-	rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
-	rq->wqe.frags = kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
-						 (wq_sz << rq->wqe.info.log_num_frags)),
-				      GFP_KERNEL, node);
-	if (!rq->wqe.frags) {
-		err = -ENOMEM;
-		goto err_wq_cyc_destroy;
-	}
-
-	err = mlx5e_init_di_list(rq, wq_sz, node);
-	if (err)
-		goto err_free_frags;
-
-	rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey.key);
-
-	mlx5e_rq_set_trap_handlers(rq, params);
-
-	/* Create a page_pool and register it with rxq */
-	pp_params.order = 0;
-	pp_params.flags = 0; /* No-internal DMA mapping in page_pool */
-	pp_params.pool_size = pool_size;
-	pp_params.nid = node;
-	pp_params.dev = mdev->device;
-	pp_params.dma_dir = rq->buff.map_dir;
-
-	/* page_pool can be used even when there is no rq->xdp_prog,
-	 * given page_pool does not handle DMA mapping there is no
-	 * required state to clear. And page_pool gracefully handle
-	 * elevated refcnt.
-	 */
-	rq->page_pool = page_pool_create(&pp_params);
-	if (IS_ERR(rq->page_pool)) {
-		err = PTR_ERR(rq->page_pool);
-		rq->page_pool = NULL;
-		goto err_free_di_list;
-	}
-	for (i = 0; i < wq_sz; i++) {
-		struct mlx5e_rx_wqe_cyc *wqe =
-			mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
-		int f;
-
-		for (f = 0; f < rq->wqe.info.num_frags; f++) {
-			u32 frag_size = rq->wqe.info.arr[f].frag_size |
-				MLX5_HW_START_PADDING;
-
-			wqe->data[f].byte_count = cpu_to_be32(frag_size);
-			wqe->data[f].lkey = rq->mkey_be;
-		}
-		/* check if num_frags is not a pow of two */
-		if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
-			wqe->data[f].byte_count = 0;
-			wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
-			wqe->data[f].addr = 0;
-		}
-	}
-	return 0;
-
-err_free_di_list:
+	page_pool_destroy(rq->page_pool);
 	mlx5e_free_di_list(rq);
-err_free_frags:
 	kvfree(rq->wqe.frags);
-err_wq_cyc_destroy:
 	mlx5_wq_destroy(&rq->wq_ctrl);
-
-	return err;
 }
 
-static void mlx5e_free_trap_rq(struct mlx5e_rq *rq)
+static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params,
+			       struct mlx5e_rq *rq)
 {
-	page_pool_destroy(rq->page_pool);
-	mlx5e_free_di_list(rq);
-	kvfree(rq->wqe.frags);
-	mlx5_wq_destroy(&rq->wq_ctrl);
+	struct mlx5_core_dev *mdev = t->mdev;
+	struct mlx5e_priv *priv = t->priv;
+
+	rq->wq_type = params->rq_wq_type;
+	rq->pdev = mdev->device;
+	rq->netdev = priv->netdev;
+	rq->priv = priv;
+	rq->clock = &mdev->clock;
+	rq->tstamp = &priv->tstamp;
+	rq->mdev = mdev;
+	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+	rq->stats = &priv->trap_stats.rq;
+	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
+	xdp_rxq_info_unused(&rq->xdp_rxq);
+	mlx5e_rq_set_trap_handlers(rq, params);
 }
 
-static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct napi_struct *napi,
-			      struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
-			      struct mlx5e_rq_param *rq_param,
-			      struct mlx5e_ch_stats *ch_stats,
-			      struct mlx5e_rq *rq)
+static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
 {
+	struct mlx5e_rq_param *rq_param = &t->rq_param;
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5e_create_cq_param ccp = {};
 	struct dim_cq_moder trap_moder = {};
-	struct mlx5e_cq *cq = &rq->cq;
+	struct mlx5e_rq *rq = &t->rq;
+	int node;
 	int err;
 
-	ccp.node = dev_to_node(mdev->device);
-	ccp.ch_stats = ch_stats;
-	ccp.napi = napi;
+	node = dev_to_node(mdev->device);
+
+	ccp.node = node;
+	ccp.ch_stats = t->stats;
+	ccp.napi = &t->napi;
 	ccp.ix = 0;
-	err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, cq);
+	err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, &rq->cq);
 	if (err)
 		return err;
 
-	err = mlx5e_alloc_trap_rq(priv, rq_param, stats, params, ch_stats, rq);
+	mlx5e_init_trap_rq(t, &t->params, rq);
+	err = mlx5e_open_rq(&t->params, rq_param, NULL, node, rq);
 	if (err)
 		goto err_destroy_cq;
 
-	err = mlx5e_create_rq(rq, rq_param);
-	if (err)
-		goto err_free_rq;
-
-	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
-	if (err)
-		goto err_destroy_rq;
-
 	return 0;
 
-err_destroy_rq:
-	mlx5e_destroy_rq(rq);
-	mlx5e_free_rx_descs(rq);
-err_free_rq:
-	mlx5e_free_trap_rq(rq);
 err_destroy_cq:
-	mlx5e_close_cq(cq);
+	mlx5e_close_cq(&rq->cq);
 
 	return err;
 }
@@ -273,11 +173,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
 
 	netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64);
 
-	err = mlx5e_open_trap_rq(priv, &t->napi,
-				 &priv->trap_stats.rq,
-				 &t->params, &t->rq_param,
-				 &priv->trap_stats.ch,
-				 &t->rq);
+	err = mlx5e_open_trap_rq(priv, t);
 	if (unlikely(err))
 		goto err_napi_del;
 
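
Taken together, the hunks replace the hand-rolled allocate/create/ready sequence with an init-then-generic-open pairing. The recap below is assembled from the added lines above and annotated for readers following the refactor; it is an illustrative sketch, not a verbatim excerpt of the tree, and the notes on what mlx5e_open_rq() takes over are inferred from the lines deleted in this commit.

/* Sketch of the resulting trap-RQ open path (condensed from the hunks above).
 * mlx5e_init_trap_rq() fills only software-visible RQ fields; WQ/page_pool
 * allocation, RQ creation and the RST->RDY transition, which the deleted
 * mlx5e_alloc_trap_rq()/mlx5e_create_rq()/mlx5e_modify_rq_state() calls used
 * to perform by hand, are delegated to the generic mlx5e_open_rq().
 */
static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
{
	struct mlx5e_rq_param *rq_param = &t->rq_param;
	struct mlx5e_create_cq_param ccp = {};
	struct dim_cq_moder trap_moder = {};
	struct mlx5e_rq *rq = &t->rq;
	int node = dev_to_node(priv->mdev->device);
	int err;

	/* The trap channel supplies its own NAPI context and stats. */
	ccp.node = node;
	ccp.ch_stats = t->stats;
	ccp.napi = &t->napi;
	ccp.ix = 0;
	err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, &rq->cq);
	if (err)
		return err;

	mlx5e_init_trap_rq(t, &t->params, rq);	/* software fields only */
	err = mlx5e_open_rq(&t->params, rq_param, NULL, node, rq);
	if (err)
		goto err_destroy_cq;	/* CQ was opened first, so it is closed on unwind */

	return 0;

err_destroy_cq:
	mlx5e_close_cq(&rq->cq);
	return err;
}

The caller side shrinks accordingly, as the second hunk shows: mlx5e_open_trap() now passes the whole struct mlx5e_trap instead of a list of loose per-trap parameters.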