@@ -271,7 +271,7 @@ static void init_eq_buf(struct mlx5_eq *eq)
 	struct mlx5_eqe *eqe;
 	int i;
 
-	for (i = 0; i < eq->nent; i++) {
+	for (i = 0; i < eq_get_size(eq); i++) {
 		eqe = get_eqe(eq, i);
 		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
 	}
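
Note: init_eq_buf() now asks a helper for the entry count instead of reading eq->nent, which this patch deletes. The helper's body isn't shown in these hunks; a minimal sketch, assuming it recovers the count from the frag-buf control that create_map_eq() initializes below via mlx5_init_fbc():

static int eq_get_size(struct mlx5_eq *eq)
{
	/* mlx5_init_fbc() stores sz_m1 = (1 << log_sz) - 1, so the
	 * number of EQEs is simply sz_m1 + 1.
	 */
	return eq->fbc.sz_m1 + 1;
}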
@@ -281,8 +281,10 @@ static int
 create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 	      struct mlx5_eq_param *param)
 {
+	u8 log_eq_size = order_base_2(param->nent + MLX5_NUM_SPARE_EQE);
 	struct mlx5_cq_table *cq_table = &eq->cq_table;
 	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
+	u8 log_eq_stride = ilog2(MLX5_EQE_SIZE);
 	struct mlx5_priv *priv = &dev->priv;
 	u8 vecidx = param->irq_index;
 	__be64 *pas;
@@ -297,16 +299,18 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 	spin_lock_init(&cq_table->lock);
 	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
 
-	eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE);
 	eq->cons_index = 0;
-	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
+
+	err = mlx5_frag_buf_alloc_node(dev, wq_get_byte_sz(log_eq_size, log_eq_stride),
+				       &eq->frag_buf, dev->priv.numa_node);
 	if (err)
 		return err;
 
+	mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);
 	init_eq_buf(eq);
 
 	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
-		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
+		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages;
 
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
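
The ring size is now computed from two log2 values instead of a byte product: order_base_2() rounds the requested entry count up to a power of two, and wq_get_byte_sz() combines the two shifts. Assuming the usual definition of that helper, the arithmetic is equivalent to the old roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE) * MLX5_EQE_SIZE:

/* Illustration of the size math, not driver code.
 * log_sz     - log2 of the entry count (rounded up to a power of two)
 * log_stride - log2 of the entry size (an EQE is 64 bytes, so 6)
 */
static inline u32 eq_bytes(u8 log_sz, u8 log_stride)
{
	return ((u32)1 << log_sz) << log_stride; /* entries * stride */
}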
@@ -315,7 +319,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 	}
 
 	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
-	mlx5_fill_page_array(&eq->buf, pas);
+	mlx5_fill_page_frag_array(&eq->frag_buf, pas);
 
 	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
 	if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
@@ -326,11 +330,11 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 			 param->mask[i]);
 
 	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
-	MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
+	MLX5_SET(eqc, eqc, log_eq_size, eq->fbc.log_sz);
 	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
 	MLX5_SET(eqc, eqc, intr, vecidx);
 	MLX5_SET(eqc, eqc, log_page_size,
-		 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+		 eq->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
 
 	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 	if (err)
@@ -356,7 +360,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 	kvfree(in);
 
 err_buf:
-	mlx5_buf_free(dev, &eq->buf);
+	mlx5_frag_buf_free(dev, &eq->frag_buf);
 	return err;
 }
 
@@ -413,7 +417,7 @@ static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 		       eq->eqn);
 	synchronize_irq(eq->irqn);
 
-	mlx5_buf_free(dev, &eq->buf);
+	mlx5_frag_buf_free(dev, &eq->frag_buf);
 
 	return err;
 }
@@ -764,10 +768,11 @@ EXPORT_SYMBOL(mlx5_eq_destroy_generic);
 struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
 {
 	u32 ci = eq->cons_index + cc;
+	u32 nent = eq_get_size(eq);
 	struct mlx5_eqe *eqe;
 
-	eqe = get_eqe(eq, ci & (eq->nent - 1));
-	eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe;
+	eqe = get_eqe(eq, ci & (nent - 1));
+	eqe = ((eqe->owner & 1) ^ !!(ci & nent)) ? NULL : eqe;
 	/* Make sure we read EQ entry contents after we've
 	 * checked the ownership bit.
 	 */
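
The validity check itself is unchanged in substance: an EQE is live only when the low bit of its owner field matches the parity of how many times the consumer index has wrapped the ring, which sits in bit log2(nent) of ci. A standalone illustration of that test (names hypothetical, not driver code):

/* Entry is valid when the hardware-written owner bit agrees with the
 * software wrap parity; nent must be a power of two for the masks.
 */
static bool eqe_is_valid(u8 owner, u32 ci, u32 nent)
{
	return (owner & 1) == !!(ci & nent);
}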