@@ -231,3 +231,238 @@ void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
 	mlx5_cmd_free_uar(mdev, uar->index);
 }
 EXPORT_SYMBOL(mlx5_unmap_free_uar);
+
+static int uars_per_sys_page(struct mlx5_core_dev *mdev)
+{
+	if (MLX5_CAP_GEN(mdev, uar_4k))
+		return MLX5_CAP_GEN(mdev, num_of_uars_per_page);
+
+	return 1;
+}
+
+static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
+{
+	u32 system_page_index;
+
+	if (MLX5_CAP_GEN(mdev, uar_4k))
+		system_page_index = index >> (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT);
+	else
+		system_page_index = index;
+
+	return (pci_resource_start(mdev->pdev, 0) >> PAGE_SHIFT) + system_page_index;
+}
+
+static void up_rel_func(struct kref *kref)
+{
+	struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);
+
+	list_del(&up->list);
+	if (mlx5_cmd_free_uar(up->mdev, up->index))
+		mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
+	kfree(up->reg_bitmap);
+	kfree(up->fp_bitmap);
+	kfree(up);
+}
+
+static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
+					      bool map_wc)
+{
+	struct mlx5_uars_page *up;
+	int err = -ENOMEM;
+	phys_addr_t pfn;
+	int bfregs;
+	int i;
+
+	bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
+	up = kzalloc(sizeof(*up), GFP_KERNEL);
+	if (!up)
+		return ERR_PTR(err);
+
+	up->mdev = mdev;
+	up->reg_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL);
+	if (!up->reg_bitmap)
+		goto error1;
+
+	up->fp_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL);
+	if (!up->fp_bitmap)
+		goto error1;
+
+	for (i = 0; i < bfregs; i++)
+		if ((i % MLX5_BFREGS_PER_UAR) < MLX5_NON_FP_BFREGS_PER_UAR)
+			set_bit(i, up->reg_bitmap);
+		else
+			set_bit(i, up->fp_bitmap);
+
+	up->bfregs = bfregs;
+	up->fp_avail = bfregs * MLX5_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
+	up->reg_avail = bfregs * MLX5_NON_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
+
+	err = mlx5_cmd_alloc_uar(mdev, &up->index);
+	if (err) {
+		mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
+		goto error1;
+	}
+
+	pfn = uar2pfn(mdev, up->index);
+	if (map_wc) {
+		up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
+		if (!up->map) {
+			err = -EAGAIN;
+			goto error2;
+		}
+	} else {
+		up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+		if (!up->map) {
+			err = -ENOMEM;
+			goto error2;
+		}
+	}
+	kref_init(&up->ref_count);
+	mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n",
+		      up->index, up->bfregs);
+	return up;
+
+error2:
+	if (mlx5_cmd_free_uar(mdev, up->index))
+		mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index);
+error1:
+	kfree(up->fp_bitmap);
+	kfree(up->reg_bitmap);
+	kfree(up);
+	return ERR_PTR(err);
+}
+
+static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi)
+{
+	/* return the offset in bytes from the start of the page to the
+	 * blue flame area of the UAR
+	 */
+	return dbi / MLX5_BFREGS_PER_UAR * MLX5_ADAPTER_PAGE_SIZE +
+	       (dbi % MLX5_BFREGS_PER_UAR) *
+	       (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET;
+}
+
+static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
+		       bool map_wc, bool fast_path)
+{
+	struct mlx5_bfreg_data *bfregs;
+	struct mlx5_uars_page *up;
+	struct list_head *head;
+	unsigned long *bitmap;
+	unsigned int *avail;
+	struct mutex *lock; /* pointer to right mutex */
+	int dbi;
+
+	bfregs = &mdev->priv.bfregs;
+	if (map_wc) {
+		head = &bfregs->wc_head.list;
+		lock = &bfregs->wc_head.lock;
+	} else {
+		head = &bfregs->reg_head.list;
+		lock = &bfregs->reg_head.lock;
+	}
+	mutex_lock(lock);
+	if (list_empty(head)) {
+		up = alloc_uars_page(mdev, map_wc);
+		if (IS_ERR(up)) {
+			mutex_unlock(lock);
+			return PTR_ERR(up);
+		}
+		list_add(&up->list, head);
+	} else {
+		up = list_entry(head->next, struct mlx5_uars_page, list);
+		kref_get(&up->ref_count);
+	}
+	if (fast_path) {
+		bitmap = up->fp_bitmap;
+		avail = &up->fp_avail;
+	} else {
+		bitmap = up->reg_bitmap;
+		avail = &up->reg_avail;
+	}
+	dbi = find_first_bit(bitmap, up->bfregs);
+	clear_bit(dbi, bitmap);
+	(*avail)--;
+	if (!(*avail))
+		list_del(&up->list);
+
+	bfreg->map = up->map + map_offset(mdev, dbi);
+	bfreg->up = up;
+	bfreg->wc = map_wc;
+	bfreg->index = up->index + dbi / MLX5_BFREGS_PER_UAR;
+	mutex_unlock(lock);
+
+	return 0;
+}
+
+int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
+		     bool map_wc, bool fast_path)
+{
+	int err;
+
+	err = alloc_bfreg(mdev, bfreg, map_wc, fast_path);
+	if (!err)
+		return 0;
+
+	if (err == -EAGAIN && map_wc)
+		return alloc_bfreg(mdev, bfreg, false, fast_path);
+
+	return err;
+}
+EXPORT_SYMBOL(mlx5_alloc_bfreg);
+
+static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev *dev,
+					   struct mlx5_uars_page *up,
+					   struct mlx5_sq_bfreg *bfreg)
+{
+	unsigned int uar_idx;
+	unsigned int bfreg_idx;
+	unsigned int bf_reg_size;
+
+	bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size);
+
+	uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT;
+	bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size;
+
+	return uar_idx * MLX5_BFREGS_PER_UAR + bfreg_idx;
+}
+
+void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg)
+{
+	struct mlx5_bfreg_data *bfregs;
+	struct mlx5_uars_page *up;
+	struct mutex *lock; /* pointer to right mutex */
+	unsigned int dbi;
+	bool fp;
+	unsigned int *avail;
+	unsigned long *bitmap;
+	struct list_head *head;
+
+	bfregs = &mdev->priv.bfregs;
+	if (bfreg->wc) {
+		head = &bfregs->wc_head.list;
+		lock = &bfregs->wc_head.lock;
+	} else {
+		head = &bfregs->reg_head.list;
+		lock = &bfregs->reg_head.lock;
+	}
+	up = bfreg->up;
+	dbi = addr_to_dbi_in_syspage(mdev, up, bfreg);
+	fp = (dbi % MLX5_BFREGS_PER_UAR) >= MLX5_NON_FP_BFREGS_PER_UAR;
+	if (fp) {
+		avail = &up->fp_avail;
+		bitmap = up->fp_bitmap;
+	} else {
+		avail = &up->reg_avail;
+		bitmap = up->reg_bitmap;
+	}
+	mutex_lock(lock);
+	(*avail)++;
+	set_bit(dbi, bitmap);
+	if (*avail == 1)
+		list_add_tail(&up->list, head);
+
+	kref_put(&up->ref_count, up_rel_func);
+	mutex_unlock(lock);
+}
+EXPORT_SYMBOL(mlx5_free_bfreg);
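
For context, a minimal sketch of how a driver consumer might use the bfreg allocator added by this hunk. Only mlx5_alloc_bfreg(), mlx5_free_bfreg() and struct mlx5_sq_bfreg come from the code above; the helper name example_sq_bfreg_get_put(), the assumption that the declarations are reachable through linux/mlx5/driver.h, and the surrounding setup (a valid struct mlx5_core_dev obtained elsewhere) are illustrative assumptions, not part of the patch.

	#include <linux/mlx5/driver.h>	/* struct mlx5_core_dev, struct mlx5_sq_bfreg */

	/* Hypothetical consumer: take a write-combining, non-fast-path blue
	 * flame register for a send queue, then release it on teardown.
	 */
	static int example_sq_bfreg_get_put(struct mlx5_core_dev *mdev)
	{
		struct mlx5_sq_bfreg bfreg;
		int err;

		/* Request a WC mapping; mlx5_alloc_bfreg() retries without WC
		 * when the UAR page cannot be ioremapped write-combining
		 * (alloc_uars_page() returns -EAGAIN in that case).
		 */
		err = mlx5_alloc_bfreg(mdev, &bfreg, true, false);
		if (err)
			return err;

		/* bfreg.map now points at the blue flame area of the mapped
		 * UAR page; bfreg.index is the UAR index to program into the
		 * SQ context.
		 */

		mlx5_free_bfreg(mdev, &bfreg);
		return 0;
	}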