@@ -49,6 +49,7 @@
 
 static int clean_mr(struct mlx5_ib_mr *mr);
 static int use_umr(struct mlx5_ib_dev *dev, int order);
+static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
@@ -149,6 +150,9 @@ static void reg_mr_callback(int status, void *context)
 	if (err)
 		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
 	write_unlock_irqrestore(&table->lock, flags);
+
+	if (!completion_done(&ent->compl))
+		complete(&ent->compl);
 }
 
 static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
@@ -157,7 +161,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
 	struct mlx5_ib_mr *mr;
-	int npages = 1 << ent->order;
 	void *mkc;
 	u32 *in;
 	int err = 0;
@@ -185,11 +188,11 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 
 		MLX5_SET(mkc, mkc, free, 1);
 		MLX5_SET(mkc, mkc, umr_en, 1);
-		MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
+		MLX5_SET(mkc, mkc, access_mode, ent->access_mode);
 
 		MLX5_SET(mkc, mkc, qpn, 0xffffff);
-		MLX5_SET(mkc, mkc, translations_octword_size, (npages + 1) / 2);
-		MLX5_SET(mkc, mkc, log_page_size, 12);
+		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
+		MLX5_SET(mkc, mkc, log_page_size, ent->page);
 
 		spin_lock_irq(&ent->lock);
 		ent->pending++;
@@ -447,6 +450,42 @@ static void cache_work_func(struct work_struct *work)
 	__cache_work_func(ent);
 }
 
+struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
+{
+	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_cache_ent *ent;
+	struct mlx5_ib_mr *mr;
+	int err;
+
+	if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
+		mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
+		return NULL;
+	}
+
+	ent = &cache->ent[entry];
+	while (1) {
+		spin_lock_irq(&ent->lock);
+		if (list_empty(&ent->head)) {
+			spin_unlock_irq(&ent->lock);
+
+			err = add_keys(dev, entry, 1);
+			if (err)
+				return ERR_PTR(err);
+
+			wait_for_completion(&ent->compl);
+		} else {
+			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
+					      list);
+			list_del(&mr->list);
+			ent->cur--;
+			spin_unlock_irq(&ent->lock);
+			if (ent->cur < ent->limit)
+				queue_work(cache->wq, &ent->work);
+			return mr;
+		}
+	}
+}
+
 static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
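The mlx5_mr_cache_alloc()/mlx5_mr_cache_free() pair introduced above lets callers draw pre-created mkeys straight from a chosen cache bucket instead of going through the order-based alloc_cached_mr() lookup. A minimal caller sketch follows; it is illustration only, the helper name and bucket index are assumptions, and the alloc path may sleep in wait_for_completion() until reg_mr_callback() signals a freshly created key:

/* Illustrative only: take one mkey from bucket 'entry' and hand it back. */
static int use_cached_mkey(struct mlx5_ib_dev *dev, int entry)
{
	struct mlx5_ib_mr *mr;

	mr = mlx5_mr_cache_alloc(dev, entry);
	if (IS_ERR_OR_NULL(mr))		/* NULL: bad index, ERR_PTR: add_keys() failed */
		return mr ? PTR_ERR(mr) : -EINVAL;

	/* ... program the mkey with a UMR WQE and run I/O against it ... */

	mlx5_mr_cache_free(dev, mr);	/* unreg_umr() + return to bucket 'entry' */
	return 0;
}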
@@ -456,12 +495,12 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
 	int i;
 
 	c = order2idx(dev, order);
-	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
+	if (c < 0 || c > MAX_UMR_CACHE_ENTRY) {
 		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
 		return NULL;
 	}
 
-	for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
+	for (i = c; i < MAX_UMR_CACHE_ENTRY; i++) {
 		ent = &cache->ent[i];
 
 		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);
@@ -488,7 +527,7 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
 	return mr;
 }
 
-static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent;
@@ -500,6 +539,10 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
 		return;
 	}
+
+	if (unreg_umr(dev, mr))
+		return;
+
 	ent = &cache->ent[c];
 	spin_lock_irq(&ent->lock);
 	list_add_tail(&mr->list, &ent->head);
@@ -602,7 +645,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent;
-	int limit;
 	int err;
 	int i;
 
@@ -615,26 +657,33 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 
 	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
-		INIT_LIST_HEAD(&cache->ent[i].head);
-		spin_lock_init(&cache->ent[i].lock);
-
 		ent = &cache->ent[i];
 		INIT_LIST_HEAD(&ent->head);
 		spin_lock_init(&ent->lock);
 		ent->order = i + 2;
 		ent->dev = dev;
+		ent->limit = 0;
 
-		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
-		    mlx5_core_is_pf(dev->mdev) &&
-		    use_umr(dev, ent->order))
-			limit = dev->mdev->profile->mr_cache[i].limit;
-		else
-			limit = 0;
-
+		init_completion(&ent->compl);
 		INIT_WORK(&ent->work, cache_work_func);
 		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
-		ent->limit = limit;
 		queue_work(cache->wq, &ent->work);
+
+		if (i > MAX_UMR_CACHE_ENTRY)
+			continue;
+
+		if (!use_umr(dev, ent->order))
+			continue;
+
+		ent->page = PAGE_SHIFT;
+		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
+			   MLX5_IB_UMR_OCTOWORD;
+		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
+		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
+		    mlx5_core_is_pf(dev->mdev))
+			ent->limit = dev->mdev->profile->mr_cache[i].limit;
+		else
+			ent->limit = 0;
 	}
 
 	err = mlx5_mr_cache_debugfs_init(dev);
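With ent->page, ent->xlt and ent->access_mode now set per bucket, the translations_octword_size written in add_keys() is simply the bucket's MTT footprint in 16-byte octowords. A worked example of that arithmetic, sketch only, assuming sizeof(struct mlx5_mtt) == 8 and MLX5_IB_UMR_OCTOWORD == 16 as in the driver headers:

/* Sketch of the per-bucket sizing: bucket i covers order = i + 2, i.e.
 * (1 << order) pages with one MTT entry per page.  With 8-byte MTT entries
 * and 16-byte octowords, bucket 0 (order 2) needs 4 * 8 / 16 = 2 octowords.
 */
static unsigned int mtt_octowords(unsigned int order)
{
	const unsigned int mtt_size = 8;	/* assumed sizeof(struct mlx5_mtt) */
	const unsigned int octoword = 16;	/* assumed MLX5_IB_UMR_OCTOWORD */

	return (1u << order) * mtt_size / octoword;
}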
@@ -758,7 +807,7 @@ static int get_octo_len(u64 addr, u64 len, int page_size)
 static int use_umr(struct mlx5_ib_dev *dev, int order)
 {
 	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
-		return order < MAX_MR_CACHE_ENTRIES + 2;
+		return order <= MAX_UMR_CACHE_ENTRY + 2;
 	return order <= MLX5_MAX_UMR_SHIFT;
 }
 
@@ -871,7 +920,7 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 				 MLX5_IB_UPD_XLT_ENABLE);
 
 	if (err) {
-		free_cached_mr(dev, mr);
+		mlx5_mr_cache_free(dev, mr);
 		return ERR_PTR(err);
 	}
 
@@ -1091,6 +1140,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 		goto err_2;
 	}
 	mr->mmkey.type = MLX5_MKEY_MR;
+	mr->desc_size = sizeof(struct mlx5_mtt);
 	mr->umem = umem;
 	mr->dev = dev;
 	mr->live = 1;
@@ -1398,12 +1448,7 @@ static int clean_mr(struct mlx5_ib_mr *mr)
 			return err;
 		}
 	} else {
-		err = unreg_umr(dev, mr);
-		if (err) {
-			mlx5_ib_warn(dev, "failed unregister\n");
-			return err;
-		}
-		free_cached_mr(dev, mr);
+		mlx5_mr_cache_free(dev, mr);
 	}
 
 	if (!umred)