
Commit 49780d4

Artemy-Mellanox authored and dledford committed
IB/mlx5: Expose MR cache for mlx5_ib
Allow other parts of mlx5_ib to use the MR cache mechanism.

* Add new functions mlx5_mr_cache_alloc and mlx5_mr_cache_free.
* Traditional MTT MKey buckets are limited by MAX_UMR_CACHE_ENTRY;
  additional buckets may be added above it.

Signed-off-by: Artemy Kovalyov <[email protected]>
Signed-off-by: Leon Romanovsky <[email protected]>
Signed-off-by: Doug Ledford <[email protected]>
1 parent 94990b4 commit 49780d4
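As a quick orientation, here is how a consumer inside mlx5_ib might use the newly exported pair. This is my sketch, not part of the commit; the helper example_use_cached_mkey and the surrounding flow are hypothetical, only mlx5_mr_cache_alloc() and mlx5_mr_cache_free() come from this change.

/* Hypothetical consumer sketch; only mlx5_mr_cache_alloc() and
 * mlx5_mr_cache_free() are introduced by this commit. */
static int example_use_cached_mkey(struct mlx5_ib_dev *dev, int entry)
{
	struct mlx5_ib_mr *mr;

	/* NULL for an out-of-range entry, an ERR_PTR if creating a fresh
	 * MKey fails, otherwise a ready-to-program cached MR. */
	mr = mlx5_mr_cache_alloc(dev, entry);
	if (IS_ERR_OR_NULL(mr))
		return mr ? PTR_ERR(mr) : -EINVAL;

	/* ... program the MKey translation table via UMR and use it ... */

	/* Invalidates the MKey via unreg_umr() and returns it to the
	 * bucket's free list. */
	mlx5_mr_cache_free(dev, mr);
	return 0;
}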

Showing 3 changed files with 82 additions and 29 deletions.

drivers/infiniband/hw/mlx5/mlx5_ib.h

Lines changed: 8 additions & 1 deletion
@@ -541,6 +541,10 @@ struct mlx5_cache_ent {
 	struct dentry	       *dir;
 	char			name[4];
 	u32			order;
+	u32			xlt;
+	u32			access_mode;
+	u32			page;
+
 	u32			size;
 	u32			cur;
 	u32			miss;
@@ -555,6 +559,7 @@ struct mlx5_cache_ent {
 	struct work_struct	work;
 	struct delayed_work	dwork;
 	int			pending;
+	struct completion	compl;
 };
 
 struct mlx5_mr_cache {
@@ -837,7 +842,9 @@ void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
 int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
-int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
+
+struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
+void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
 			    struct ib_mr_status *mr_status);
 struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
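A note on the four new mlx5_cache_ent members; the comments below are my reading of how add_keys() and reg_mr_callback() in mr.c consume them, not annotations from the source.

/* Annotated excerpt of struct mlx5_cache_ent (comments mine): */
	u32			xlt;	/* mkc.translations_octword_size, in
					 * 16-byte octowords */
	u32			access_mode;	/* mkc.access_mode; MTT for the
						 * traditional UMR buckets */
	u32			page;	/* mkc.log_page_size; PAGE_SHIFT at init */
	struct completion	compl;	/* completed in reg_mr_callback() so that
					 * mlx5_mr_cache_alloc() can wait for a
					 * freshly created MKey */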

drivers/infiniband/hw/mlx5/mr.c

Lines changed: 72 additions & 27 deletions
@@ -49,6 +49,7 @@ enum {
 
 static int clean_mr(struct mlx5_ib_mr *mr);
 static int use_umr(struct mlx5_ib_dev *dev, int order);
+static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
@@ -149,6 +150,9 @@ static void reg_mr_callback(int status, void *context)
 	if (err)
 		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
 	write_unlock_irqrestore(&table->lock, flags);
+
+	if (!completion_done(&ent->compl))
+		complete(&ent->compl);
 }
 
 static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
@@ -157,7 +161,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
 	struct mlx5_ib_mr *mr;
-	int npages = 1 << ent->order;
 	void *mkc;
 	u32 *in;
 	int err = 0;
@@ -185,11 +188,11 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 
 		MLX5_SET(mkc, mkc, free, 1);
 		MLX5_SET(mkc, mkc, umr_en, 1);
-		MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
+		MLX5_SET(mkc, mkc, access_mode, ent->access_mode);
 
 		MLX5_SET(mkc, mkc, qpn, 0xffffff);
-		MLX5_SET(mkc, mkc, translations_octword_size, (npages + 1) / 2);
-		MLX5_SET(mkc, mkc, log_page_size, 12);
+		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
+		MLX5_SET(mkc, mkc, log_page_size, ent->page);
 
 		spin_lock_irq(&ent->lock);
 		ent->pending++;
@@ -447,6 +450,42 @@ static void cache_work_func(struct work_struct *work)
 	__cache_work_func(ent);
 }
 
+struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
+{
+	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_cache_ent *ent;
+	struct mlx5_ib_mr *mr;
+	int err;
+
+	if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
+		mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
+		return NULL;
+	}
+
+	ent = &cache->ent[entry];
+	while (1) {
+		spin_lock_irq(&ent->lock);
+		if (list_empty(&ent->head)) {
+			spin_unlock_irq(&ent->lock);
+
+			err = add_keys(dev, entry, 1);
+			if (err)
+				return ERR_PTR(err);
+
+			wait_for_completion(&ent->compl);
+		} else {
+			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
+					      list);
+			list_del(&mr->list);
+			ent->cur--;
+			spin_unlock_irq(&ent->lock);
+			if (ent->cur < ent->limit)
+				queue_work(cache->wq, &ent->work);
+			return mr;
+		}
+	}
+}
+
 static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
@@ -456,12 +495,12 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
 	int i;
 
 	c = order2idx(dev, order);
-	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
+	if (c < 0 || c > MAX_UMR_CACHE_ENTRY) {
 		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
 		return NULL;
 	}
 
-	for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
+	for (i = c; i < MAX_UMR_CACHE_ENTRY; i++) {
 		ent = &cache->ent[i];
 
 		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);
@@ -488,7 +527,7 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
 	return mr;
 }
 
-static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent;
@@ -500,6 +539,10 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
 		return;
 	}
+
+	if (unreg_umr(dev, mr))
+		return;
+
 	ent = &cache->ent[c];
 	spin_lock_irq(&ent->lock);
 	list_add_tail(&mr->list, &ent->head);
@@ -602,7 +645,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent;
-	int limit;
 	int err;
 	int i;
 
@@ -615,26 +657,33 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 
 	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
-		INIT_LIST_HEAD(&cache->ent[i].head);
-		spin_lock_init(&cache->ent[i].lock);
-
 		ent = &cache->ent[i];
 		INIT_LIST_HEAD(&ent->head);
 		spin_lock_init(&ent->lock);
 		ent->order = i + 2;
 		ent->dev = dev;
+		ent->limit = 0;
 
-		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
-		    mlx5_core_is_pf(dev->mdev) &&
-		    use_umr(dev, ent->order))
-			limit = dev->mdev->profile->mr_cache[i].limit;
-		else
-			limit = 0;
-
+		init_completion(&ent->compl);
 		INIT_WORK(&ent->work, cache_work_func);
 		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
-		ent->limit = limit;
 		queue_work(cache->wq, &ent->work);
+
+		if (i > MAX_UMR_CACHE_ENTRY)
+			continue;
+
+		if (!use_umr(dev, ent->order))
+			continue;
+
+		ent->page = PAGE_SHIFT;
+		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
+			   MLX5_IB_UMR_OCTOWORD;
+		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
+		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
+		    mlx5_core_is_pf(dev->mdev))
+			ent->limit = dev->mdev->profile->mr_cache[i].limit;
+		else
+			ent->limit = 0;
 	}
 
 	err = mlx5_mr_cache_debugfs_init(dev);
@@ -758,7 +807,7 @@ static int get_octo_len(u64 addr, u64 len, int page_size)
 static int use_umr(struct mlx5_ib_dev *dev, int order)
 {
 	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
-		return order < MAX_MR_CACHE_ENTRIES + 2;
+		return order <= MAX_UMR_CACHE_ENTRY + 2;
 	return order <= MLX5_MAX_UMR_SHIFT;
 }
 
@@ -871,7 +920,7 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 				 MLX5_IB_UPD_XLT_ENABLE);
 
 	if (err) {
-		free_cached_mr(dev, mr);
+		mlx5_mr_cache_free(dev, mr);
 		return ERR_PTR(err);
 	}
 
@@ -1091,6 +1140,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 		goto err_2;
 	}
 	mr->mmkey.type = MLX5_MKEY_MR;
+	mr->desc_size = sizeof(struct mlx5_mtt);
 	mr->umem = umem;
 	mr->dev = dev;
 	mr->live = 1;
@@ -1398,12 +1448,7 @@ static int clean_mr(struct mlx5_ib_mr *mr)
 			return err;
 		}
 	} else {
-		err = unreg_umr(dev, mr);
-		if (err) {
-			mlx5_ib_warn(dev, "failed unregister\n");
-			return err;
-		}
-		free_cached_mr(dev, mr);
+		mlx5_mr_cache_free(dev, mr);
 	}
 
 	if (!umred)
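One way to convince yourself the add_keys() refactor above is behavior-preserving for the traditional MTT buckets (my arithmetic, assuming sizeof(struct mlx5_mtt) == 8 and MLX5_IB_UMR_OCTOWORD == 16, as in this tree): npages = 1 << order is even, so the new ent->xlt and the old (npages + 1) / 2 both reduce to npages / 2.

#include <assert.h>

/* Standalone check that ent->xlt matches the old hard-coded value for
 * every bucket order used by the cache (2..22). */
int main(void)
{
	for (int order = 2; order <= 22; order++) {
		unsigned int npages = 1u << order;
		unsigned int old_xlt = (npages + 1) / 2;	/* old formula */
		unsigned int new_xlt = npages * 8 / 16;		/* ent->xlt */

		assert(new_xlt == old_xlt);
	}
	return 0;
}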

include/linux/mlx5/driver.h

Lines changed: 2 additions & 1 deletion
@@ -1052,7 +1052,8 @@ enum {
 };
 
 enum {
-	MAX_MR_CACHE_ENTRIES	= 21,
+	MAX_UMR_CACHE_ENTRY	= 20,
+	MAX_MR_CACHE_ENTRIES
 };
 
 enum {
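The header change is value-preserving: an enumerator without an initializer takes the previous value plus one, so MAX_MR_CACHE_ENTRIES is still 21. A minimal standalone illustration (mine):

#include <assert.h>

enum {
	MAX_UMR_CACHE_ENTRY = 20,
	MAX_MR_CACHE_ENTRIES	/* implicitly 21 */
};

int main(void)
{
	assert(MAX_MR_CACHE_ENTRIES == 21);
	return 0;
}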
