@@ -147,7 +147,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	bnxt_qplib_get_guid(rdev->netdev->dev_addr,
 			    (u8 *)&ib_attr->sys_image_guid);
 	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
-	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
+	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;
 
 	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
 	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
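
With this hunk, page_size_cap advertises the supported MR page sizes as a bitmask rather than 4K alone. A minimal consumer-side sketch, assuming the BNXT_RE_PAGE_SIZE_* macros are the usual BIT(shift) encodings from bnxt_re.h:

/* Sketch only: page_size_cap is a bitmask of supported MR page sizes.
 * BNXT_RE_PAGE_SIZE_4K and BNXT_RE_PAGE_SIZE_2M are assumed to be
 * BIT(12) and BIT(21), per the driver's page-shift macros. */
static bool bnxt_re_dev_supports_2m(const struct ib_device_attr *attr)
{
	return !!(attr->page_size_cap & BNXT_RE_PAGE_SIZE_2M);
}
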
@@ -248,8 +248,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
 				    IB_PORT_VENDOR_CLASS_SUP |
 				    IB_PORT_IP_BASED_GIDS;
 
-	/* Max MSG size set to 2G for now */
-	port_attr->max_msg_sz = 0x80000000;
+	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
 	port_attr->bad_pkey_cntr = 0;
 	port_attr->qkey_viol_cntr = 0;
 	port_attr->pkey_tbl_len = dev_attr->max_pkey;
@@ -542,7 +541,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
 	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
 	pbl_tbl = dma_addr;
 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
-			       BNXT_RE_FENCE_PBL_SIZE, false);
+			       BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
 	if (rc) {
 		dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
 		goto fail;
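
The fixed-page callers now pass the backing page size explicitly. The assumed shape of the updated helper after this series (a sketch; the real declaration lives in the qplib headers):

/* Assumed prototype after this change: the trailing buf_pg_size
 * argument carries the HW page size backing the PBL entries. */
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
		      u64 *pbl_tbl, int num_pbls, bool block, u32 buf_pg_size);
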
@@ -3091,7 +3090,8 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
 
 	mr->qplib_mr.hwq.level = PBL_LVL_MAX;
 	mr->qplib_mr.total_size = -1; /* Infinite length */
-	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false);
+	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
+			       PAGE_SIZE);
 	if (rc)
 		goto fail_mr;
 
@@ -3117,10 +3117,8 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
 	int rc;
 
 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-	if (rc) {
+	if (rc)
 		dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
-		return rc;
-	}
 
 	if (mr->pages) {
 		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
@@ -3183,7 +3181,7 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
 
 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
 	if (rc)
-		goto fail;
+		goto bail;
 
 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
 	mr->ib_mr.rkey = mr->ib_mr.lkey;
@@ -3205,9 +3203,10 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
 	return &mr->ib_mr;
 
 fail_mr:
-	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-fail:
 	kfree(mr->pages);
+fail:
+	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+bail:
 	kfree(mr);
 	return ERR_PTR(rc);
 }
@@ -3261,6 +3260,46 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
 	return rc;
 }
 
+static int bnxt_re_page_size_ok(int page_shift)
+{
+	switch (page_shift) {
+	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
+	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
+	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
+	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
+	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
+	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
+	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
+	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
+			     int page_shift)
+{
+	u64 *pbl_tbl = pbl_tbl_orig;
+	u64 paddr;
+	u64 page_mask = (1ULL << page_shift) - 1;
+	int i, pages;
+	struct scatterlist *sg;
+	int entry;
+
+	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+		pages = sg_dma_len(sg) >> PAGE_SHIFT;
+		for (i = 0; i < pages; i++) {
+			paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
+			if (pbl_tbl == pbl_tbl_orig)
+				*pbl_tbl++ = paddr & ~page_mask;
+			else if ((paddr & page_mask) == 0)
+				*pbl_tbl++ = paddr;
+		}
+	}
+	return pbl_tbl - pbl_tbl_orig;
+}
+
 /* uverbs */
 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 				  u64 virt_addr, int mr_access_flags,
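
fill_umem_pbl_tbl() walks the umem scatterlist in 4K steps but emits one PBL entry per page_shift-sized page: the first entry is the mask-aligned base, and later 4K addresses are kept only when they start a new large page. A self-contained model of that dedup logic, with a plain array standing in for the scatterlist:

#include <stdint.h>

/* Hypothetical stand-alone model of the fill_umem_pbl_tbl() logic above:
 * pages_4k[] plays the role of the flattened sg list (4K DMA addresses),
 * and the return value is the number of PBL entries produced. */
static int fill_pbl_model(const uint64_t *pages_4k, int n_4k,
			  uint64_t *pbl, int page_shift)
{
	uint64_t page_mask = (1ULL << page_shift) - 1;
	int out = 0;
	int i;

	for (i = 0; i < n_4k; i++) {
		if (out == 0)
			pbl[out++] = pages_4k[i] & ~page_mask;	/* aligned base of first page */
		else if ((pages_4k[i] & page_mask) == 0)
			pbl[out++] = pages_4k[i];		/* start of each further page */
	}
	return out;
}

For page_shift == 12 this degenerates to one entry per 4K page, matching the old per-sg loop; for page_shift == 21 each 2M range collapses to a single entry.
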
@@ -3270,10 +3309,8 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	struct bnxt_re_dev *rdev = pd->rdev;
 	struct bnxt_re_mr *mr;
 	struct ib_umem *umem;
-	u64 *pbl_tbl, *pbl_tbl_orig;
-	int i, umem_pgs, pages, rc;
-	struct scatterlist *sg;
-	int entry;
+	u64 *pbl_tbl = NULL;
+	int umem_pgs, page_shift, rc;
 
 	if (length > BNXT_RE_MAX_MR_SIZE) {
 		dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
@@ -3290,76 +3327,80 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
 
+	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+	if (rc) {
+		dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
+		goto free_mr;
+	}
+	/* The fixed portion of the rkey is the same as the lkey */
+	mr->ib_mr.rkey = mr->qplib_mr.rkey;
+
 	umem = ib_umem_get(ib_pd->uobject->context, start, length,
 			   mr_access_flags, 0);
 	if (IS_ERR(umem)) {
 		dev_err(rdev_to_dev(rdev), "Failed to get umem");
 		rc = -EFAULT;
-		goto free_mr;
+		goto free_mrw;
 	}
 	mr->ib_umem = umem;
 
-	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
-	if (rc) {
-		dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
-		goto release_umem;
-	}
-	/* The fixed portion of the rkey is the same as the lkey */
-	mr->ib_mr.rkey = mr->qplib_mr.rkey;
-
 	mr->qplib_mr.va = virt_addr;
 	umem_pgs = ib_umem_page_count(umem);
 	if (!umem_pgs) {
 		dev_err(rdev_to_dev(rdev), "umem is invalid!");
 		rc = -EINVAL;
-		goto free_mrw;
+		goto free_umem;
 	}
 	mr->qplib_mr.total_size = length;
 
 	pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL);
 	if (!pbl_tbl) {
-		rc = -EINVAL;
-		goto free_mrw;
+		rc = -ENOMEM;
+		goto free_umem;
 	}
-	pbl_tbl_orig = pbl_tbl;
 
-	if (umem->hugetlb) {
-		dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
+	page_shift = umem->page_shift;
+
+	if (!bnxt_re_page_size_ok(page_shift)) {
+		dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
 		rc = -EFAULT;
 		goto fail;
 	}
 
-	if (umem->page_shift != PAGE_SHIFT) {
-		dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
-		rc = -EFAULT;
+	if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
+		dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
+			length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
+		rc = -EINVAL;
 		goto fail;
 	}
-	/* Map umem buf ptrs to the PBL */
-	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-		pages = sg_dma_len(sg) >> umem->page_shift;
-		for (i = 0; i < pages; i++, pbl_tbl++)
-			*pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
+	if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
+		page_shift = BNXT_RE_PAGE_SHIFT_2M;
+		dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
+			 1 << page_shift);
 	}
-	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
-			       umem_pgs, false);
+
+	/* Map umem buf ptrs to the PBL */
+	umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
+	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
+			       umem_pgs, false, 1 << page_shift);
 	if (rc) {
 		dev_err(rdev_to_dev(rdev), "Failed to register user MR");
 		goto fail;
 	}
 
-	kfree(pbl_tbl_orig);
+	kfree(pbl_tbl);
 
 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
 	mr->ib_mr.rkey = mr->qplib_mr.lkey;
 	atomic_inc(&rdev->mr_count);
 
 	return &mr->ib_mr;
 fail:
-	kfree(pbl_tbl_orig);
+	kfree(pbl_tbl);
+free_umem:
+	ib_umem_release(umem);
 free_mrw:
 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-release_umem:
-	ib_umem_release(umem);
 free_mr:
 	kfree(mr);
 	return ERR_PTR(rc);
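
The registration path above now picks the MR page size in two steps: start from umem->page_shift, then promote hugetlb-backed regions larger than 2M. Condensed into one hypothetical helper for clarity:

/* Hypothetical condensation of the page-size selection in
 * bnxt_re_reg_user_mr(); BNXT_RE_PAGE_SHIFT_2M is assumed to be 21. */
static int bnxt_re_pick_page_shift(bool hugetlb, int umem_page_shift,
				   u64 length)
{
	int page_shift = umem_page_shift;

	if (hugetlb && length > BNXT_RE_PAGE_SIZE_2M)
		page_shift = BNXT_RE_PAGE_SHIFT_2M;	/* use 2M HW pages */

	return page_shift;
}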