@@ -999,12 +999,12 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
 	struct mlx5_ib_alloc_ucontext_resp resp = {};
 	struct mlx5_ib_ucontext *context;
-	struct mlx5_uuar_info *uuari;
+	struct mlx5_bfreg_info *bfregi;
 	struct mlx5_uar *uars;
-	int gross_uuars;
+	int gross_bfregs;
 	int num_uars;
 	int ver;
-	int uuarn;
+	int bfregn;
 	int err;
 	int i;
 	size_t reqlen;
@@ -1032,10 +1032,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	if (req.flags)
 		return ERR_PTR(-EINVAL);
 
-	if (req.total_num_uuars > MLX5_MAX_UUARS)
+	if (req.total_num_bfregs > MLX5_MAX_BFREGS)
 		return ERR_PTR(-ENOMEM);
 
-	if (req.total_num_uuars == 0)
+	if (req.total_num_bfregs == 0)
 		return ERR_PTR(-EINVAL);
 
 	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
@@ -1046,13 +1046,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 				 reqlen - sizeof(req)))
 			return ERR_PTR(-EOPNOTSUPP);
 
-	req.total_num_uuars = ALIGN(req.total_num_uuars,
-				    MLX5_NON_FP_BF_REGS_PER_PAGE);
-	if (req.num_low_latency_uuars > req.total_num_uuars - 1)
+	req.total_num_bfregs = ALIGN(req.total_num_bfregs,
+				     MLX5_NON_FP_BFREGS_PER_UAR);
+	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
 		return ERR_PTR(-EINVAL);
 
-	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
-	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
+	num_uars = req.total_num_bfregs / MLX5_NON_FP_BFREGS_PER_UAR;
+	gross_bfregs = num_uars * MLX5_BFREGS_PER_UAR;
 	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
 	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
 		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
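
The sizing arithmetic in the hunk above works in whole UARs: the requested bfreg count is rounded up to a multiple of the non-fast-path slots per UAR, and the gross count then also includes the fast-path slots. A minimal standalone sketch of that arithmetic, assuming the constants keep the values they had in the mlx5 headers of this era (4 bfreg slots per UAR, 2 of them non-fast-path); the toy request size is made up:

#include <stdio.h>

/* Assumed values; the real ones live in the mlx5 headers. */
#define MLX5_BFREGS_PER_UAR        4  /* total bfreg slots in one UAR page */
#define MLX5_NON_FP_BFREGS_PER_UAR 2  /* slots usable as regular bfregs */

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	int total_num_bfregs = 5; /* hypothetical userspace request */

	/* Round the request up to whole UARs' worth of regular bfregs. */
	total_num_bfregs = ALIGN_UP(total_num_bfregs,
				    MLX5_NON_FP_BFREGS_PER_UAR);

	int num_uars = total_num_bfregs / MLX5_NON_FP_BFREGS_PER_UAR;
	int gross_bfregs = num_uars * MLX5_BFREGS_PER_UAR;

	/* 5 -> 6 regular bfregs -> 3 UARs -> 12 gross slots (incl. fast path) */
	printf("bfregs=%d uars=%d gross=%d\n",
	       total_num_bfregs, num_uars, gross_bfregs);
	return 0;
}
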
@@ -1072,32 +1072,33 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	if (!context)
 		return ERR_PTR(-ENOMEM);
 
-	uuari = &context->uuari;
-	mutex_init(&uuari->lock);
+	bfregi = &context->bfregi;
+	mutex_init(&bfregi->lock);
 	uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
 	if (!uars) {
 		err = -ENOMEM;
 		goto out_ctx;
 	}
 
-	uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
-				sizeof(*uuari->bitmap),
+	bfregi->bitmap = kcalloc(BITS_TO_LONGS(gross_bfregs),
+				 sizeof(*bfregi->bitmap),
 				GFP_KERNEL);
-	if (!uuari->bitmap) {
+	if (!bfregi->bitmap) {
 		err = -ENOMEM;
 		goto out_uar_ctx;
 	}
 	/*
-	 * clear all fast path uuars
+	 * clear all fast path bfregs
 	 */
-	for (i = 0; i < gross_uuars; i++) {
-		uuarn = i & 3;
-		if (uuarn == 2 || uuarn == 3)
-			set_bit(i, uuari->bitmap);
+	for (i = 0; i < gross_bfregs; i++) {
+		bfregn = i & 3;
+		if (bfregn == 2 || bfregn == 3)
+			set_bit(i, bfregi->bitmap);
 	}
 
-	uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
-	if (!uuari->count) {
+	bfregi->count = kcalloc(gross_bfregs,
+				sizeof(*bfregi->count), GFP_KERNEL);
+	if (!bfregi->count) {
 		err = -ENOMEM;
 		goto out_bitmap;
 	}
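
The marking loop above encodes the UAR layout in the allocation bitmap: each UAR contributes four consecutive slots, and slots 2 and 3 of every UAR are fast-path bfregs, so they are pre-set to busy and the regular allocator never hands them out. A self-contained sketch of that marking, with a toy single-word bitmap standing in for the kernel's set_bit()/bitmap helpers (an assumption that gross_bfregs fits in one unsigned long):

#include <stdio.h>

/* Toy stand-ins for the kernel bitmap helpers. */
static unsigned long bitmap;
#define set_bit(i)  (bitmap |= 1UL << (i))
#define test_bit(i) ((bitmap >> (i)) & 1UL)

int main(void)
{
	int gross_bfregs = 12; /* 3 UARs x 4 slots, as in the sizing example */

	/* Slots 2 and 3 of every UAR are fast-path; mark them busy up front. */
	for (int i = 0; i < gross_bfregs; i++) {
		int bfregn = i & 3; /* position within the owning UAR */
		if (bfregn == 2 || bfregn == 3)
			set_bit(i);
	}

	for (int i = 0; i < gross_bfregs; i++)
		printf("slot %2d: %s\n", i,
		       test_bit(i) ? "fast-path (busy)" : "free");
	return 0;
}
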
@@ -1130,7 +1131,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	INIT_LIST_HEAD(&context->db_page_list);
 	mutex_init(&context->db_page_mutex);
 
-	resp.tot_uuars = req.total_num_uuars;
+	resp.tot_bfregs = req.total_num_bfregs;
 	resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
 
 	if (field_avail(typeof(resp), cqe_version, udata->outlen))
@@ -1163,10 +1164,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	if (err)
 		goto out_td;
 
-	uuari->ver = ver;
-	uuari->num_low_latency_uuars = req.num_low_latency_uuars;
-	uuari->uars = uars;
-	uuari->num_uars = num_uars;
+	bfregi->ver = ver;
+	bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
+	bfregi->uars = uars;
+	bfregi->num_uars = num_uars;
 	context->cqe_version = resp.cqe_version;
 
 	return &context->ibucontext;
@@ -1182,10 +1183,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	for (i--; i >= 0; i--)
 		mlx5_cmd_free_uar(dev->mdev, uars[i].index);
 out_count:
-	kfree(uuari->count);
+	kfree(bfregi->count);
 
 out_bitmap:
-	kfree(uuari->bitmap);
+	kfree(bfregi->bitmap);
 
 out_uar_ctx:
 	kfree(uars);
@@ -1199,22 +1200,23 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 {
 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
-	struct mlx5_uuar_info *uuari = &context->uuari;
+	struct mlx5_bfreg_info *bfregi = &context->bfregi;
 	int i;
 
 	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
 		mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
 
 	free_page(context->upd_xlt_page);
 
-	for (i = 0; i < uuari->num_uars; i++) {
-		if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
-			mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
+	for (i = 0; i < bfregi->num_uars; i++) {
+		if (mlx5_cmd_free_uar(dev->mdev, bfregi->uars[i].index))
+			mlx5_ib_warn(dev, "Failed to free UAR 0x%x\n",
+				     bfregi->uars[i].index);
 	}
 
-	kfree(uuari->count);
-	kfree(uuari->bitmap);
-	kfree(uuari->uars);
+	kfree(bfregi->count);
+	kfree(bfregi->bitmap);
+	kfree(bfregi->uars);
 	kfree(context);
 
 	return 0;
@@ -1377,7 +1379,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
 		    struct vm_area_struct *vma,
 		    struct mlx5_ib_ucontext *context)
 {
-	struct mlx5_uuar_info *uuari = &context->uuari;
+	struct mlx5_bfreg_info *bfregi = &context->bfregi;
 	int err;
 	unsigned long idx;
 	phys_addr_t pfn, pa;
@@ -1408,10 +1410,10 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
 		return -EINVAL;
 
 	idx = get_index(vma->vm_pgoff);
-	if (idx >= uuari->num_uars)
+	if (idx >= bfregi->num_uars)
 		return -EINVAL;
 
-	pfn = uar_index2pfn(dev, uuari->uars[idx].index);
+	pfn = uar_index2pfn(dev, bfregi->uars[idx].index);
 	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
 
 	vma->vm_page_prot = prot;
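
In the mmap path above, the page offset selects a UAR: get_index() decodes an index from vma->vm_pgoff, the index is bounds-checked against the context's UAR count, and uar_index2pfn() turns the UAR's hardware index into a physical page frame number. A hedged sketch of the index-to-pfn idea, assuming UAR pages sit as consecutive 4 KiB pages at the start of the device BAR; the BAR address and counts here are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/*
 * Assumed layout: UAR pages are consecutive 4 KiB pages at the start of
 * BAR 0, so pfn = BAR start pfn + UAR index. The real driver computes
 * this from the PCI resource; the base address below is made up.
 */
static uint64_t uar_index2pfn(uint64_t bar0_start, uint32_t uar_index)
{
	return (bar0_start >> PAGE_SHIFT) + uar_index;
}

int main(void)
{
	uint64_t bar0 = 0xfe000000;	/* hypothetical BAR 0 base */
	uint32_t num_uars = 3;
	uint32_t idx = 1;		/* as decoded from vma->vm_pgoff */

	if (idx >= num_uars)		/* same bounds check as the driver */
		return 1;

	printf("uar idx %u -> pfn 0x%llx\n", idx,
	       (unsigned long long)uar_index2pfn(bar0, idx));
	return 0;
}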