@@ -2074,6 +2074,24 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
 			      virt_to_page(dev->mdev->clock_info));
 }
 
+static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
+{
+	struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
+	struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
+	struct mlx5_ib_dm *mdm;
+
+	switch (mentry->mmap_flag) {
+	case MLX5_IB_MMAP_TYPE_MEMIC:
+		mdm = container_of(mentry, struct mlx5_ib_dm, mentry);
+		mlx5_cmd_dealloc_memic(&dev->dm, mdm->dev_addr,
+				       mdm->size);
+		kfree(mdm);
+		break;
+	default:
+		WARN_ON(true);
+	}
+}
+
 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
 		    struct vm_area_struct *vma,
 		    struct mlx5_ib_ucontext *context)
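The new free callback climbs from the generic entry back to the driver structures with two container_of() steps: to_mmmap() recovers the mlx5_user_mmap_entry embedded in the core entry, and the MEMIC case then recovers the mlx5_ib_dm that embeds it. A minimal userspace sketch of that recovery, using simplified stand-in structs rather than the real kernel definitions:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rdma_user_mmap_entry { unsigned long start_pgoff; };

struct mlx5_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry; /* embedded core entry */
	int mmap_flag;
	unsigned long long address;
};

struct mlx5_ib_dm {
	unsigned long long dev_addr;
	size_t size;
	struct mlx5_user_mmap_entry mentry;     /* embedded driver entry */
};

int main(void)
{
	struct mlx5_ib_dm dm = { .dev_addr = 0x1000, .size = 4096 };
	/* The core hands back only the embedded entry... */
	struct rdma_user_mmap_entry *entry = &dm.mentry.rdma_entry;

	/* ...and two container_of steps recover the outer objects,
	 * mirroring to_mmmap() and the MEMIC case in the patch. */
	struct mlx5_user_mmap_entry *mentry =
		container_of(entry, struct mlx5_user_mmap_entry, rdma_entry);
	struct mlx5_ib_dm *mdm =
		container_of(mentry, struct mlx5_ib_dm, mentry);

	printf("recovered dev_addr=%#llx size=%zu\n", mdm->dev_addr, mdm->size);
	return 0;
}

Because the offsets come from the embedding layout, no back-pointer has to be stored in the entry itself.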
@@ -2186,26 +2204,55 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
 	return err;
 }
 
-static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+static int add_dm_mmap_entry(struct ib_ucontext *context,
+			     struct mlx5_ib_dm *mdm,
+			     u64 address)
+{
+	mdm->mentry.mmap_flag = MLX5_IB_MMAP_TYPE_MEMIC;
+	mdm->mentry.address = address;
+	return rdma_user_mmap_entry_insert_range(
+			context, &mdm->mentry.rdma_entry,
+			mdm->size,
+			MLX5_IB_MMAP_DEVICE_MEM << 16,
+			(MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
+}
+
+static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
+{
+	unsigned long idx;
+	u8 command;
+
+	command = get_command(vma->vm_pgoff);
+	idx = get_extended_index(vma->vm_pgoff);
+
+	return (command << 16 | idx);
+}
+
+static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
+			       struct vm_area_struct *vma,
+			       struct ib_ucontext *ucontext)
 {
-	struct mlx5_ib_ucontext *mctx = to_mucontext(context);
-	struct mlx5_ib_dev *dev = to_mdev(context->device);
-	u16 page_idx = get_extended_index(vma->vm_pgoff);
-	size_t map_size = vma->vm_end - vma->vm_start;
-	u32 npages = map_size >> PAGE_SHIFT;
+	struct mlx5_user_mmap_entry *mentry;
+	struct rdma_user_mmap_entry *entry;
+	unsigned long pgoff;
+	pgprot_t prot;
 	phys_addr_t pfn;
+	int ret;
 
-	if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
-	    page_idx + npages)
+	pgoff = mlx5_vma_to_pgoff(vma);
+	entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff);
+	if (!entry)
 		return -EINVAL;
 
-	pfn = ((dev->mdev->bar_addr +
-		MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
-	       PAGE_SHIFT) +
-	      page_idx;
-	return rdma_user_mmap_io(context, vma, pfn, map_size,
-				 pgprot_writecombine(vma->vm_page_prot),
-				 NULL);
+	mentry = to_mmmap(entry);
+	pfn = (mentry->address >> PAGE_SHIFT);
+	prot = pgprot_writecombine(vma->vm_page_prot);
+	ret = rdma_user_mmap_io(ucontext, vma, pfn,
+				entry->npages * PAGE_SIZE,
+				prot,
+				entry);
+	rdma_user_mmap_entry_put(&mentry->rdma_entry);
+	return ret;
 }
 
 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
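mlx5_vma_to_pgoff() folds the mmap command and the extended index back into the flat key under which add_dm_mmap_entry() registered the entry, so every MEMIC entry lives in a 64K window of page offsets reserved for the DEVICE_MEM command. A small sketch of that keying; the command value and the plain-shift decoding are stand-ins for the driver's enum and its get_command()/get_extended_index() helpers:

#include <stdio.h>

/* Stand-in value for the demo; the real enum lives in mlx5_ib.h. */
#define MLX5_IB_MMAP_DEVICE_MEM 7

int main(void)
{
	unsigned long min_pgoff = MLX5_IB_MMAP_DEVICE_MEM << 16;
	unsigned long max_pgoff = min_pgoff + (1UL << 16) - 1;

	/* add_dm_mmap_entry() reserves keys in this window, so each
	 * MEMIC entry decodes to command DEVICE_MEM plus a 16-bit
	 * index -- the same key mlx5_vma_to_pgoff() rebuilds on mmap. */
	printf("MEMIC pgoff window: [%#lx, %#lx]\n", min_pgoff, max_pgoff);

	unsigned long pgoff = min_pgoff + 42;   /* hypothetical entry */
	unsigned char command = pgoff >> 16;
	unsigned long idx = pgoff & 0xFFFF;
	printf("command=%u idx=%lu\n", (unsigned)command, idx);
	return 0;
}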
@@ -2248,11 +2295,8 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
 	case MLX5_IB_MMAP_CLOCK_INFO:
 		return mlx5_ib_mmap_clock_info_page(dev, vma, context);
 
-	case MLX5_IB_MMAP_DEVICE_MEM:
-		return dm_mmap(ibcontext, vma);
-
 	default:
-		return -EINVAL;
+		return mlx5_ib_mmap_offset(dev, vma, ibcontext);
 	}
 
 	return 0;
@@ -2288,8 +2332,9 @@ static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
 {
 	struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
 	u64 start_offset;
-	u32 page_idx;
+	u16 page_idx;
 	int err;
+	u64 address;
 
 	dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
@@ -2298,28 +2343,30 @@ static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
 	if (err)
 		return err;
 
-	page_idx = (dm->dev_addr - pci_resource_start(dm_db->dev->pdev, 0) -
-		    MLX5_CAP64_DEV_MEM(dm_db->dev, memic_bar_start_addr)) >>
-		   PAGE_SHIFT;
+	address = dm->dev_addr & PAGE_MASK;
+	err = add_dm_mmap_entry(ctx, dm, address);
+	if (err)
+		goto err_dealloc;
 
+	page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
 	err = uverbs_copy_to(attrs,
 			     MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
-			     &page_idx, sizeof(page_idx));
+			     &page_idx,
+			     sizeof(page_idx));
 	if (err)
-		goto err_dealloc;
+		goto err_copy;
 
 	start_offset = dm->dev_addr & ~PAGE_MASK;
 	err = uverbs_copy_to(attrs,
 			     MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
 			     &start_offset, sizeof(start_offset));
 	if (err)
-		goto err_dealloc;
-
-	bitmap_set(to_mucontext(ctx)->dm_pages, page_idx,
-		   DIV_ROUND_UP(dm->size, PAGE_SIZE));
+		goto err_copy;
 
 	return 0;
 
+err_copy:
+	rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
 err_dealloc:
 	mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
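The unwind labels are ordered so that a later failure undoes more: a failed copy removes the mmap entry and then falls through to the MEMIC deallocation, while a failed insert skips straight to the deallocation. A compilable toy of that goto ladder, with stub functions standing in for the real allocation, insertion, and copy steps:

#include <stdio.h>

/* Hypothetical stand-ins for the steps in handle_alloc_dm_memic(). */
static int alloc_memic(void)    { return 0; }
static int insert_entry(void)   { return 0; }
static int copy_resp(void)      { return -14; /* say, -EFAULT */ }
static void remove_entry(void)  { puts("rdma_user_mmap_entry_remove"); }
static void dealloc_memic(void) { puts("mlx5_cmd_dealloc_memic"); }

static int handle_alloc(void)
{
	int err;

	err = alloc_memic();
	if (err)
		return err;
	err = insert_entry();
	if (err)
		goto err_dealloc;
	err = copy_resp();              /* fails in this demo */
	if (err)
		goto err_copy;
	return 0;

err_copy:
	remove_entry(); /* falls through: a later failure unwinds more */
err_dealloc:
	dealloc_memic();
	return err;
}

int main(void)
{
	printf("handle_alloc() = %d\n", handle_alloc());
	return 0;
}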
@@ -2423,23 +2470,13 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
 	struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
 		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
 	struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
-	struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm;
 	struct mlx5_ib_dm *dm = to_mdm(ibdm);
-	u32 page_idx;
 	int ret;
 
 	switch (dm->type) {
 	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
-		ret = mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
-		if (ret)
-			return ret;
-
-		page_idx = (dm->dev_addr - pci_resource_start(dev->pdev, 0) -
-			    MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr)) >>
-			   PAGE_SHIFT;
-		bitmap_clear(ctx->dm_pages, page_idx,
-			     DIV_ROUND_UP(dm->size, PAGE_SIZE));
-		break;
+		rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
+		return 0;
 	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
 		ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
 					     dm->size, ctx->devx_uid, dm->dev_addr,
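For MEMIC the dealloc verb no longer calls mlx5_cmd_dealloc_memic() directly: it removes the mmap entry and lets the core's reference counting invoke mlx5_ib_mmap_free() once the last mapping is gone, so freed device memory cannot be handed out again while a process still has it mapped. A toy model of that deferred-free contract, with hypothetical names and a plain counter in place of the core's refcounting:

#include <stdio.h>

struct entry {
	int refs;
	void (*mmap_free)(struct entry *);
};

static void entry_put(struct entry *e)
{
	if (--e->refs == 0)
		e->mmap_free(e);        /* e.g. mlx5_ib_mmap_free */
}

static void memic_free(struct entry *e)
{
	(void)e;
	puts("dealloc MEMIC + kfree");
}

int main(void)
{
	struct entry e = { .refs = 2, .mmap_free = memic_free };

	entry_put(&e);  /* ib_dealloc_dm: rdma_user_mmap_entry_remove */
	puts("still mapped by userspace...");
	entry_put(&e);  /* last munmap drops the final reference */
	return 0;
}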
@@ -3544,10 +3581,6 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
 	}
 
 	INIT_LIST_HEAD(&handler->list);
-	if (dst) {
-		memcpy(&dest_arr[0], dst, sizeof(*dst));
-		dest_num++;
-	}
 
 	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
 		err = parse_flow_attr(dev->mdev, spec,
@@ -3560,6 +3593,11 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
 		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
 	}
 
+	if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) {
+		memcpy(&dest_arr[0], dst, sizeof(*dst));
+		dest_num++;
+	}
+
 	if (!flow_is_multicast_only(flow_attr))
 		set_underlay_qp(dev, spec, underlay_qpn);
@@ -3600,10 +3638,8 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
 	}
 
 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
-		if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) {
+		if (!dest_num)
 			rule_dst = NULL;
-			dest_num = 0;
-		}
 	} else {
 		if (is_egress)
 			flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
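Taken together, the flow-rule hunks mean a rule only carries a destination when something real needs one: the forward destination is skipped for DROP, a count action still contributes its counter destination elsewhere in the function, and rule_dst is nulled only when dest_num is genuinely zero. A small table of the four drop/count combinations under that logic, assuming a non-NULL dst and (as in the surrounding function) that a count action appends a counter destination:

#include <stdio.h>

#define ACTION_DROP  (1 << 0)   /* flag values are stand-ins */
#define ACTION_COUNT (1 << 1)

int main(void)
{
	for (int action = 0; action < 4; action++) {
		int dest_num = 0;

		if (!(action & ACTION_DROP))
			dest_num++;     /* dst copied into dest_arr */
		if (action & ACTION_COUNT)
			dest_num++;     /* counter destination */

		printf("drop=%d count=%d -> dest_num=%d rule_dst=%s\n",
		       !!(action & ACTION_DROP), !!(action & ACTION_COUNT),
		       dest_num, dest_num ? "dest_arr" : "NULL");
	}
	return 0;
}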
@@ -6236,6 +6272,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
 	.map_mr_sg = mlx5_ib_map_mr_sg,
 	.map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
 	.mmap = mlx5_ib_mmap,
+	.mmap_free = mlx5_ib_mmap_free,
 	.modify_cq = mlx5_ib_modify_cq,
 	.modify_device = mlx5_ib_modify_device,
 	.modify_port = mlx5_ib_modify_port,