@@ -1521,8 +1521,8 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct mlx5_create_mkey_mbox_in *in;
 	struct mlx5_ib_mr *mr;
-	int access_mode, err;
-	int ndescs = roundup(max_num_sg, 4);
+	int ndescs = ALIGN(max_num_sg, 4);
+	int err;

 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
@@ -1540,7 +1540,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);

 	if (mr_type == IB_MR_TYPE_MEM_REG) {
-		access_mode = MLX5_ACCESS_MODE_MTT;
+		mr->access_mode = MLX5_ACCESS_MODE_MTT;
 		in->seg.log2_page_size = PAGE_SHIFT;

 		err = mlx5_alloc_priv_descs(pd->device, mr,
@@ -1550,6 +1550,15 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,

 		mr->desc_size = sizeof(u64);
 		mr->max_descs = ndescs;
+	} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
+		mr->access_mode = MLX5_ACCESS_MODE_KLM;
+
+		err = mlx5_alloc_priv_descs(pd->device, mr,
+					    ndescs, sizeof(struct mlx5_klm));
+		if (err)
+			goto err_free_in;
+		mr->desc_size = sizeof(struct mlx5_klm);
+		mr->max_descs = ndescs;
 	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
 		u32 psv_index[2];

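Note: the new IB_MR_TYPE_SG_GAPS branch above backs the MR with KLM descriptors rather than MTT page entries, so each mapped element carries its own base address and byte count instead of having to be page-aligned. A minimal consumer-side sketch, not part of this patch (pd and nents are assumed to exist in the caller):

	struct ib_mr *mr;

	/* Request an MR that can register a gapped scatterlist;
	 * nents bounds how many KLM descriptors get allocated. */
	mr = ib_alloc_mr(pd, IB_MR_TYPE_SG_GAPS, nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);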
@@ -1568,7 +1577,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 		if (err)
 			goto err_free_sig;

-		access_mode = MLX5_ACCESS_MODE_KLM;
+		mr->access_mode = MLX5_ACCESS_MODE_KLM;
 		mr->sig->psv_memory.psv_idx = psv_index[0];
 		mr->sig->psv_wire.psv_idx = psv_index[1];

@@ -1582,7 +1591,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 		goto err_free_in;
 	}

-	in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
+	in->seg.flags = MLX5_PERM_UMR_EN | mr->access_mode;
 	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, sizeof(*in),
 				    NULL, NULL, NULL);
 	if (err)
@@ -1739,6 +1748,32 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
 	return ret;
 }

+static int
+mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
+		   struct scatterlist *sgl,
+		   unsigned short sg_nents)
+{
+	struct scatterlist *sg = sgl;
+	struct mlx5_klm *klms = mr->descs;
+	u32 lkey = mr->ibmr.pd->local_dma_lkey;
+	int i;
+
+	mr->ibmr.iova = sg_dma_address(sg);
+	mr->ibmr.length = 0;
+	mr->ndescs = sg_nents;
+
+	for_each_sg(sgl, sg, sg_nents, i) {
+		if (unlikely(i > mr->max_descs))
+			break;
+		klms[i].va = cpu_to_be64(sg_dma_address(sg));
+		klms[i].bcount = cpu_to_be32(sg_dma_len(sg));
+		klms[i].key = cpu_to_be32(lkey);
+		mr->ibmr.length += sg_dma_len(sg);
+	}
+
+	return i;
+}
+
 static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
 {
 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
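The helper added above turns each DMA-mapped scatterlist element into one (va, bcount, key) KLM triple keyed by the PD's local_dma_lkey; iova is taken from the first element and length is accumulated across all of them. A worked illustration with made-up addresses:

	/* Two gapped elements:
	 *   sg[0]: dma addr 0x10000, len 0x200
	 *   sg[1]: dma addr 0x30000, len 0x100
	 * yield
	 *   klms[0] = { .va = 0x10000, .bcount = 0x200, .key = lkey }
	 *   klms[1] = { .va = 0x30000, .bcount = 0x100, .key = lkey }
	 * with mr->ibmr.iova = 0x10000 and mr->ibmr.length = 0x300. */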
@@ -1766,7 +1801,10 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
 				   mr->desc_size * mr->max_descs,
 				   DMA_TO_DEVICE);

-	n = ib_sg_to_pages(ibmr, sg, sg_nents, mlx5_set_page);
+	if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents);
+	else
+		n = ib_sg_to_pages(ibmr, sg, sg_nents, mlx5_set_page);

 	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
 				      mr->desc_size * mr->max_descs,
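With the dispatch above, mlx5_ib_map_mr_sg() routes KLM-backed MRs through mlx5_ib_sg_to_klms() and leaves MTT-backed MRs on ib_sg_to_pages(). A hedged end-to-end sketch of the mapping step (sgl and nents are assumed; ib_map_mr_sg() is shown with the page_size argument it takes as of this patch, which the KLM path ignores):

	int n;

	/* sgl must already be DMA-mapped, e.g. via ib_dma_map_sg(). */
	n = ib_map_mr_sg(mr, sgl, nents, PAGE_SIZE);
	if (n < nents) {
		/* Fewer elements mapped than requested: give up. */
		ib_dereg_mr(mr);
		return -EINVAL;
	}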