@@ -1229,7 +1229,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
  */
 static int exynos_iommu_map(struct iommu_domain *iommu_domain,
 			    unsigned long l_iova, phys_addr_t paddr, size_t size,
-			    int prot, gfp_t gfp)
+			    size_t count, int prot, gfp_t gfp, size_t *mapped)
 {
 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
 	sysmmu_pte_t *entry;
@@ -1263,6 +1263,8 @@ static int exynos_iommu_map(struct iommu_domain *iommu_domain,
 	if (ret)
 		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
 			__func__, ret, size, iova);
+	else
+		*mapped = size;
 
 	spin_unlock_irqrestore(&domain->pgtablelock, flags);
 
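With the ->map_pages() conversion, the caller hands the driver a page size plus a count of equally sized pages and expects the number of bytes actually mapped to be reported back through *mapped. This driver still maps a single granule per call, so on success it reports *mapped = size and relies on the caller to iterate. Below is a minimal userspace sketch of that contract, assuming the semantics documented for the iommu_domain_ops hooks in include/linux/iommu.h; the toy_* names are hypothetical illustrations, not kernel symbols.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy driver hook: like the patched driver, maps one granule per call. */
static int toy_map_pages(unsigned long iova, uint64_t paddr,
			 size_t pgsize, size_t count, size_t *mapped)
{
	(void)count;		/* count is accepted but only one page is consumed */
	printf("map %#zx bytes: iova %#lx -> pa %#llx\n",
	       pgsize, iova, (unsigned long long)paddr);
	*mapped = pgsize;	/* report progress back to the caller */
	return 0;
}

/* Toy caller: keeps invoking the hook until the whole range is mapped. */
static int toy_core_map(unsigned long iova, uint64_t paddr,
			size_t size, size_t pgsize)
{
	while (size) {
		size_t mapped = 0;
		int ret = toy_map_pages(iova, paddr, pgsize,
					size / pgsize, &mapped);
		if (ret)
			return ret;
		iova += mapped;
		paddr += mapped;
		size -= mapped;
	}
	return 0;
}

int main(void)
{
	/* map three 4 KiB pages; the hook is called once per page */
	return toy_core_map(0x10000000UL, 0x40000000ULL, 3 * 4096, 4096);
}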
@@ -1284,7 +1286,7 @@ static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain
 }
 
 static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
-				 unsigned long l_iova, size_t size,
+				 unsigned long l_iova, size_t size, size_t count,
 				 struct iommu_iotlb_gather *gather)
 {
 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
@@ -1477,8 +1479,8 @@ static const struct iommu_ops exynos_iommu_ops = {
 	.of_xlate = exynos_iommu_of_xlate,
 	.default_domain_ops = &(const struct iommu_domain_ops) {
 		.attach_dev	= exynos_iommu_attach_device,
-		.map		= exynos_iommu_map,
-		.unmap		= exynos_iommu_unmap,
+		.map_pages	= exynos_iommu_map,
+		.unmap_pages	= exynos_iommu_unmap,
 		.iova_to_phys	= exynos_iommu_iova_to_phys,
 		.free		= exynos_iommu_domain_free,
 	}
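The unmap side follows the same shape: ->unmap_pages() takes a (pgsize, count) pair and returns the number of bytes actually torn down, with 0 signalling that no progress could be made; exynos_iommu_unmap() keeps its one-granule-per-call behaviour, so the new count argument is accepted but not yet used. A companion sketch of that caller/driver loop under the same assumptions, again with hypothetical toy_* names:

#include <stddef.h>
#include <stdio.h>

/* Toy driver hook: tears down one granule, returns bytes unmapped. */
static size_t toy_unmap_pages(unsigned long iova, size_t pgsize, size_t count)
{
	(void)iova;
	(void)count;		/* only one page handled per call */
	return pgsize;
}

/* Toy caller: loops until the range is gone or the driver stalls. */
static size_t toy_core_unmap(unsigned long iova, size_t size, size_t pgsize)
{
	size_t unmapped = 0;

	while (unmapped < size) {
		size_t n = toy_unmap_pages(iova + unmapped, pgsize,
					   (size - unmapped) / pgsize);
		if (!n)
			break;	/* no progress: stop and report what we got */
		unmapped += n;
	}
	return unmapped;
}

int main(void)
{
	printf("unmapped %#zx bytes\n",
	       toy_core_unmap(0x10000000UL, 3 * 4096, 4096));
	return 0;
}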