@@ -816,7 +816,8 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
 }
 
 static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
-			phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+			phys_addr_t paddr, size_t size, size_t count,
+			int prot, gfp_t gfp, size_t *mapped)
 {
 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
@@ -849,12 +850,14 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
 				paddr, size, prot);
 
 	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+	if (!ret)
+		*mapped = size;
 
 	return ret;
 }
 
 static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
-			     size_t size, struct iommu_iotlb_gather *gather)
+			     size_t size, size_t count, struct iommu_iotlb_gather *gather)
 {
 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
@@ -1167,8 +1170,8 @@ static const struct iommu_ops rk_iommu_ops = {
 	.of_xlate = rk_iommu_of_xlate,
 	.default_domain_ops = &(const struct iommu_domain_ops) {
 		.attach_dev = rk_iommu_attach_device,
-		.map = rk_iommu_map,
-		.unmap = rk_iommu_unmap,
+		.map_pages = rk_iommu_map,
+		.unmap_pages = rk_iommu_unmap,
 		.iova_to_phys = rk_iommu_iova_to_phys,
 		.free = rk_iommu_domain_free,
 	}
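
For reference, the two callbacks wired up in the last hunk take a page size plus a page count and report the number of bytes mapped through an output parameter. Below is a trimmed sketch of the corresponding prototypes, based on struct iommu_domain_ops in include/linux/iommu.h in kernels that provide map_pages/unmap_pages; unrelated members are omitted and the exact layout may differ between kernel versions.

/*
 * Trimmed sketch of the core IOMMU callback types targeted by this change.
 * Based on struct iommu_domain_ops in include/linux/iommu.h; other members
 * of the structure are left out here.
 */
struct iommu_domain_ops {
	/* Map @pgcount contiguous pages of @pgsize; bytes mapped are returned in @mapped. */
	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	/* Unmap @pgcount pages of @pgsize starting at @iova; returns bytes unmapped. */
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);
	/* ... */
};

The updated rk_iommu_map()/rk_iommu_unmap() signatures in the hunks above match these callback types, which is what allows them to be assigned to .map_pages and .unmap_pages instead of the single-page .map/.unmap hooks.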