@@ -1341,6 +1341,9 @@ static void arm_iommu_sync_sg_for_cpu(struct device *dev,
 	struct scatterlist *s;
 	int i;
 
+	if (dev->dma_coherent)
+		return;
+
 	for_each_sg(sg, s, nents, i)
 		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
 
@@ -1360,6 +1363,9 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
 	struct scatterlist *s;
 	int i;
 
+	if (dev->dma_coherent)
+		return;
+
 	for_each_sg(sg, s, nents, i)
 		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 }
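
The two sg-sync hooks above now bail out early for cache-coherent devices instead of relying on a separate ops table. As a rough illustration of that pattern (not the kernel code itself), the following standalone C sketch uses hypothetical, simplified stand-ins for struct device, struct scatterlist, and __dma_page_dev_to_cpu():

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the kernel structures; only
 * the dma_coherent flag matters for this sketch. */
struct device { bool dma_coherent; };
struct scatterlist { unsigned int offset, length; };

/* Stand-in for __dma_page_dev_to_cpu(): per-segment cache maintenance. */
static void sync_segment_for_cpu(const struct scatterlist *s)
{
	printf("invalidate: offset=%u len=%u\n", s->offset, s->length);
}

/* Models the patched arm_iommu_sync_sg_for_cpu(): a coherent device
 * needs no maintenance, so one merged implementation can return early
 * instead of living in a second, maintenance-free ops table. */
static void sync_sg_for_cpu(struct device *dev, const struct scatterlist *sg,
			    int nents)
{
	if (dev->dma_coherent)
		return;
	for (int i = 0; i < nents; i++)
		sync_segment_for_cpu(&sg[i]);
}

int main(void)
{
	struct scatterlist sg[2] = { { 0, 4096 }, { 128, 512 } };
	struct device coherent = { .dma_coherent = true };
	struct device noncoherent = { .dma_coherent = false };

	sync_sg_for_cpu(&coherent, sg, 2);	/* prints nothing */
	sync_sg_for_cpu(&noncoherent, sg, 2);	/* two invalidates */
	return 0;
}
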
@@ -1493,12 +1499,13 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+	struct page *page;
 	unsigned int offset = handle & ~PAGE_MASK;
 
-	if (!iova)
+	if (dev->dma_coherent || !iova)
 		return;
 
+	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
@@ -1507,12 +1514,13 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+	struct page *page;
 	unsigned int offset = handle & ~PAGE_MASK;
 
-	if (!iova)
+	if (dev->dma_coherent || !iova)
 		return;
 
+	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
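
Both single-sync hunks also reorder the work: the phys_to_page(iommu_iova_to_phys(...)) lookup used to run in the declaration's initializer, i.e. even when the function was about to return for a NULL IOVA (and now for a coherent device as well). A small standalone sketch of that reordering, with a hypothetical iova_to_phys() standing in for the IOMMU page-table walk:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct device { bool dma_coherent; };

/* Hypothetical stand-in for iommu_iova_to_phys(): in the kernel this
 * walks the IOMMU page tables, which is wasted work when the caller is
 * about to return anyway (and dubious for a NULL IOVA). */
static uint64_t iova_to_phys(uint64_t iova)
{
	printf("page-table walk for iova 0x%llx\n", (unsigned long long)iova);
	return iova;	/* identity-mapped, for the sketch only */
}

static void sync_single_for_cpu(struct device *dev, uint64_t handle)
{
	uint64_t iova = handle & ~0xfffULL;	/* PAGE_MASK for 4K pages */
	uint64_t phys;

	/* Guard first: nothing to do, so no translation is performed. */
	if (dev->dma_coherent || !iova)
		return;

	phys = iova_to_phys(iova);
	printf("maintenance on phys 0x%llx\n", (unsigned long long)phys);
}

int main(void)
{
	struct device dev = { .dma_coherent = false };

	sync_single_for_cpu(&dev, 0);		/* NULL IOVA: no walk */
	sync_single_for_cpu(&dev, 0x200040);	/* walk, then maintenance */
	return 0;
}
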
@@ -1536,22 +1544,6 @@ static const struct dma_map_ops iommu_ops = {
 	.unmap_resource		= arm_iommu_unmap_resource,
 };
 
-static const struct dma_map_ops iommu_coherent_ops = {
-	.alloc		= arm_iommu_alloc_attrs,
-	.free		= arm_iommu_free_attrs,
-	.mmap		= arm_iommu_mmap_attrs,
-	.get_sgtable	= arm_iommu_get_sgtable,
-
-	.map_page	= arm_iommu_map_page,
-	.unmap_page	= arm_iommu_unmap_page,
-
-	.map_sg		= arm_iommu_map_sg,
-	.unmap_sg	= arm_iommu_unmap_sg,
-
-	.map_resource	= arm_iommu_map_resource,
-	.unmap_resource	= arm_iommu_unmap_resource,
-};
-
 /**
  * arm_iommu_create_mapping
  * @bus: pointer to the bus holding the client device (for IOMMU calls)
@@ -1750,10 +1742,7 @@ static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		return;
 	}
 
-	if (coherent)
-		set_dma_ops(dev, &iommu_coherent_ops);
-	else
-		set_dma_ops(dev, &iommu_ops);
+	set_dma_ops(dev, &iommu_ops);
 }
 
 static void arm_teardown_iommu_dma_ops(struct device *dev)
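
Taken together, the per-hook dev->dma_coherent checks are what let the final two hunks drop iommu_coherent_ops entirely: setup no longer picks between two dma_map_ops tables, and the decision moves from registration time to call time. A minimal model of that trade-off, with hypothetical names throughout:

#include <stdbool.h>
#include <stdio.h>

struct device { bool dma_coherent; };

struct dma_ops { void (*sync_for_cpu)(struct device *dev); };

/* One implementation that branches at call time... */
static void sync_checked(struct device *dev)
{
	if (dev->dma_coherent)
		return;			/* coherent: nothing to flush */
	printf("cache maintenance\n");
}

/* ...so a single table serves both kinds of device, replacing the old
 * pair of tables selected with "if (coherent)" at setup time. */
static const struct dma_ops iommu_ops_merged = { .sync_for_cpu = sync_checked };

int main(void)
{
	struct device dev = { .dma_coherent = true };

	/* Models the unconditional set_dma_ops(dev, &iommu_ops). */
	const struct dma_ops *ops = &iommu_ops_merged;

	ops->sync_for_cpu(&dev);	/* no-op for a coherent device */
	return 0;
}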