Skip to content
This repository was archived by the owner on Nov 8, 2023. It is now read-only.

Commit 4136ce9

Browse files
rmurphy-arm authored and Christoph Hellwig committed
ARM/dma-mapping: merge IOMMU ops
The dma_sync_* operations are now the only difference between the coherent and non-coherent IOMMU ops. Some minor tweaks to make those safe for coherent devices with minimal overhead, and we can condense down to a single set of DMA ops. Signed-off-by: Robin Murphy <[email protected]> Signed-off-by: Christoph Hellwig <[email protected]> Tested-by: Marc Zyngier <[email protected]>
1 parent d563bcc commit 4136ce9

File tree

1 file changed

+13
-24
lines changed

1 file changed

+13
-24
lines changed

arch/arm/mm/dma-mapping.c

Lines changed: 13 additions & 24 deletions
Original file line number | Diff line number | Diff line change
@@ -1341,6 +1341,9 @@ static void arm_iommu_sync_sg_for_cpu(struct device *dev,
13411341
struct scatterlist *s;
13421342
int i;
13431343

1344+
if (dev->dma_coherent)
1345+
return;
1346+
13441347
for_each_sg(sg, s, nents, i)
13451348
__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
13461349

@@ -1360,6 +1363,9 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
13601363
struct scatterlist *s;
13611364
int i;
13621365

1366+
if (dev->dma_coherent)
1367+
return;
1368+
13631369
for_each_sg(sg, s, nents, i)
13641370
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
13651371
}
@@ -1493,12 +1499,13 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
14931499
{
14941500
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
14951501
dma_addr_t iova = handle & PAGE_MASK;
1496-
struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1502+
struct page *page;
14971503
unsigned int offset = handle & ~PAGE_MASK;
14981504

1499-
if (!iova)
1505+
if (dev->dma_coherent || !iova)
15001506
return;
15011507

1508+
page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
15021509
__dma_page_dev_to_cpu(page, offset, size, dir);
15031510
}
15041511

@@ -1507,12 +1514,13 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
15071514
{
15081515
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
15091516
dma_addr_t iova = handle & PAGE_MASK;
1510-
struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1517+
struct page *page;
15111518
unsigned int offset = handle & ~PAGE_MASK;
15121519

1513-
if (!iova)
1520+
if (dev->dma_coherent || !iova)
15141521
return;
15151522

1523+
page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
15161524
__dma_page_cpu_to_dev(page, offset, size, dir);
15171525
}
15181526

@@ -1536,22 +1544,6 @@ static const struct dma_map_ops iommu_ops = {
15361544
.unmap_resource = arm_iommu_unmap_resource,
15371545
};
15381546

1539-
static const struct dma_map_ops iommu_coherent_ops = {
1540-
.alloc = arm_iommu_alloc_attrs,
1541-
.free = arm_iommu_free_attrs,
1542-
.mmap = arm_iommu_mmap_attrs,
1543-
.get_sgtable = arm_iommu_get_sgtable,
1544-
1545-
.map_page = arm_iommu_map_page,
1546-
.unmap_page = arm_iommu_unmap_page,
1547-
1548-
.map_sg = arm_iommu_map_sg,
1549-
.unmap_sg = arm_iommu_unmap_sg,
1550-
1551-
.map_resource = arm_iommu_map_resource,
1552-
.unmap_resource = arm_iommu_unmap_resource,
1553-
};
1554-
15551547
/**
15561548
* arm_iommu_create_mapping
15571549
* @bus: pointer to the bus holding the client device (for IOMMU calls)
@@ -1750,10 +1742,7 @@ static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
17501742
return;
17511743
}
17521744

1753-
if (coherent)
1754-
set_dma_ops(dev, &iommu_coherent_ops);
1755-
else
1756-
set_dma_ops(dev, &iommu_ops);
1745+
set_dma_ops(dev, &iommu_ops);
17571746
}
17581747

17591748
static void arm_teardown_iommu_dma_ops(struct device *dev)

0 commit comments

Comments (0)