Skip to content

Commit d563bcc

Browse files
rmurphy-arm authored and Christoph Hellwig committed
ARM/dma-mapping: consolidate IOMMU ops callbacks
Merge the coherent and non-coherent callbacks down to a single implementation each, relying on the generic dev->dma_coherent flag at the points where the difference matters.

Signed-off-by: Robin Murphy <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
Tested-by: Marc Zyngier <[email protected]>
1 parent 42998ef commit d563bcc

File tree

1 file changed

+55
-183
lines changed

1 file changed

+55
-183
lines changed

arch/arm/mm/dma-mapping.c

Lines changed: 55 additions & 183 deletions
Original file line number | Diff line number | Diff line change
@@ -1079,13 +1079,13 @@ static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
10791079
__free_from_pool(cpu_addr, size);
10801080
}
10811081

1082-
static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
1083-
dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
1084-
int coherent_flag)
1082+
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
1083+
dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
10851084
{
10861085
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
10871086
struct page **pages;
10881087
void *addr = NULL;
1088+
int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
10891089

10901090
*handle = DMA_MAPPING_ERROR;
10911091
size = PAGE_ALIGN(size);
@@ -1128,19 +1128,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
11281128
return NULL;
11291129
}
11301130

1131-
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
1132-
dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
1133-
{
1134-
return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
1135-
}
1136-
1137-
static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
1138-
dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
1139-
{
1140-
return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
1141-
}
1142-
1143-
static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
1131+
static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
11441132
void *cpu_addr, dma_addr_t dma_addr, size_t size,
11451133
unsigned long attrs)
11461134
{
@@ -1154,35 +1142,24 @@ static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma
11541142
if (vma->vm_pgoff >= nr_pages)
11551143
return -ENXIO;
11561144

1145+
if (!dev->dma_coherent)
1146+
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
1147+
11571148
err = vm_map_pages(vma, pages, nr_pages);
11581149
if (err)
11591150
pr_err("Remapping memory failed: %d\n", err);
11601151

11611152
return err;
11621153
}
1163-
static int arm_iommu_mmap_attrs(struct device *dev,
1164-
struct vm_area_struct *vma, void *cpu_addr,
1165-
dma_addr_t dma_addr, size_t size, unsigned long attrs)
1166-
{
1167-
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
1168-
1169-
return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
1170-
}
1171-
1172-
static int arm_coherent_iommu_mmap_attrs(struct device *dev,
1173-
struct vm_area_struct *vma, void *cpu_addr,
1174-
dma_addr_t dma_addr, size_t size, unsigned long attrs)
1175-
{
1176-
return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
1177-
}
11781154

11791155
/*
11801156
* free a page as defined by the above mapping.
11811157
* Must not be called with IRQs disabled.
11821158
*/
1183-
static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
1184-
dma_addr_t handle, unsigned long attrs, int coherent_flag)
1159+
static void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
1160+
dma_addr_t handle, unsigned long attrs)
11851161
{
1162+
int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
11861163
struct page **pages;
11871164
size = PAGE_ALIGN(size);
11881165

@@ -1204,19 +1181,6 @@ static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_ad
12041181
__iommu_free_buffer(dev, pages, size, attrs);
12051182
}
12061183

1207-
static void arm_iommu_free_attrs(struct device *dev, size_t size,
1208-
void *cpu_addr, dma_addr_t handle,
1209-
unsigned long attrs)
1210-
{
1211-
__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
1212-
}
1213-
1214-
static void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
1215-
void *cpu_addr, dma_addr_t handle, unsigned long attrs)
1216-
{
1217-
__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
1218-
}
1219-
12201184
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
12211185
void *cpu_addr, dma_addr_t dma_addr,
12221186
size_t size, unsigned long attrs)
@@ -1236,8 +1200,7 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
12361200
*/
12371201
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
12381202
size_t size, dma_addr_t *handle,
1239-
enum dma_data_direction dir, unsigned long attrs,
1240-
bool is_coherent)
1203+
enum dma_data_direction dir, unsigned long attrs)
12411204
{
12421205
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
12431206
dma_addr_t iova, iova_base;
@@ -1257,7 +1220,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
12571220
phys_addr_t phys = page_to_phys(sg_page(s));
12581221
unsigned int len = PAGE_ALIGN(s->offset + s->length);
12591222

1260-
if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
1223+
if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
12611224
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
12621225

12631226
prot = __dma_info_to_prot(dir, attrs);
@@ -1277,9 +1240,20 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
12771240
return ret;
12781241
}
12791242

1280-
static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
1281-
enum dma_data_direction dir, unsigned long attrs,
1282-
bool is_coherent)
1243+
/**
1244+
* arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1245+
* @dev: valid struct device pointer
1246+
* @sg: list of buffers
1247+
* @nents: number of buffers to map
1248+
* @dir: DMA transfer direction
1249+
*
1250+
* Map a set of buffers described by scatterlist in streaming mode for DMA.
1251+
* The scatter gather list elements are merged together (if possible) and
1252+
* tagged with the appropriate dma address and length. They are obtained via
1253+
* sg_dma_{address,length}.
1254+
*/
1255+
static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1256+
int nents, enum dma_data_direction dir, unsigned long attrs)
12831257
{
12841258
struct scatterlist *s = sg, *dma = sg, *start = sg;
12851259
int i, count = 0, ret;
@@ -1294,8 +1268,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
12941268

12951269
if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
12961270
ret = __map_sg_chunk(dev, start, size,
1297-
&dma->dma_address, dir, attrs,
1298-
is_coherent);
1271+
&dma->dma_address, dir, attrs);
12991272
if (ret < 0)
13001273
goto bad_mapping;
13011274

@@ -1309,8 +1282,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
13091282
}
13101283
size += s->length;
13111284
}
1312-
ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
1313-
is_coherent);
1285+
ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs);
13141286
if (ret < 0)
13151287
goto bad_mapping;
13161288

@@ -1328,44 +1300,19 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
13281300
}
13291301

13301302
/**
1331-
* arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1332-
* @dev: valid struct device pointer
1333-
* @sg: list of buffers
1334-
* @nents: number of buffers to map
1335-
* @dir: DMA transfer direction
1336-
*
1337-
* Map a set of i/o coherent buffers described by scatterlist in streaming
1338-
* mode for DMA. The scatter gather list elements are merged together (if
1339-
* possible) and tagged with the appropriate dma address and length. They are
1340-
* obtained via sg_dma_{address,length}.
1341-
*/
1342-
static int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1343-
int nents, enum dma_data_direction dir, unsigned long attrs)
1344-
{
1345-
return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
1346-
}
1347-
1348-
/**
1349-
* arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1303+
* arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
13501304
* @dev: valid struct device pointer
13511305
* @sg: list of buffers
1352-
* @nents: number of buffers to map
1353-
* @dir: DMA transfer direction
1306+
* @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1307+
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
13541308
*
1355-
* Map a set of buffers described by scatterlist in streaming mode for DMA.
1356-
* The scatter gather list elements are merged together (if possible) and
1357-
* tagged with the appropriate dma address and length. They are obtained via
1358-
* sg_dma_{address,length}.
1309+
* Unmap a set of streaming mode DMA translations. Again, CPU access
1310+
* rules concerning calls here are the same as for dma_unmap_single().
13591311
*/
1360-
static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1361-
int nents, enum dma_data_direction dir, unsigned long attrs)
1362-
{
1363-
return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
1364-
}
1365-
1366-
static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
1367-
int nents, enum dma_data_direction dir,
1368-
unsigned long attrs, bool is_coherent)
1312+
static void arm_iommu_unmap_sg(struct device *dev,
1313+
struct scatterlist *sg, int nents,
1314+
enum dma_data_direction dir,
1315+
unsigned long attrs)
13691316
{
13701317
struct scatterlist *s;
13711318
int i;
@@ -1374,47 +1321,12 @@ static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
13741321
if (sg_dma_len(s))
13751322
__iommu_remove_mapping(dev, sg_dma_address(s),
13761323
sg_dma_len(s));
1377-
if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
1324+
if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
13781325
__dma_page_dev_to_cpu(sg_page(s), s->offset,
13791326
s->length, dir);
13801327
}
13811328
}
13821329

1383-
/**
1384-
* arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1385-
* @dev: valid struct device pointer
1386-
* @sg: list of buffers
1387-
* @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1388-
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
1389-
*
1390-
* Unmap a set of streaming mode DMA translations. Again, CPU access
1391-
* rules concerning calls here are the same as for dma_unmap_single().
1392-
*/
1393-
static void arm_coherent_iommu_unmap_sg(struct device *dev,
1394-
struct scatterlist *sg, int nents, enum dma_data_direction dir,
1395-
unsigned long attrs)
1396-
{
1397-
__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
1398-
}
1399-
1400-
/**
1401-
* arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1402-
* @dev: valid struct device pointer
1403-
* @sg: list of buffers
1404-
* @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1405-
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
1406-
*
1407-
* Unmap a set of streaming mode DMA translations. Again, CPU access
1408-
* rules concerning calls here are the same as for dma_unmap_single().
1409-
*/
1410-
static void arm_iommu_unmap_sg(struct device *dev,
1411-
struct scatterlist *sg, int nents,
1412-
enum dma_data_direction dir,
1413-
unsigned long attrs)
1414-
{
1415-
__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
1416-
}
1417-
14181330
/**
14191331
* arm_iommu_sync_sg_for_cpu
14201332
* @dev: valid struct device pointer
@@ -1452,25 +1364,27 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
14521364
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
14531365
}
14541366

1455-
14561367
/**
1457-
* arm_coherent_iommu_map_page
1368+
* arm_iommu_map_page
14581369
* @dev: valid struct device pointer
14591370
* @page: page that buffer resides in
14601371
* @offset: offset into page for start of buffer
14611372
* @size: size of buffer to map
14621373
* @dir: DMA transfer direction
14631374
*
1464-
* Coherent IOMMU aware version of arm_dma_map_page()
1375+
* IOMMU aware version of arm_dma_map_page()
14651376
*/
1466-
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
1377+
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
14671378
unsigned long offset, size_t size, enum dma_data_direction dir,
14681379
unsigned long attrs)
14691380
{
14701381
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
14711382
dma_addr_t dma_addr;
14721383
int ret, prot, len = PAGE_ALIGN(size + offset);
14731384

1385+
if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1386+
__dma_page_cpu_to_dev(page, offset, size, dir);
1387+
14741388
dma_addr = __alloc_iova(mapping, len);
14751389
if (dma_addr == DMA_MAPPING_ERROR)
14761390
return dma_addr;
@@ -1487,50 +1401,6 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
14871401
return DMA_MAPPING_ERROR;
14881402
}
14891403

1490-
/**
1491-
* arm_iommu_map_page
1492-
* @dev: valid struct device pointer
1493-
* @page: page that buffer resides in
1494-
* @offset: offset into page for start of buffer
1495-
* @size: size of buffer to map
1496-
* @dir: DMA transfer direction
1497-
*
1498-
* IOMMU aware version of arm_dma_map_page()
1499-
*/
1500-
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
1501-
unsigned long offset, size_t size, enum dma_data_direction dir,
1502-
unsigned long attrs)
1503-
{
1504-
if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
1505-
__dma_page_cpu_to_dev(page, offset, size, dir);
1506-
1507-
return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
1508-
}
1509-
1510-
/**
1511-
* arm_coherent_iommu_unmap_page
1512-
* @dev: valid struct device pointer
1513-
* @handle: DMA address of buffer
1514-
* @size: size of buffer (same as passed to dma_map_page)
1515-
* @dir: DMA transfer direction (same as passed to dma_map_page)
1516-
*
1517-
* Coherent IOMMU aware version of arm_dma_unmap_page()
1518-
*/
1519-
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
1520-
size_t size, enum dma_data_direction dir, unsigned long attrs)
1521-
{
1522-
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1523-
dma_addr_t iova = handle & PAGE_MASK;
1524-
int offset = handle & ~PAGE_MASK;
1525-
int len = PAGE_ALIGN(size + offset);
1526-
1527-
if (!iova)
1528-
return;
1529-
1530-
iommu_unmap(mapping->domain, iova, len);
1531-
__free_iova(mapping, iova, len);
1532-
}
1533-
15341404
/**
15351405
* arm_iommu_unmap_page
15361406
* @dev: valid struct device pointer
@@ -1545,15 +1415,17 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
15451415
{
15461416
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
15471417
dma_addr_t iova = handle & PAGE_MASK;
1548-
struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1418+
struct page *page;
15491419
int offset = handle & ~PAGE_MASK;
15501420
int len = PAGE_ALIGN(size + offset);
15511421

15521422
if (!iova)
15531423
return;
15541424

1555-
if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
1425+
if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
1426+
page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
15561427
__dma_page_dev_to_cpu(page, offset, size, dir);
1428+
}
15571429

15581430
iommu_unmap(mapping->domain, iova, len);
15591431
__free_iova(mapping, iova, len);
@@ -1665,16 +1537,16 @@ static const struct dma_map_ops iommu_ops = {
16651537
};
16661538

16671539
static const struct dma_map_ops iommu_coherent_ops = {
1668-
.alloc = arm_coherent_iommu_alloc_attrs,
1669-
.free = arm_coherent_iommu_free_attrs,
1670-
.mmap = arm_coherent_iommu_mmap_attrs,
1540+
.alloc = arm_iommu_alloc_attrs,
1541+
.free = arm_iommu_free_attrs,
1542+
.mmap = arm_iommu_mmap_attrs,
16711543
.get_sgtable = arm_iommu_get_sgtable,
16721544

1673-
.map_page = arm_coherent_iommu_map_page,
1674-
.unmap_page = arm_coherent_iommu_unmap_page,
1545+
.map_page = arm_iommu_map_page,
1546+
.unmap_page = arm_iommu_unmap_page,
16751547

1676-
.map_sg = arm_coherent_iommu_map_sg,
1677-
.unmap_sg = arm_coherent_iommu_unmap_sg,
1548+
.map_sg = arm_iommu_map_sg,
1549+
.unmap_sg = arm_iommu_unmap_sg,
16781550

16791551
.map_resource = arm_iommu_map_resource,
16801552
.unmap_resource = arm_iommu_unmap_resource,

0 commit comments

Comments (0)