Skip to content

Commit 878ec36

Browse files
murzinv authored and Christoph Hellwig committed
ARM: NOMMU: Wire-up default DMA interface
The way the default DMA pool is exposed has changed, and now we need to use a dedicated interface to work with it. This patch makes the alloc/release operations use that interface. Since the default DMA pool is no longer handled by generic code, we have to implement our own mmap operation.

Tested-by: Andras Szemzo <[email protected]>
Reviewed-by: Robin Murphy <[email protected]>
Signed-off-by: Vladimir Murzin <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
1 parent 43fc509 commit 878ec36

File tree

1 file changed

+36
-9
lines changed

1 file changed

+36
-9
lines changed

arch/arm/mm/dma-mapping-nommu.c

Lines changed: 36 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -40,21 +40,30 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
4040

4141
{
4242
const struct dma_map_ops *ops = &dma_noop_ops;
43+
void *ret;
4344

4445
/*
45-
* We are here because:
46+
* Try generic allocator first if we are advertised that
47+
* consistency is not required.
48+
*/
49+
50+
if (attrs & DMA_ATTR_NON_CONSISTENT)
51+
return ops->alloc(dev, size, dma_handle, gfp, attrs);
52+
53+
ret = dma_alloc_from_global_coherent(size, dma_handle);
54+
55+
/*
56+
* dma_alloc_from_global_coherent() may fail because:
57+
*
4658
* - no consistent DMA region has been defined, so we can't
4759
* continue.
4860
* - there is no space left in consistent DMA region, so we
4961
* only can fallback to generic allocator if we are
5062
* advertised that consistency is not required.
5163
*/
5264

53-
if (attrs & DMA_ATTR_NON_CONSISTENT)
54-
return ops->alloc(dev, size, dma_handle, gfp, attrs);
55-
56-
WARN_ON_ONCE(1);
57-
return NULL;
65+
WARN_ON_ONCE(ret == NULL);
66+
return ret;
5867
}
5968

6069
static void arm_nommu_dma_free(struct device *dev, size_t size,
@@ -63,14 +72,31 @@ static void arm_nommu_dma_free(struct device *dev, size_t size,
6372
{
6473
const struct dma_map_ops *ops = &dma_noop_ops;
6574

66-
if (attrs & DMA_ATTR_NON_CONSISTENT)
75+
if (attrs & DMA_ATTR_NON_CONSISTENT) {
6776
ops->free(dev, size, cpu_addr, dma_addr, attrs);
68-
else
69-
WARN_ON_ONCE(1);
77+
} else {
78+
int ret = dma_release_from_global_coherent(get_order(size),
79+
cpu_addr);
80+
81+
WARN_ON_ONCE(ret == 0);
82+
}
7083

7184
return;
7285
}
7386

87+
static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
88+
void *cpu_addr, dma_addr_t dma_addr, size_t size,
89+
unsigned long attrs)
90+
{
91+
int ret;
92+
93+
if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
94+
return ret;
95+
96+
return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
97+
}
98+
99+
74100
static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
75101
enum dma_data_direction dir)
76102
{
@@ -173,6 +199,7 @@ static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist
173199
const struct dma_map_ops arm_nommu_dma_ops = {
174200
.alloc = arm_nommu_dma_alloc,
175201
.free = arm_nommu_dma_free,
202+
.mmap = arm_nommu_dma_mmap,
176203
.map_page = arm_nommu_dma_map_page,
177204
.unmap_page = arm_nommu_dma_unmap_page,
178205
.map_sg = arm_nommu_dma_map_sg,

0 commit comments

Comments
 (0)