Commit 513510d

labbott authored and torvalds committed
common: dma-mapping: introduce common remapping functions
For architectures without coherent DMA, memory for DMA may need to be remapped with coherent attributes. Factor out the remapping code from arm and put it in a common location to reduce code duplication.

As part of this, the arm APIs are now migrated away from ioremap_page_range to the common APIs, which use map_vm_area for remapping. This should be an equivalent change, and using map_vm_area is more correct: ioremap_page_range is intended to bring IO addresses into the CPU space, not regular kernel-managed memory.

Signed-off-by: Laura Abbott <[email protected]>
Reviewed-by: Catalin Marinas <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: David Riley <[email protected]>
Cc: Olof Johansson <[email protected]>
Cc: Ritesh Harjain <[email protected]>
Cc: Russell King <[email protected]>
Cc: Thierry Reding <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: James Hogan <[email protected]>
Cc: Laura Abbott <[email protected]>
Cc: Mitchel Humpherys <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 9efb3a4 commit 513510d
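
The mechanical core of the change is the switch from ioremap_page_range() to map_vm_area() inside the shared helper. The fragments below are an illustrative side-by-side sketch of the two remapping styles; the helper names and the trimmed error handling are for the example only, not taken from the patch.

/* Old arm-private style: remap a raw physical address range, the
 * interface meant for bringing IO/device memory into the kernel
 * address space. (Illustrative sketch, error handling trimmed.) */
static void *remap_by_phys_range(struct vm_struct *area, struct page *page,
                                 size_t size, pgprot_t prot)
{
        unsigned long addr = (unsigned long)area->addr;

        if (ioremap_page_range(addr, addr + size,
                               __pfn_to_phys(page_to_pfn(page)), prot))
                return NULL;    /* caller would vunmap() the area */
        return area->addr;
}

/* New common style: hand the vmalloc layer an array of struct page
 * pointers, i.e. memory the kernel already manages. */
static void *remap_by_page_array(struct vm_struct *area, struct page **pages,
                                 pgprot_t prot)
{
        if (map_vm_area(area, prot, pages))
                return NULL;    /* caller would vunmap() the area */
        return area->addr;
}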

File tree

3 files changed (+90, -48 lines)

arch/arm/mm/dma-mapping.c

Lines changed: 9 additions & 48 deletions
@@ -298,37 +298,19 @@ static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
         const void *caller)
 {
-        struct vm_struct *area;
-        unsigned long addr;
-
         /*
          * DMA allocation can be mapped to user space, so lets
          * set VM_USERMAP flags too.
          */
-        area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
-                                  caller);
-        if (!area)
-                return NULL;
-        addr = (unsigned long)area->addr;
-        area->phys_addr = __pfn_to_phys(page_to_pfn(page));
-
-        if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
-                vunmap((void *)addr);
-                return NULL;
-        }
-        return (void *)addr;
+        return dma_common_contiguous_remap(page, size,
+                        VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+                        prot, caller);
 }
 
 static void __dma_free_remap(void *cpu_addr, size_t size)
 {
-        unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
-        struct vm_struct *area = find_vm_area(cpu_addr);
-        if (!area || (area->flags & flags) != flags) {
-                WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
-                return;
-        }
-        unmap_kernel_range((unsigned long)cpu_addr, size);
-        vunmap(cpu_addr);
+        dma_common_free_remap(cpu_addr, size,
+                        VM_ARM_DMA_CONSISTENT | VM_USERMAP);
 }
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE        SZ_256K
@@ -1271,29 +1253,8 @@ static void *
 __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
                     const void *caller)
 {
-        unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-        struct vm_struct *area;
-        unsigned long p;
-
-        area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
-                                  caller);
-        if (!area)
-                return NULL;
-
-        area->pages = pages;
-        area->nr_pages = nr_pages;
-        p = (unsigned long)area->addr;
-
-        for (i = 0; i < nr_pages; i++) {
-                phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
-                if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
-                        goto err;
-                p += PAGE_SIZE;
-        }
-        return area->addr;
-err:
-        unmap_kernel_range((unsigned long)area->addr, size);
-        vunmap(area->addr);
+        return dma_common_pages_remap(pages, size,
+                        VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
         return NULL;
 }

@@ -1501,8 +1462,8 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
         }
 
         if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
-                unmap_kernel_range((unsigned long)cpu_addr, size);
-                vunmap(cpu_addr);
+                dma_common_free_remap(cpu_addr, size,
+                                VM_ARM_DMA_CONSISTENT | VM_USERMAP);
         }
 
         __iommu_remove_mapping(dev, handle, size);

drivers/base/dma-mapping.c

Lines changed: 72 additions & 0 deletions
@@ -10,6 +10,8 @@
 #include <linux/dma-mapping.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <asm-generic/dma-coherent.h>
 
 /*
@@ -267,3 +269,73 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
         return ret;
 }
 EXPORT_SYMBOL(dma_common_mmap);
+
+#ifdef CONFIG_MMU
+/*
+ * remaps an array of PAGE_SIZE pages into another vm_area
+ * Cannot be used in non-sleeping contexts
+ */
+void *dma_common_pages_remap(struct page **pages, size_t size,
+                        unsigned long vm_flags, pgprot_t prot,
+                        const void *caller)
+{
+        struct vm_struct *area;
+
+        area = get_vm_area_caller(size, vm_flags, caller);
+        if (!area)
+                return NULL;
+
+        area->pages = pages;
+
+        if (map_vm_area(area, prot, pages)) {
+                vunmap(area->addr);
+                return NULL;
+        }
+
+        return area->addr;
+}
+
+/*
+ * remaps an allocated contiguous region into another vm_area.
+ * Cannot be used in non-sleeping contexts
+ */
+
+void *dma_common_contiguous_remap(struct page *page, size_t size,
+                        unsigned long vm_flags,
+                        pgprot_t prot, const void *caller)
+{
+        int i;
+        struct page **pages;
+        void *ptr;
+        unsigned long pfn;
+
+        pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
+        if (!pages)
+                return NULL;
+
+        for (i = 0, pfn = page_to_pfn(page); i < (size >> PAGE_SHIFT); i++)
+                pages[i] = pfn_to_page(pfn + i);
+
+        ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+
+        kfree(pages);
+
+        return ptr;
+}
+
+/*
+ * unmaps a range previously mapped by dma_common_*_remap
+ */
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
+{
+        struct vm_struct *area = find_vm_area(cpu_addr);
+
+        if (!area || (area->flags & vm_flags) != vm_flags) {
+                WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+                return;
+        }
+
+        unmap_kernel_range((unsigned long)cpu_addr, size);
+        vunmap(cpu_addr);
+}
+#endif
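
For context, a non-coherent architecture's coherent-allocation path would typically wrap these helpers roughly as sketched below. The function names, the VM_USERMAP-only flags, and the assumption that the DMA address equals the physical address are all illustrative and not part of this patch (arm, for example, also passes VM_ARM_DMA_CONSISTENT and performs cache maintenance on the linear-map alias). The sketch assumes the usual mm/DMA headers (<linux/dma-mapping.h>, <linux/gfp.h>, <linux/vmalloc.h>).

/* Hypothetical allocator built on the new helpers (sketch only). */
static void *example_dma_alloc(struct device *dev, size_t size,
                               dma_addr_t *dma_handle, gfp_t gfp)
{
        struct page *page;
        void *vaddr;

        size = PAGE_ALIGN(size);
        page = alloc_pages(gfp, get_order(size));
        if (!page)
                return NULL;

        /* A real implementation would clean/invalidate the linear-map
         * alias of these pages here before exposing them for DMA. */

        *dma_handle = page_to_phys(page);       /* assumes DMA addr == phys addr */

        /* Remap with non-cacheable attributes through the common helper. */
        vaddr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                            pgprot_noncached(PAGE_KERNEL),
                                            __builtin_return_address(0));
        if (!vaddr)
                __free_pages(page, get_order(size));
        return vaddr;
}

static void example_dma_free(struct device *dev, size_t size, void *vaddr,
                             dma_addr_t dma_handle)
{
        struct page *page = pfn_to_page(dma_handle >> PAGE_SHIFT);

        size = PAGE_ALIGN(size);
        dma_common_free_remap(vaddr, size, VM_USERMAP);
        __free_pages(page, get_order(size));
}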

include/asm-generic/dma-mapping-common.h

Lines changed: 9 additions & 0 deletions
@@ -179,6 +179,15 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                            void *cpu_addr, dma_addr_t dma_addr, size_t size);
 
+void *dma_common_contiguous_remap(struct page *page, size_t size,
+                        unsigned long vm_flags,
+                        pgprot_t prot, const void *caller);
+
+void *dma_common_pages_remap(struct page **pages, size_t size,
+                        unsigned long vm_flags, pgprot_t prot,
+                        const void *caller);
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
+
 /**
  * dma_mmap_attrs - map a coherent DMA allocation into user space
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
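
The page-array variant covers allocators that build a buffer from individually allocated, possibly discontiguous pages, as the arm IOMMU path above does. A minimal hedged sketch of that usage follows; the function names are illustrative and VM_USERMAP stands in for whatever vm_flags the caller actually tracks.

/* Map an array of nr_pages individually allocated pages into a single
 * virtually contiguous, non-cacheable kernel mapping (sketch only). */
static void *example_map_page_array(struct page **pages, unsigned int nr_pages)
{
        size_t size = (size_t)nr_pages << PAGE_SHIFT;

        return dma_common_pages_remap(pages, size, VM_USERMAP,
                                      pgprot_noncached(PAGE_KERNEL),
                                      __builtin_return_address(0));
}

static void example_unmap_page_array(void *vaddr, unsigned int nr_pages)
{
        dma_common_free_remap(vaddr, (size_t)nr_pages << PAGE_SHIFT, VM_USERMAP);
}

Note that dma_common_pages_remap() stores the pages pointer in the vm_struct rather than copying the array, so the caller must keep the array alive until dma_common_free_remap(); the contiguous helper sidesteps this by building and freeing a temporary array internally.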
