Skip to content

Commit 92f66f8

Browse files
committed
arm64: Fix the DMA mmap and get_sgtable API with DMA_ATTR_FORCE_CONTIGUOUS
While honouring the DMA_ATTR_FORCE_CONTIGUOUS on arm64 (commit 44176bb: "arm64: Add support for DMA_ATTR_FORCE_CONTIGUOUS to IOMMU"), the existing uses of dma_mmap_attrs() and dma_get_sgtable() have been broken by passing a physically contiguous vm_struct with an invalid pages pointer through the common iommu API.

Since the coherent allocation with DMA_ATTR_FORCE_CONTIGUOUS uses CMA, this patch simply reuses the existing swiotlb logic for mmap and get_sgtable.

Note that the current implementation of get_sgtable (both swiotlb and iommu) is broken if dma_declare_coherent_memory() is used, since such memory does not have a corresponding struct page. This is to be addressed in a subsequent patch.

Fixes: 44176bb ("arm64: Add support for DMA_ATTR_FORCE_CONTIGUOUS to IOMMU")
Reported-by: Andrzej Hajda <[email protected]>
Cc: Geert Uytterhoeven <[email protected]>
Acked-by: Robin Murphy <[email protected]>
Tested-by: Andrzej Hajda <[email protected]>
Reviewed-by: Andrzej Hajda <[email protected]>
Signed-off-by: Catalin Marinas <[email protected]>
1 parent 2f9a0be commit 92f66f8

File tree

1 file changed

+49
-16
lines changed

1 file changed

+49
-16
lines changed

arch/arm64/mm/dma-mapping.c

Lines changed: 49 additions & 16 deletions
Original file line number · Diff line number · Diff line change
@@ -308,24 +308,15 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
308308
sg->length, dir);
309309
}
310310

311-
static int __swiotlb_mmap(struct device *dev,
312-
struct vm_area_struct *vma,
313-
void *cpu_addr, dma_addr_t dma_addr, size_t size,
314-
unsigned long attrs)
311+
static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
312+
unsigned long pfn, size_t size)
315313
{
316314
int ret = -ENXIO;
317315
unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
318316
PAGE_SHIFT;
319317
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
320-
unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
321318
unsigned long off = vma->vm_pgoff;
322319

323-
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
324-
is_device_dma_coherent(dev));
325-
326-
if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
327-
return ret;
328-
329320
if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
330321
ret = remap_pfn_range(vma, vma->vm_start,
331322
pfn + off,
@@ -336,19 +327,43 @@ static int __swiotlb_mmap(struct device *dev,
336327
return ret;
337328
}
338329

339-
static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
340-
void *cpu_addr, dma_addr_t handle, size_t size,
341-
unsigned long attrs)
330+
static int __swiotlb_mmap(struct device *dev,
331+
struct vm_area_struct *vma,
332+
void *cpu_addr, dma_addr_t dma_addr, size_t size,
333+
unsigned long attrs)
334+
{
335+
int ret;
336+
unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
337+
338+
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
339+
is_device_dma_coherent(dev));
340+
341+
if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
342+
return ret;
343+
344+
return __swiotlb_mmap_pfn(vma, pfn, size);
345+
}
346+
347+
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
348+
struct page *page, size_t size)
342349
{
343350
int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
344351

345352
if (!ret)
346-
sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
347-
PAGE_ALIGN(size), 0);
353+
sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
348354

349355
return ret;
350356
}
351357

358+
static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
359+
void *cpu_addr, dma_addr_t handle, size_t size,
360+
unsigned long attrs)
361+
{
362+
struct page *page = phys_to_page(dma_to_phys(dev, handle));
363+
364+
return __swiotlb_get_sgtable_page(sgt, page, size);
365+
}
366+
352367
static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
353368
{
354369
if (swiotlb)
@@ -703,6 +718,15 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
703718
if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
704719
return ret;
705720

721+
if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
722+
/*
723+
* DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
724+
* hence in the vmalloc space.
725+
*/
726+
unsigned long pfn = vmalloc_to_pfn(cpu_addr);
727+
return __swiotlb_mmap_pfn(vma, pfn, size);
728+
}
729+
706730
area = find_vm_area(cpu_addr);
707731
if (WARN_ON(!area || !area->pages))
708732
return -ENXIO;
@@ -717,6 +741,15 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
717741
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
718742
struct vm_struct *area = find_vm_area(cpu_addr);
719743

744+
if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
745+
/*
746+
* DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
747+
* hence in the vmalloc space.
748+
*/
749+
struct page *page = vmalloc_to_page(cpu_addr);
750+
return __swiotlb_get_sgtable_page(sgt, page, size);
751+
}
752+
720753
if (WARN_ON(!area || !area->pages))
721754
return -ENXIO;
722755

0 commit comments

Comments (0)