Skip to content

Commit f5ff79f

Browse files
author
Christoph Hellwig
committed
dma-mapping: remove CONFIG_DMA_REMAP
CONFIG_DMA_REMAP is used to build a few helpers around the core vmalloc code, and to use them in case there is a highmem page in dma-direct, and to make dma coherent allocations be able to use non-contiguous pages allocations for DMA allocations in the dma-iommu layer.

Right now it needs to be explicitly selected by architectures, and is only done so by architectures that require remapping to deal with devices that are not DMA coherent. Make it unconditional for builds with CONFIG_MMU as it is very little extra code, but makes it much more likely that large DMA allocations succeed on x86.

This fixes hot plugging a NVMe thunderbolt SSD for me, which tries to allocate a 1MB buffer that is otherwise hard to obtain due to memory fragmentation on a heavily used laptop.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Robin Murphy <[email protected]>
1 parent fba0909 commit f5ff79f

File tree

6 files changed

+16
-29
lines changed

6 files changed

+16
-29
lines changed

arch/arm/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ config ARM
4747
select DMA_DECLARE_COHERENT
4848
select DMA_GLOBAL_POOL if !MMU
4949
select DMA_OPS
50-
select DMA_REMAP if MMU
50+
select DMA_NONCOHERENT_MMAP if MMU
5151
select EDAC_SUPPORT
5252
select EDAC_ATOMIC_SCRUB
5353
select GENERIC_ALLOCATOR

arch/xtensa/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ config XTENSA
1717
select BUILDTIME_TABLE_SORT
1818
select CLONE_BACKWARDS
1919
select COMMON_CLK
20-
select DMA_REMAP if MMU
20+
select DMA_NONCOHERENT_MMAP if MMU
2121
select GENERIC_ATOMIC64
2222
select GENERIC_IRQ_SHOW
2323
select GENERIC_PCI_IOMAP

drivers/iommu/dma-iommu.c

Lines changed: 5 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -852,7 +852,6 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
852852
return NULL;
853853
}
854854

855-
#ifdef CONFIG_DMA_REMAP
856855
static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
857856
size_t size, enum dma_data_direction dir, gfp_t gfp,
858857
unsigned long attrs)
@@ -882,7 +881,6 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
882881
sg_free_table(&sh->sgt);
883882
kfree(sh);
884883
}
885-
#endif /* CONFIG_DMA_REMAP */
886884

887885
static void iommu_dma_sync_single_for_cpu(struct device *dev,
888886
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
@@ -1276,7 +1274,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
12761274
dma_free_from_pool(dev, cpu_addr, alloc_size))
12771275
return;
12781276

1279-
if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
1277+
if (is_vmalloc_addr(cpu_addr)) {
12801278
/*
12811279
* If it the address is remapped, then it's either non-coherent
12821280
* or highmem CMA, or an iommu_dma_alloc_remap() construction.
@@ -1318,7 +1316,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
13181316
if (!page)
13191317
return NULL;
13201318

1321-
if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
1319+
if (!coherent || PageHighMem(page)) {
13221320
pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
13231321

13241322
cpu_addr = dma_common_contiguous_remap(page, alloc_size,
@@ -1350,7 +1348,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
13501348

13511349
gfp |= __GFP_ZERO;
13521350

1353-
if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
1351+
if (gfpflags_allow_blocking(gfp) &&
13541352
!(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
13551353
return iommu_dma_alloc_remap(dev, size, handle, gfp,
13561354
dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
@@ -1391,7 +1389,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
13911389
if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
13921390
return -ENXIO;
13931391

1394-
if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
1392+
if (is_vmalloc_addr(cpu_addr)) {
13951393
struct page **pages = dma_common_find_pages(cpu_addr);
13961394

13971395
if (pages)
@@ -1413,7 +1411,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
14131411
struct page *page;
14141412
int ret;
14151413

1416-
if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
1414+
if (is_vmalloc_addr(cpu_addr)) {
14171415
struct page **pages = dma_common_find_pages(cpu_addr);
14181416

14191417
if (pages) {
@@ -1445,10 +1443,8 @@ static const struct dma_map_ops iommu_dma_ops = {
14451443
.free = iommu_dma_free,
14461444
.alloc_pages = dma_common_alloc_pages,
14471445
.free_pages = dma_common_free_pages,
1448-
#ifdef CONFIG_DMA_REMAP
14491446
.alloc_noncontiguous = iommu_dma_alloc_noncontiguous,
14501447
.free_noncontiguous = iommu_dma_free_noncontiguous,
1451-
#endif
14521448
.mmap = iommu_dma_mmap,
14531449
.get_sgtable = iommu_dma_get_sgtable,
14541450
.map_page = iommu_dma_map_page,

kernel/dma/Kconfig

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -110,15 +110,10 @@ config DMA_GLOBAL_POOL
110110
select DMA_DECLARE_COHERENT
111111
bool
112112

113-
config DMA_REMAP
114-
bool
115-
depends on MMU
116-
select DMA_NONCOHERENT_MMAP
117-
118113
config DMA_DIRECT_REMAP
119114
bool
120-
select DMA_REMAP
121115
select DMA_COHERENT_POOL
116+
select DMA_NONCOHERENT_MMAP
122117

123118
config DMA_CMA
124119
bool "DMA Contiguous Memory Allocator"

kernel/dma/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,5 +8,5 @@ obj-$(CONFIG_DMA_DECLARE_COHERENT) += coherent.o
88
obj-$(CONFIG_DMA_API_DEBUG) += debug.o
99
obj-$(CONFIG_SWIOTLB) += swiotlb.o
1010
obj-$(CONFIG_DMA_COHERENT_POOL) += pool.o
11-
obj-$(CONFIG_DMA_REMAP) += remap.o
11+
obj-$(CONFIG_MMU) += remap.o
1212
obj-$(CONFIG_DMA_MAP_BENCHMARK) += map_benchmark.o

kernel/dma/direct.c

Lines changed: 7 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -265,17 +265,13 @@ void *dma_direct_alloc(struct device *dev, size_t size,
265265
page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
266266
if (!page)
267267
return NULL;
268+
269+
/*
270+
* dma_alloc_contiguous can return highmem pages depending on a
271+
* combination the cma= arguments and per-arch setup. These need to be
272+
* remapped to return a kernel virtual address.
273+
*/
268274
if (PageHighMem(page)) {
269-
/*
270-
* Depending on the cma= arguments and per-arch setup,
271-
* dma_alloc_contiguous could return highmem pages.
272-
* Without remapping there is no way to return them here, so
273-
* log an error and fail.
274-
*/
275-
if (!IS_ENABLED(CONFIG_DMA_REMAP)) {
276-
dev_info(dev, "Rejecting highmem page from CMA.\n");
277-
goto out_free_pages;
278-
}
279275
remap = true;
280276
set_uncached = false;
281277
}
@@ -349,7 +345,7 @@ void dma_direct_free(struct device *dev, size_t size,
349345
dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
350346
return;
351347

352-
if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
348+
if (is_vmalloc_addr(cpu_addr)) {
353349
vunmap(cpu_addr);
354350
} else {
355351
if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))

0 commit comments

Comments (0)