
Commit 849face

Christoph Hellwig authored and committed
dma-direct: simplify the DMA_ATTR_NO_KERNEL_MAPPING handling
Use an entirely separate code path for the DMA_ATTR_NO_KERNEL_MAPPING case. This avoids any confusion about the ret type, and avoids lots of attr checks and helpers that can be significantly simplified now. It also ensures that common handling is applied to architectures still using the arch alloc/free hooks.

Signed-off-by: Christoph Hellwig <[email protected]>
1 parent 5b138c5 commit 849face
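
For context, a minimal driver-side sketch (not part of this commit) of how DMA_ATTR_NO_KERNEL_MAPPING is typically used: with this attribute, the value returned by dma_alloc_attrs() is an opaque cookie rather than a kernel virtual address (for dma-direct, the struct page pointer), so the CPU must never dereference it and only the returned dma_addr_t is usable. The names example_alloc, example_free, mydev and buf_size below are illustrative.

#include <linux/dma-mapping.h>

/* Illustrative only: buf_cookie is an opaque cookie, not a usable address. */
static void *buf_cookie;
static dma_addr_t buf_dma;

static int example_alloc(struct device *mydev, size_t buf_size)
{
        buf_cookie = dma_alloc_attrs(mydev, buf_size, &buf_dma, GFP_KERNEL,
                                     DMA_ATTR_NO_KERNEL_MAPPING);
        if (!buf_cookie)
                return -ENOMEM;
        /* program buf_dma into the device; the CPU never touches the buffer */
        return 0;
}

static void example_free(struct device *mydev, size_t buf_size)
{
        dma_free_attrs(mydev, buf_size, buf_cookie, buf_dma,
                       DMA_ATTR_NO_KERNEL_MAPPING);
}

In the new dma_direct_alloc() below, this case takes its own early-return branch: allocate the pages, flush the kernel alias, set *dma_handle, and return the page pointer as the cookie.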

File tree

2 files changed: 39 additions & 74 deletions


include/linux/dma-map-ops.h

Lines changed: 0 additions & 13 deletions
@@ -219,19 +219,6 @@ static inline bool dev_is_dma_coherent(struct device *dev)
 }
 #endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */
 
-/*
- * Check if an allocation needs to be marked uncached to be coherent.
- */
-static __always_inline bool dma_alloc_need_uncached(struct device *dev,
-                unsigned long attrs)
-{
-        if (dev_is_dma_coherent(dev))
-                return false;
-        if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
-                return false;
-        return true;
-}
-
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                 gfp_t gfp, unsigned long attrs);
 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
kernel/dma/direct.c

Lines changed: 39 additions & 61 deletions
@@ -75,39 +75,6 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
                         min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
-/*
- * Decrypting memory is allowed to block, so if this device requires
- * unencrypted memory it must come from atomic pools.
- */
-static inline bool dma_should_alloc_from_pool(struct device *dev, gfp_t gfp,
-                unsigned long attrs)
-{
-        if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
-                return false;
-        if (gfpflags_allow_blocking(gfp))
-                return false;
-        if (force_dma_unencrypted(dev))
-                return true;
-        if (!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
-                return false;
-        if (dma_alloc_need_uncached(dev, attrs))
-                return true;
-        return false;
-}
-
-static inline bool dma_should_free_from_pool(struct device *dev,
-                unsigned long attrs)
-{
-        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
-                return true;
-        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-            !force_dma_unencrypted(dev))
-                return false;
-        if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
-                return true;
-        return false;
-}
-
 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                 gfp_t gfp)
 {
@@ -170,35 +137,45 @@ void *dma_direct_alloc(struct device *dev, size_t size,
         void *ret;
         int err;
 
-        if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-            !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-            dma_alloc_need_uncached(dev, attrs))
-                return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
-
         size = PAGE_ALIGN(size);
         if (attrs & DMA_ATTR_NO_WARN)
                 gfp |= __GFP_NOWARN;
 
-        if (dma_should_alloc_from_pool(dev, gfp, attrs))
-                return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
-
-        /* we always manually zero the memory once we are done */
-        page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
-        if (!page)
-                return NULL;
-
         if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
             !force_dma_unencrypted(dev)) {
+                page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+                if (!page)
+                        return NULL;
                 /* remove any dirty cache lines on the kernel alias */
                 if (!PageHighMem(page))
                         arch_dma_prep_coherent(page, size);
+                *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
                 /* return the page pointer as the opaque cookie */
-                ret = page;
-                goto done;
+                return page;
         }
 
+        if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
+            !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+            !dev_is_dma_coherent(dev))
+                return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
+
+        /*
+         * Remapping or decrypting memory may block. If either is required and
+         * we can't block, allocate the memory from the atomic pools.
+         */
+        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
+            !gfpflags_allow_blocking(gfp) &&
+            (force_dma_unencrypted(dev) ||
+             (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev))))
+                return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
+
+        /* we always manually zero the memory once we are done */
+        page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+        if (!page)
+                return NULL;
+
         if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-             dma_alloc_need_uncached(dev, attrs)) ||
+             !dev_is_dma_coherent(dev)) ||
             (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
                 /* remove any dirty cache lines on the kernel alias */
                 arch_dma_prep_coherent(page, size);
@@ -241,7 +218,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
                 memset(ret, 0, size);
 
         if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-            dma_alloc_need_uncached(dev, attrs)) {
+            !dev_is_dma_coherent(dev)) {
                 arch_dma_prep_coherent(page, size);
                 ret = arch_dma_set_uncached(ret, size);
                 if (IS_ERR(ret))
@@ -269,25 +246,25 @@ void dma_direct_free(struct device *dev, size_t size,
 {
         unsigned int page_order = get_order(size);
 
+        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+            !force_dma_unencrypted(dev)) {
+                /* cpu_addr is a struct page cookie, not a kernel address */
+                dma_free_contiguous(dev, cpu_addr, size);
+                return;
+        }
+
         if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
             !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-            dma_alloc_need_uncached(dev, attrs)) {
+            !dev_is_dma_coherent(dev)) {
                 arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
                 return;
         }
 
         /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
-        if (dma_should_free_from_pool(dev, attrs) &&
+        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
             dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
                 return;
 
-        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-            !force_dma_unencrypted(dev)) {
-                /* cpu_addr is a struct page cookie, not a kernel address */
-                dma_free_contiguous(dev, cpu_addr, size);
-                return;
-        }
-
         if (force_dma_unencrypted(dev))
                 set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
 
@@ -305,7 +282,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
         struct page *page;
         void *ret;
 
-        if (dma_should_alloc_from_pool(dev, gfp, 0))
+        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
+            force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp))
                 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
         page = __dma_direct_alloc_pages(dev, size, gfp);
@@ -344,7 +322,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
         void *vaddr = page_address(page);
 
         /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
-        if (dma_should_free_from_pool(dev, 0) &&
+        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
             dma_free_from_pool(dev, vaddr, size))
                 return;
 
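
For completeness, a hedged sketch (also not part of this commit) of the caller-facing API served by dma_direct_alloc_pages()/dma_direct_free_pages() above, namely dma_alloc_pages()/dma_free_pages(); the names example_alloc_pages, example_free_pages and mydev are illustrative.

#include <linux/dma-mapping.h>

static struct page *pg;
static dma_addr_t pg_dma;

static int example_alloc_pages(struct device *mydev, size_t buf_size)
{
        pg = dma_alloc_pages(mydev, buf_size, &pg_dma, DMA_BIDIRECTIONAL,
                             GFP_KERNEL);
        if (!pg)
                return -ENOMEM;
        /* page_address(pg) gives the kernel mapping of the allocation */
        return 0;
}

static void example_free_pages(struct device *mydev, size_t buf_size)
{
        dma_free_pages(mydev, buf_size, pg, pg_dma, DMA_BIDIRECTIONAL);
}

Unlike the DMA_ATTR_NO_KERNEL_MAPPING case, these pages have a regular kernel mapping, which the diff above obtains via page_address() in dma_direct_free_pages().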
