@@ -75,39 +75,6 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
-/*
- * Decrypting memory is allowed to block, so if this device requires
- * unencrypted memory it must come from atomic pools.
- */
-static inline bool dma_should_alloc_from_pool(struct device *dev, gfp_t gfp,
-					      unsigned long attrs)
-{
-	if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
-		return false;
-	if (gfpflags_allow_blocking(gfp))
-		return false;
-	if (force_dma_unencrypted(dev))
-		return true;
-	if (!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
-		return false;
-	if (dma_alloc_need_uncached(dev, attrs))
-		return true;
-	return false;
-}
-
-static inline bool dma_should_free_from_pool(struct device *dev,
-					     unsigned long attrs)
-{
-	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
-		return true;
-	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-	    !force_dma_unencrypted(dev))
-		return false;
-	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
-		return true;
-	return false;
-}
-
 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		gfp_t gfp)
 {
@@ -170,35 +137,45 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	void *ret;
 	int err;
 
-	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    dma_alloc_need_uncached(dev, attrs))
-		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
-
 	size = PAGE_ALIGN(size);
 	if (attrs & DMA_ATTR_NO_WARN)
 		gfp |= __GFP_NOWARN;
 
-	if (dma_should_alloc_from_pool(dev, gfp, attrs))
-		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
-
-	/* we always manually zero the memory once we are done */
-	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
-	if (!page)
-		return NULL;
-
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
 	    !force_dma_unencrypted(dev)) {
+		page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+		if (!page)
+			return NULL;
 		/* remove any dirty cache lines on the kernel alias */
 		if (!PageHighMem(page))
 			arch_dma_prep_coherent(page, size);
+		*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
 		/* return the page pointer as the opaque cookie */
-		ret = page;
-		goto done;
+		return page;
 	}
 
+	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    !dev_is_dma_coherent(dev))
+		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
+
+	/*
+	 * Remapping or decrypting memory may block. If either is required and
+	 * we can't block, allocate the memory from the atomic pools.
+	 */
+	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
+	    !gfpflags_allow_blocking(gfp) &&
+	    (force_dma_unencrypted(dev) ||
+	     (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev))))
+		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
+
+	/* we always manually zero the memory once we are done */
+	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+	if (!page)
+		return NULL;
+
 	if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	     dma_alloc_need_uncached(dev, attrs)) ||
+	     !dev_is_dma_coherent(dev)) ||
 	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
 		/* remove any dirty cache lines on the kernel alias */
 		arch_dma_prep_coherent(page, size);
@@ -241,7 +218,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	memset(ret, 0, size);
 
 	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    dma_alloc_need_uncached(dev, attrs)) {
+	    !dev_is_dma_coherent(dev)) {
 		arch_dma_prep_coherent(page, size);
 		ret = arch_dma_set_uncached(ret, size);
 		if (IS_ERR(ret))
@@ -269,25 +246,25 @@ void dma_direct_free(struct device *dev, size_t size,
 {
 	unsigned int page_order = get_order(size);
 
+	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+	    !force_dma_unencrypted(dev)) {
+		/* cpu_addr is a struct page cookie, not a kernel address */
+		dma_free_contiguous(dev, cpu_addr, size);
+		return;
+	}
+
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    dma_alloc_need_uncached(dev, attrs)) {
+	    !dev_is_dma_coherent(dev)) {
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
 		return;
 	}
 
 	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
-	if (dma_should_free_from_pool(dev, attrs) &&
+	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
 		return;
 
-	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-	    !force_dma_unencrypted(dev)) {
-		/* cpu_addr is a struct page cookie, not a kernel address */
-		dma_free_contiguous(dev, cpu_addr, size);
-		return;
-	}
-
 	if (force_dma_unencrypted(dev))
 		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
 
@@ -305,7 +282,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	struct page *page;
 	void *ret;
 
-	if (dma_should_alloc_from_pool(dev, gfp, 0))
+	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
+	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	page = __dma_direct_alloc_pages(dev, size, gfp);
@@ -344,7 +322,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 	void *vaddr = page_address(page);
 
 	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
-	if (dma_should_free_from_pool(dev, 0) &&
+	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
 	    dma_free_from_pool(dev, vaddr, size))
 		return;
 
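For readability, the condition that replaces the removed dma_should_alloc_from_pool() helper is now open-coded directly in dma_direct_alloc(). Read on its own, the new decision amounts to the predicate below — a sketch assembled from the hunks above for illustration; the function name is hypothetical and no such helper is added by the patch:

/*
 * Sketch of the pool-allocation decision that dma_direct_alloc() now
 * open-codes: fall back to the atomic pools only when the caller cannot
 * block but remapping or decrypting the buffer might have to.
 * Illustrative only; not part of the patch.
 */
static bool alloc_must_use_atomic_pool(struct device *dev, gfp_t gfp)
{
	if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
		return false;
	if (gfpflags_allow_blocking(gfp))
		return false;
	return force_dma_unencrypted(dev) ||
	       (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev));
}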