@@ -75,6 +75,15 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
+static void __dma_direct_free_pages(struct device *dev, struct page *page,
+				    size_t size)
+{
+	if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
+	    swiotlb_free(dev, page, size))
+		return;
+	dma_free_contiguous(dev, page, size);
+}
+
 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		gfp_t gfp)
 {
@@ -86,6 +95,16 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 
 	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 					   &phys_limit);
+	if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
+	    is_swiotlb_for_alloc(dev)) {
+		page = swiotlb_alloc(dev, size);
+		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+			__dma_direct_free_pages(dev, page, size);
+			return NULL;
+		}
+		return page;
+	}
+
 	page = dma_alloc_contiguous(dev, size, gfp);
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 		dma_free_contiguous(dev, page, size);
@@ -142,7 +161,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		gfp |= __GFP_NOWARN;
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-	    !force_dma_unencrypted(dev)) {
+	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
 		page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
 		if (!page)
 			return NULL;
@@ -155,18 +174,23 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	}
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    !dev_is_dma_coherent(dev))
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev) &&
+	    !is_swiotlb_for_alloc(dev))
 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 
 	/*
 	 * Remapping or decrypting memory may block. If either is required and
 	 * we can't block, allocate the memory from the atomic pools.
+	 * If restricted DMA (i.e., is_swiotlb_for_alloc) is required, one must
+	 * set up another device coherent pool by shared-dma-pool and use
+	 * dma_alloc_from_dev_coherent instead.
 	 */
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
 	    !gfpflags_allow_blocking(gfp) &&
 	    (force_dma_unencrypted(dev) ||
-	     (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev))))
+	     (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	      !dev_is_dma_coherent(dev))) &&
+	    !is_swiotlb_for_alloc(dev))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	/* we always manually zero the memory once we are done */
@@ -237,7 +261,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		return NULL;
 	}
 out_free_pages:
-	dma_free_contiguous(dev, page, size);
+	__dma_direct_free_pages(dev, page, size);
 	return NULL;
 }
 
@@ -247,15 +271,15 @@ void dma_direct_free(struct device *dev, size_t size,
 	unsigned int page_order = get_order(size);
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-	    !force_dma_unencrypted(dev)) {
+	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
 		dma_free_contiguous(dev, cpu_addr, size);
 		return;
 	}
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    !dev_is_dma_coherent(dev)) {
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev) &&
+	    !is_swiotlb_for_alloc(dev)) {
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
 		return;
 	}
@@ -273,7 +297,7 @@ void dma_direct_free(struct device *dev, size_t size,
 	else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
 		arch_dma_clear_uncached(cpu_addr, size);
 
-	dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
+	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
 }
 
 struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
@@ -283,7 +307,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	void *ret;
 
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
-	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp))
+	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
+	    !is_swiotlb_for_alloc(dev))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	page = __dma_direct_alloc_pages(dev, size, gfp);
@@ -310,7 +335,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
 	return page;
 out_free_pages:
-	dma_free_contiguous(dev, page, size);
+	__dma_direct_free_pages(dev, page, size);
 	return NULL;
 }
 
@@ -329,7 +354,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 	if (force_dma_unencrypted(dev))
 		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
 
-	dma_free_contiguous(dev, page, size);
+	__dma_direct_free_pages(dev, page, size);
 }
 
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
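Driver-side context (not part of this commit): a minimal, hedged sketch of how the paths touched above are reached. Consumers keep calling the generic DMA API; on a device that uses dma-direct, with CONFIG_DMA_RESTRICTED_POOL enabled and a restricted swiotlb pool attached to the device, the coherent allocation is now served by swiotlb_alloc() and released through __dma_direct_free_pages()/swiotlb_free(). The helper name and buffer size below are invented for illustration only.

/* Hypothetical driver snippet; dma_alloc_coherent()/dma_free_coherent() are
 * the real APIs, everything else here is illustrative. */
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int example_dma_buffer_roundtrip(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	/* For dma-direct devices this ends up in dma_direct_alloc(); with a
	 * restricted pool the pages now come from the device's swiotlb pool. */
	cpu_addr = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... program dma_handle into the device and run the transfer ... */

	/* Ends up in dma_direct_free(), which after this patch funnels into
	 * __dma_direct_free_pages() and hence swiotlb_free() when applicable. */
	dma_free_coherent(dev, SZ_4K, cpu_addr, dma_handle);
	return 0;
}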