
Commit f4111e3

Claire Chang authored and konradwilk committed
swiotlb: Add restricted DMA alloc/free support
Add the functions swiotlb_{alloc,free} and is_swiotlb_for_alloc to support memory allocation from the restricted DMA pool. The restricted DMA pool is preferred if available.

Note that since coherent allocation needs remapping, one must set up another device coherent pool via shared-dma-pool and use dma_alloc_from_dev_coherent instead for atomic coherent allocation.

Signed-off-by: Claire Chang <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Tested-by: Stefano Stabellini <[email protected]>
Tested-by: Will Deacon <[email protected]>
Acked-by: Stefano Stabellini <[email protected]>
Signed-off-by: Konrad Rzeszutek Wilk <[email protected]>
1 parent 7034787 commit f4111e3
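
For orientation, here is a minimal sketch (not part of this commit) of how a driver whose device is attached to a restricted DMA pool would reach the new allocation path through the ordinary DMA API; the function name, size, and error handling below are illustrative assumptions:

#include <linux/dma-mapping.h>

/*
 * Illustrative sketch only.  Once dev->dma_io_tlb_mem->for_alloc is set for
 * this device, dma_alloc_pages() reaches __dma_direct_alloc_pages(), which
 * with this commit calls swiotlb_alloc() and returns pages carved out of the
 * restricted pool instead of CMA/buddy pages.
 */
static int example_restricted_dma_alloc(struct device *dev)
{
	dma_addr_t dma_handle;
	struct page *page;

	page = dma_alloc_pages(dev, PAGE_SIZE, &dma_handle,
			       DMA_BIDIRECTIONAL, GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* ... hand dma_handle to the device ... */

	dma_free_pages(dev, PAGE_SIZE, page, dma_handle, DMA_BIDIRECTIONAL);
	return 0;
}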

File tree

3 files changed: +99 −14 lines


include/linux/swiotlb.h

Lines changed: 26 additions & 0 deletions
@@ -85,6 +85,7 @@ extern enum swiotlb_force swiotlb_force;
  * @debugfs:	The dentry to debugfs.
  * @late_alloc:	%true if allocated using the page allocator
  * @force_bounce: %true if swiotlb bouncing is forced
+ * @for_alloc:	%true if the pool is used for memory allocation
  */
 struct io_tlb_mem {
 	phys_addr_t start;
@@ -96,6 +97,7 @@ struct io_tlb_mem {
 	struct dentry *debugfs;
 	bool late_alloc;
 	bool force_bounce;
+	bool for_alloc;
 	struct io_tlb_slot {
 		phys_addr_t orig_addr;
 		size_t alloc_size;
@@ -158,4 +160,28 @@ static inline void swiotlb_adjust_size(unsigned long size)
 extern void swiotlb_print_info(void);
 extern void swiotlb_set_max_segment(unsigned int);
 
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+struct page *swiotlb_alloc(struct device *dev, size_t size);
+bool swiotlb_free(struct device *dev, struct page *page, size_t size);
+
+static inline bool is_swiotlb_for_alloc(struct device *dev)
+{
+	return dev->dma_io_tlb_mem->for_alloc;
+}
+#else
+static inline struct page *swiotlb_alloc(struct device *dev, size_t size)
+{
+	return NULL;
+}
+static inline bool swiotlb_free(struct device *dev, struct page *page,
+				size_t size)
+{
+	return false;
+}
+static inline bool is_swiotlb_for_alloc(struct device *dev)
+{
+	return false;
+}
+#endif /* CONFIG_DMA_RESTRICTED_POOL */
+
 #endif /* __LINUX_SWIOTLB_H */
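
The new for_alloc flag is not set anywhere in this header; it is expected to be set by the code that binds a restricted pool to a device elsewhere in this series. A rough sketch of that idea, with the helper name and surrounding setup invented purely for illustration:

/*
 * Sketch only: the reserved-memory hook that attaches a restricted pool to a
 * device (a companion patch, not this commit) is expected to mark the pool
 * as allocatable roughly like this.  The helper name is hypothetical.
 */
static void example_attach_restricted_pool(struct device *dev,
					   struct io_tlb_mem *mem)
{
	mem->for_alloc = true;		/* new flag added above */
	dev->dma_io_tlb_mem = mem;	/* makes is_swiotlb_for_alloc(dev) true */
}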

kernel/dma/direct.c

Lines changed: 37 additions & 12 deletions
@@ -75,6 +75,15 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
+static void __dma_direct_free_pages(struct device *dev, struct page *page,
+				    size_t size)
+{
+	if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
+	    swiotlb_free(dev, page, size))
+		return;
+	dma_free_contiguous(dev, page, size);
+}
+
 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		gfp_t gfp)
 {
@@ -86,6 +95,16 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 
 	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 					   &phys_limit);
+	if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
+	    is_swiotlb_for_alloc(dev)) {
+		page = swiotlb_alloc(dev, size);
+		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+			__dma_direct_free_pages(dev, page, size);
+			return NULL;
+		}
+		return page;
+	}
+
 	page = dma_alloc_contiguous(dev, size, gfp);
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 		dma_free_contiguous(dev, page, size);
@@ -142,7 +161,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		gfp |= __GFP_NOWARN;
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-	    !force_dma_unencrypted(dev)) {
+	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
 		page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
 		if (!page)
 			return NULL;
@@ -155,18 +174,23 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	}
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    !dev_is_dma_coherent(dev))
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev) &&
+	    !is_swiotlb_for_alloc(dev))
 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 
 	/*
 	 * Remapping or decrypting memory may block. If either is required and
 	 * we can't block, allocate the memory from the atomic pools.
+	 * If restricted DMA (i.e., is_swiotlb_for_alloc) is required, one must
+	 * set up another device coherent pool by shared-dma-pool and use
+	 * dma_alloc_from_dev_coherent instead.
 	 */
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
 	    !gfpflags_allow_blocking(gfp) &&
 	    (force_dma_unencrypted(dev) ||
-	     (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev))))
+	     (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	      !dev_is_dma_coherent(dev))) &&
+	    !is_swiotlb_for_alloc(dev))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	/* we always manually zero the memory once we are done */
@@ -237,7 +261,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		return NULL;
 	}
 out_free_pages:
-	dma_free_contiguous(dev, page, size);
+	__dma_direct_free_pages(dev, page, size);
 	return NULL;
 }
 
@@ -247,15 +271,15 @@ void dma_direct_free(struct device *dev, size_t size,
 	unsigned int page_order = get_order(size);
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-	    !force_dma_unencrypted(dev)) {
+	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
 		dma_free_contiguous(dev, cpu_addr, size);
 		return;
 	}
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    !dev_is_dma_coherent(dev)) {
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev) &&
+	    !is_swiotlb_for_alloc(dev)) {
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
 		return;
 	}
@@ -273,7 +297,7 @@ void dma_direct_free(struct device *dev, size_t size,
 	else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
 		arch_dma_clear_uncached(cpu_addr, size);
 
-	dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
+	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
 }
 
 struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
@@ -283,7 +307,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	void *ret;
 
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
-	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp))
+	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
+	    !is_swiotlb_for_alloc(dev))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	page = __dma_direct_alloc_pages(dev, size, gfp);
@@ -310,7 +335,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
 	return page;
 out_free_pages:
-	dma_free_contiguous(dev, page, size);
+	__dma_direct_free_pages(dev, page, size);
 	return NULL;
 }
 
@@ -329,7 +354,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 	if (force_dma_unencrypted(dev))
 		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
 
-	dma_free_contiguous(dev, page, size);
+	__dma_direct_free_pages(dev, page, size);
 }
 
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
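
As the comment added in dma_direct_alloc() notes, atomic coherent allocations cannot be served from the restricted pool because remapping may block; callers are instead expected to provide a per-device coherent pool via shared-dma-pool. A hedged sketch of that fallback using the existing dma_alloc_from_dev_coherent() helper follows; the wrapper below is illustrative and not code from this commit (in mainline the equivalent check sits in dma_alloc_attrs()):

#include <linux/dma-map-ops.h>

/*
 * Simplified illustration: dma_alloc_attrs() tries the per-device coherent
 * pool (declared with a shared-dma-pool reserved-memory region) before
 * falling back to dma_direct_alloc(), so atomic coherent allocations can
 * still succeed even though the restricted pool cannot be remapped here.
 */
static void *example_atomic_coherent_alloc(struct device *dev, size_t size,
					   dma_addr_t *dma_handle)
{
	void *vaddr = NULL;

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
		return vaddr;	/* served from the device coherent pool */

	return NULL;		/* no device pool; mainline continues to the other paths */
}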

kernel/dma/swiotlb.c

Lines changed: 36 additions & 2 deletions
@@ -462,8 +462,9 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
 
 	index = wrap = wrap_index(mem, ALIGN(mem->index, stride));
 	do {
-		if ((slot_addr(tbl_dma_addr, index) & iotlb_align_mask) !=
-		    (orig_addr & iotlb_align_mask)) {
+		if (orig_addr &&
+		    (slot_addr(tbl_dma_addr, index) & iotlb_align_mask) !=
+			    (orig_addr & iotlb_align_mask)) {
 			index = wrap_index(mem, index + 1);
 			continue;
 		}
@@ -702,3 +703,36 @@ static int __init swiotlb_create_default_debugfs(void)
 late_initcall(swiotlb_create_default_debugfs);
 
 #endif
+
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+struct page *swiotlb_alloc(struct device *dev, size_t size)
+{
+	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
+	phys_addr_t tlb_addr;
+	int index;
+
+	if (!mem)
+		return NULL;
+
+	index = swiotlb_find_slots(dev, 0, size);
+	if (index == -1)
+		return NULL;
+
+	tlb_addr = slot_addr(mem->start, index);
+
+	return pfn_to_page(PFN_DOWN(tlb_addr));
+}
+
+bool swiotlb_free(struct device *dev, struct page *page, size_t size)
+{
+	phys_addr_t tlb_addr = page_to_phys(page);
+
+	if (!is_swiotlb_buffer(dev, tlb_addr))
+		return false;
+
+	swiotlb_release_slots(dev, tlb_addr);
+
+	return true;
+}
+
+#endif /* CONFIG_DMA_RESTRICTED_POOL */
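
Two details worth noting here: swiotlb_alloc() passes orig_addr == 0 to swiotlb_find_slots(), which is why the search loop above now skips the alignment check when orig_addr is zero; and the returned page is derived purely from the slot index. A worked sketch of that arithmetic, assuming the mainline definitions slot_addr(start, idx) == start + (idx << IO_TLB_SHIFT) and IO_TLB_SHIFT == 11 (2 KiB slots), with a made-up pool base address:

#include <linux/mm.h>

/*
 * Worked example with hypothetical numbers: how swiotlb_alloc() turns the
 * slot index from swiotlb_find_slots() into a page.  slot_addr() is
 * open-coded here for clarity.
 */
static struct page *example_slot_to_page(void)
{
	phys_addr_t start = 0x80000000;	/* hypothetical restricted pool base */
	int index = 4;			/* slot run found by swiotlb_find_slots() */
	phys_addr_t tlb_addr;

	tlb_addr = start + ((phys_addr_t)index << 11);	/* 0x80000000 + 4 * 2048 = 0x80002000 */
	return pfn_to_page(PFN_DOWN(tlb_addr));		/* first page backing the run */
}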
