Skip to content
This repository was archived by the owner on Nov 8, 2023. It is now read-only.

Commit 463e862

Browse files
willdeacon authored and konradwilk committed
swiotlb: Convert io_default_tlb_mem to static allocation
Since commit 69031f5 ("swiotlb: Set dev->dma_io_tlb_mem to the swiotlb pool used"), 'struct device' may hold a copy of the global 'io_default_tlb_mem' pointer if the device is using swiotlb for DMA. A subsequent call to swiotlb_exit() will therefore leave dangling pointers behind in these device structures, resulting in KASAN splats such as: | BUG: KASAN: use-after-free in __iommu_dma_unmap_swiotlb+0x64/0xb0 | Read of size 8 at addr ffff8881d7830000 by task swapper/0/0 | | CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.12.0-rc3-debug #1 | Hardware name: HP HP Desktop M01-F1xxx/87D6, BIOS F.12 12/17/2020 | Call Trace: | <IRQ> | dump_stack+0x9c/0xcf | print_address_description.constprop.0+0x18/0x130 | kasan_report.cold+0x7f/0x111 | __iommu_dma_unmap_swiotlb+0x64/0xb0 | nvme_pci_complete_rq+0x73/0x130 | blk_complete_reqs+0x6f/0x80 | __do_softirq+0xfc/0x3be Convert 'io_default_tlb_mem' to a static structure, so that the per-device pointers remain valid after swiotlb_exit() has been invoked. All users are updated to reference the static structure directly, using the 'nslabs' field to determine whether swiotlb has been initialised. The 'slots' array is still allocated dynamically and referenced via a pointer rather than a flexible array member. Cc: Claire Chang <[email protected]> Cc: Christoph Hellwig <[email protected]> Cc: Robin Murphy <[email protected]> Cc: Konrad Rzeszutek Wilk <[email protected]> Fixes: 69031f5 ("swiotlb: Set dev->dma_io_tlb_mem to the swiotlb pool used") Reported-by: Nathan Chancellor <[email protected]> Tested-by: Nathan Chancellor <[email protected]> Tested-by: Claire Chang <[email protected]> Reviewed-by: Christoph Hellwig <[email protected]> Signed-off-by: Will Deacon <[email protected]> Signed-off-by: Konrad Rzeszutek Wilk <[email protected]>
1 parent 85044eb commit 463e862

File tree

4 files changed

+41
-35
lines changed

4 files changed

+41
-35
lines changed

drivers/base/core.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2848,7 +2848,7 @@ void device_initialize(struct device *dev)
28482848
dev->dma_coherent = dma_default_coherent;
28492849
#endif
28502850
#ifdef CONFIG_SWIOTLB
2851-
dev->dma_io_tlb_mem = io_tlb_default_mem;
2851+
dev->dma_io_tlb_mem = &io_tlb_default_mem;
28522852
#endif
28532853
}
28542854
EXPORT_SYMBOL_GPL(device_initialize);

drivers/xen/swiotlb-xen.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -164,7 +164,7 @@ int __ref xen_swiotlb_init(void)
164164
int rc = -ENOMEM;
165165
char *start;
166166

167-
if (io_tlb_default_mem != NULL) {
167+
if (io_tlb_default_mem.nslabs) {
168168
pr_warn("swiotlb buffer already initialized\n");
169169
return -EEXIST;
170170
}
@@ -547,7 +547,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
547547
static int
548548
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
549549
{
550-
return xen_phys_to_dma(hwdev, io_tlb_default_mem->end - 1) <= mask;
550+
return xen_phys_to_dma(hwdev, io_tlb_default_mem.end - 1) <= mask;
551551
}
552552

553553
const struct dma_map_ops xen_swiotlb_dma_ops = {

include/linux/swiotlb.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -103,9 +103,9 @@ struct io_tlb_mem {
103103
phys_addr_t orig_addr;
104104
size_t alloc_size;
105105
unsigned int list;
106-
} slots[];
106+
} *slots;
107107
};
108-
extern struct io_tlb_mem *io_tlb_default_mem;
108+
extern struct io_tlb_mem io_tlb_default_mem;
109109

110110
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
111111
{

kernel/dma/swiotlb.c

Lines changed: 36 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@
7070

7171
enum swiotlb_force swiotlb_force;
7272

73-
struct io_tlb_mem *io_tlb_default_mem;
73+
struct io_tlb_mem io_tlb_default_mem;
7474

7575
/*
7676
* Max segment that we can provide which (if pages are contingous) will
@@ -101,7 +101,7 @@ early_param("swiotlb", setup_io_tlb_npages);
101101

102102
unsigned int swiotlb_max_segment(void)
103103
{
104-
return io_tlb_default_mem ? max_segment : 0;
104+
return io_tlb_default_mem.nslabs ? max_segment : 0;
105105
}
106106
EXPORT_SYMBOL_GPL(swiotlb_max_segment);
107107

@@ -134,9 +134,9 @@ void __init swiotlb_adjust_size(unsigned long size)
134134

135135
void swiotlb_print_info(void)
136136
{
137-
struct io_tlb_mem *mem = io_tlb_default_mem;
137+
struct io_tlb_mem *mem = &io_tlb_default_mem;
138138

139-
if (!mem) {
139+
if (!mem->nslabs) {
140140
pr_warn("No low mem\n");
141141
return;
142142
}
@@ -163,11 +163,11 @@ static inline unsigned long nr_slots(u64 val)
163163
*/
164164
void __init swiotlb_update_mem_attributes(void)
165165
{
166-
struct io_tlb_mem *mem = io_tlb_default_mem;
166+
struct io_tlb_mem *mem = &io_tlb_default_mem;
167167
void *vaddr;
168168
unsigned long bytes;
169169

170-
if (!mem || mem->late_alloc)
170+
if (!mem->nslabs || mem->late_alloc)
171171
return;
172172
vaddr = phys_to_virt(mem->start);
173173
bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
@@ -201,25 +201,24 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
201201

202202
int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
203203
{
204-
struct io_tlb_mem *mem;
204+
struct io_tlb_mem *mem = &io_tlb_default_mem;
205205
size_t alloc_size;
206206

207207
if (swiotlb_force == SWIOTLB_NO_FORCE)
208208
return 0;
209209

210210
/* protect against double initialization */
211-
if (WARN_ON_ONCE(io_tlb_default_mem))
211+
if (WARN_ON_ONCE(mem->nslabs))
212212
return -ENOMEM;
213213

214-
alloc_size = PAGE_ALIGN(struct_size(mem, slots, nslabs));
215-
mem = memblock_alloc(alloc_size, PAGE_SIZE);
216-
if (!mem)
214+
alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
215+
mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
216+
if (!mem->slots)
217217
panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
218218
__func__, alloc_size, PAGE_SIZE);
219219

220220
swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
221221

222-
io_tlb_default_mem = mem;
223222
if (verbose)
224223
swiotlb_print_info();
225224
swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
@@ -304,45 +303,43 @@ swiotlb_late_init_with_default_size(size_t default_size)
304303
int
305304
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
306305
{
307-
struct io_tlb_mem *mem;
306+
struct io_tlb_mem *mem = &io_tlb_default_mem;
308307
unsigned long bytes = nslabs << IO_TLB_SHIFT;
309308

310309
if (swiotlb_force == SWIOTLB_NO_FORCE)
311310
return 0;
312311

313312
/* protect against double initialization */
314-
if (WARN_ON_ONCE(io_tlb_default_mem))
313+
if (WARN_ON_ONCE(mem->nslabs))
315314
return -ENOMEM;
316315

317-
mem = (void *)__get_free_pages(GFP_KERNEL,
318-
get_order(struct_size(mem, slots, nslabs)));
319-
if (!mem)
316+
mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
317+
get_order(array_size(sizeof(*mem->slots), nslabs)));
318+
if (!mem->slots)
320319
return -ENOMEM;
321320

322-
memset(mem, 0, sizeof(*mem));
323321
set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
324322
swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);
325323

326-
io_tlb_default_mem = mem;
327324
swiotlb_print_info();
328325
swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
329326
return 0;
330327
}
331328

332329
void __init swiotlb_exit(void)
333330
{
334-
struct io_tlb_mem *mem = io_tlb_default_mem;
335331
size_t size;
332+
struct io_tlb_mem *mem = &io_tlb_default_mem;
336333

337-
if (!mem)
334+
if (!mem->nslabs)
338335
return;
339336

340-
size = struct_size(mem, slots, mem->nslabs);
337+
size = array_size(sizeof(*mem->slots), mem->nslabs);
341338
if (mem->late_alloc)
342-
free_pages((unsigned long)mem, get_order(size));
339+
free_pages((unsigned long)mem->slots, get_order(size));
343340
else
344-
memblock_free_late(__pa(mem), PAGE_ALIGN(size));
345-
io_tlb_default_mem = NULL;
341+
memblock_free_late(__pa(mem->slots), PAGE_ALIGN(size));
342+
memset(mem, 0, sizeof(*mem));
346343
}
347344

348345
/*
@@ -696,7 +693,9 @@ size_t swiotlb_max_mapping_size(struct device *dev)
696693

697694
bool is_swiotlb_active(struct device *dev)
698695
{
699-
return dev->dma_io_tlb_mem != NULL;
696+
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
697+
698+
return mem && mem->nslabs;
700699
}
701700
EXPORT_SYMBOL_GPL(is_swiotlb_active);
702701

@@ -711,10 +710,10 @@ static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem)
711710

712711
static int __init swiotlb_create_default_debugfs(void)
713712
{
714-
struct io_tlb_mem *mem = io_tlb_default_mem;
713+
struct io_tlb_mem *mem = &io_tlb_default_mem;
715714

716715
debugfs_dir = debugfs_create_dir("swiotlb", NULL);
717-
if (mem) {
716+
if (mem->nslabs) {
718717
mem->debugfs = debugfs_dir;
719718
swiotlb_create_debugfs_files(mem);
720719
}
@@ -783,10 +782,17 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
783782
* to it.
784783
*/
785784
if (!mem) {
786-
mem = kzalloc(struct_size(mem, slots, nslabs), GFP_KERNEL);
785+
mem = kzalloc(sizeof(*mem), GFP_KERNEL);
787786
if (!mem)
788787
return -ENOMEM;
789788

789+
mem->slots = kzalloc(array_size(sizeof(*mem->slots), nslabs),
790+
GFP_KERNEL);
791+
if (!mem->slots) {
792+
kfree(mem);
793+
return -ENOMEM;
794+
}
795+
790796
set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
791797
rmem->size >> PAGE_SHIFT);
792798
swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
@@ -806,7 +812,7 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
806812
static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
807813
struct device *dev)
808814
{
809-
dev->dma_io_tlb_mem = io_tlb_default_mem;
815+
dev->dma_io_tlb_mem = &io_tlb_default_mem;
810816
}
811817

812818
static const struct reserved_mem_ops rmem_swiotlb_ops = {

0 commit comments

Comments (0)