Commit 36d0fd2

labbott authored and torvalds committed
arm: use genalloc for the atomic pool
ARM currently uses a bitmap for tracking atomic allocations. genalloc already handles this type of memory pool allocation, so switch to using that instead.

Signed-off-by: Laura Abbott <[email protected]>
Reviewed-by: Catalin Marinas <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: David Riley <[email protected]>
Cc: Olof Johansson <[email protected]>
Cc: Ritesh Harjain <[email protected]>
Cc: Russell King <[email protected]>
Cc: Thierry Reding <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
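For readers unfamiliar with genalloc, the pattern this patch adopts looks roughly like the sketch below. This is illustrative commentary, not code from the commit: the demo_* names are invented, but the gen_pool_* calls are the real lib/genalloc.c API exercised by the diff.

	#include <linux/genalloc.h>
	#include <linux/mm.h>

	static struct gen_pool *demo_pool;	/* hypothetical pool, for illustration */

	/* Create a pool and hand it one preallocated, already-mapped region. */
	static int demo_pool_setup(void *vaddr, phys_addr_t phys, size_t size)
	{
		int ret;

		/* Minimum allocation granule of one page; -1 = any NUMA node. */
		demo_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!demo_pool)
			return -ENOMEM;

		/* Register the chunk with both its virtual and physical address
		 * so gen_pool_virt_to_phys() can translate allocations later. */
		ret = gen_pool_add_virt(demo_pool, (unsigned long)vaddr, phys,
					size, -1);
		if (ret) {
			gen_pool_destroy(demo_pool);
			demo_pool = NULL;
		}
		return ret;
	}

	/* Carve an allocation out of the pool and report its struct page. */
	static void *demo_alloc(size_t size, struct page **ret_page)
	{
		unsigned long va = gen_pool_alloc(demo_pool, size);

		if (!va)
			return NULL;
		*ret_page = phys_to_page(gen_pool_virt_to_phys(demo_pool, va));
		return (void *)va;
	}

	static void demo_free(void *va, size_t size)
	{
		gen_pool_free(demo_pool, (unsigned long)va, size);
	}

Because genalloc tracks free space itself (with lockless bitmap updates), the hand-rolled bitmap, the pages array and the pool spinlock of the old code all become unnecessary, which is where most of the 104 deleted lines below come from.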
1 parent 513510d commit 36d0fd2

File tree

2 files changed (+50, -104 lines)

arch/arm/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -14,6 +14,7 @@ config ARM
 	select CLONE_BACKWARDS
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
+	select GENERIC_ALLOCATOR
 	select GENERIC_ATOMIC64 if (CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI)
 	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
 	select GENERIC_IDLE_POLL_SETUP

arch/arm/mm/dma-mapping.c

Lines changed: 49 additions & 104 deletions
@@ -12,6 +12,7 @@
 #include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/mm.h>
+#include <linux/genalloc.h>
 #include <linux/gfp.h>
 #include <linux/errno.h>
 #include <linux/list.h>
@@ -314,23 +315,13 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
 }
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
+static struct gen_pool *atomic_pool;
 
-struct dma_pool {
-	size_t size;
-	spinlock_t lock;
-	unsigned long *bitmap;
-	unsigned long nr_pages;
-	void *vaddr;
-	struct page **pages;
-};
-
-static struct dma_pool atomic_pool = {
-	.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
-};
+static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
 
 static int __init early_coherent_pool(char *p)
 {
-	atomic_pool.size = memparse(p, &p);
+	atomic_pool_size = memparse(p, &p);
 	return 0;
 }
 early_param("coherent_pool", early_coherent_pool);
@@ -340,67 +331,59 @@ void __init init_dma_coherent_pool_size(unsigned long size)
 	/*
 	 * Catch any attempt to set the pool size too late.
 	 */
-	BUG_ON(atomic_pool.vaddr);
+	BUG_ON(atomic_pool);
 
 	/*
 	 * Set architecture specific coherent pool size only if
 	 * it has not been changed by kernel command line parameter.
 	 */
-	if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
-		atomic_pool.size = size;
+	if (atomic_pool_size == DEFAULT_DMA_COHERENT_POOL_SIZE)
+		atomic_pool_size = size;
 }
 
 /*
  * Initialise the coherent pool for atomic allocations.
  */
 static int __init atomic_pool_init(void)
 {
-	struct dma_pool *pool = &atomic_pool;
 	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
 	gfp_t gfp = GFP_KERNEL | GFP_DMA;
-	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
-	unsigned long *bitmap;
 	struct page *page;
-	struct page **pages;
 	void *ptr;
-	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
-
-	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!bitmap)
-		goto no_bitmap;
 
-	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
-	if (!pages)
-		goto no_pages;
+	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
+	if (!atomic_pool)
+		goto out;
 
 	if (dev_get_cma_area(NULL))
-		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
-					      atomic_pool_init);
+		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
+					      &page, atomic_pool_init);
 	else
-		ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
-					   atomic_pool_init);
+		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
+					   &page, atomic_pool_init);
 	if (ptr) {
-		int i;
-
-		for (i = 0; i < nr_pages; i++)
-			pages[i] = page + i;
-
-		spin_lock_init(&pool->lock);
-		pool->vaddr = ptr;
-		pool->pages = pages;
-		pool->bitmap = bitmap;
-		pool->nr_pages = nr_pages;
-		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
-			(unsigned)pool->size / 1024);
+		int ret;
+
+		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
+					page_to_phys(page),
+					atomic_pool_size, -1);
+		if (ret)
+			goto destroy_genpool;
+
+		gen_pool_set_algo(atomic_pool,
+				gen_pool_first_fit_order_align,
+				(void *)PAGE_SHIFT);
+		pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
+			atomic_pool_size / 1024);
 		return 0;
 	}
 
-	kfree(pages);
-no_pages:
-	kfree(bitmap);
-no_bitmap:
-	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
-	       (unsigned)pool->size / 1024);
+destroy_genpool:
+	gen_pool_destroy(atomic_pool);
+	atomic_pool = NULL;
+out:
+	pr_err("DMA: failed to allocate %zx KiB pool for atomic coherent allocation\n",
+	       atomic_pool_size / 1024);
 	return -ENOMEM;
 }
 /*
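
A note on alignment (commentary, not part of the commit): the comment deleted from __alloc_from_pool below explained that allocations were aligned to their own order in pages to reduce fragmentation of the DMA space. The gen_pool_set_algo() call above preserves that policy, computed once per pool rather than per allocation; a worked example of the equivalence, with the 12 KiB figure chosen arbitrarily:

	/* Old code, per allocation: align_mask = (1 << get_order(size)) - 1;
	 *   e.g. 12 KiB -> get_order() == 2 -> placed on a 4-page (16 KiB)
	 *   boundary.
	 * New code, set once on the pool: gen_pool_first_fit_order_align
	 * rounds each request's alignment up to the power of two of its
	 * size in pages, so the same 12 KiB (3-page) request still lands
	 * on a 16 KiB boundary. */
	gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align,
			  (void *)PAGE_SHIFT);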
@@ -504,76 +487,36 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 
 static void *__alloc_from_pool(size_t size, struct page **ret_page)
 {
-	struct dma_pool *pool = &atomic_pool;
-	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned int pageno;
-	unsigned long flags;
+	unsigned long val;
 	void *ptr = NULL;
-	unsigned long align_mask;
 
-	if (!pool->vaddr) {
+	if (!atomic_pool) {
 		WARN(1, "coherent pool not initialised!\n");
 		return NULL;
 	}
 
-	/*
-	 * Align the region allocation - allocations from pool are rather
-	 * small, so align them to their order in pages, minimum is a page
-	 * size. This helps reduce fragmentation of the DMA space.
-	 */
-	align_mask = (1 << get_order(size)) - 1;
-
-	spin_lock_irqsave(&pool->lock, flags);
-	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
-					    0, count, align_mask);
-	if (pageno < pool->nr_pages) {
-		bitmap_set(pool->bitmap, pageno, count);
-		ptr = pool->vaddr + PAGE_SIZE * pageno;
-		*ret_page = pool->pages[pageno];
-	} else {
-		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
-			    "Please increase it with coherent_pool= kernel parameter!\n",
-			    (unsigned)pool->size / 1024);
+	val = gen_pool_alloc(atomic_pool, size);
+	if (val) {
+		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
+
+		*ret_page = phys_to_page(phys);
+		ptr = (void *)val;
 	}
-	spin_unlock_irqrestore(&pool->lock, flags);
 
 	return ptr;
 }
 
 static bool __in_atomic_pool(void *start, size_t size)
 {
-	struct dma_pool *pool = &atomic_pool;
-	void *end = start + size;
-	void *pool_start = pool->vaddr;
-	void *pool_end = pool->vaddr + pool->size;
-
-	if (start < pool_start || start >= pool_end)
-		return false;
-
-	if (end <= pool_end)
-		return true;
-
-	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
-	     start, end - 1, pool_start, pool_end - 1);
-
-	return false;
+	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
 }
 
 static int __free_from_pool(void *start, size_t size)
 {
-	struct dma_pool *pool = &atomic_pool;
-	unsigned long pageno, count;
-	unsigned long flags;
-
 	if (!__in_atomic_pool(start, size))
 		return 0;
 
-	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
-	count = size >> PAGE_SHIFT;
-
-	spin_lock_irqsave(&pool->lock, flags);
-	bitmap_clear(pool->bitmap, pageno, count);
-	spin_unlock_irqrestore(&pool->lock, flags);
+	gen_pool_free(atomic_pool, (unsigned long)start, size);
 
 	return 1;
 }
@@ -1316,11 +1259,13 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
 
 static struct page **__atomic_get_pages(void *addr)
 {
-	struct dma_pool *pool = &atomic_pool;
-	struct page **pages = pool->pages;
-	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+	struct page *page;
+	phys_addr_t phys;
+
+	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
+	page = phys_to_page(phys);
 
-	return pages + offs;
+	return (struct page **)page;
 }
 
 static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
