@@ -12,6 +12,7 @@
 #include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/mm.h>
+#include <linux/genalloc.h>
 #include <linux/gfp.h>
 #include <linux/errno.h>
 #include <linux/list.h>
@@ -314,23 +315,13 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
 }
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
+static struct gen_pool *atomic_pool;
 
-struct dma_pool {
-	size_t size;
-	spinlock_t lock;
-	unsigned long *bitmap;
-	unsigned long nr_pages;
-	void *vaddr;
-	struct page **pages;
-};
-
-static struct dma_pool atomic_pool = {
-	.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
-};
+static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
 
 static int __init early_coherent_pool(char *p)
 {
-	atomic_pool.size = memparse(p, &p);
+	atomic_pool_size = memparse(p, &p);
 	return 0;
 }
 
 early_param("coherent_pool", early_coherent_pool);
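
The coherent_pool= boot argument is converted to a byte count by memparse(), which accepts the usual K/M/G suffixes (so coherent_pool=1M yields 1048576 bytes). A minimal sketch of that step; parse_pool_size is a hypothetical name, not part of this patch:

	#include <linux/kernel.h>	/* memparse() */

	/* Hypothetical helper: turn a boot argument such as "1M" into bytes. */
	static size_t parse_pool_size(char *p)
	{
		/* memparse() understands K/M/G suffixes: "1M" -> 1048576. */
		return memparse(p, &p);
	}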
@@ -340,67 +331,59 @@ void __init init_dma_coherent_pool_size(unsigned long size)
 	/*
 	 * Catch any attempt to set the pool size too late.
 	 */
-	BUG_ON(atomic_pool.vaddr);
+	BUG_ON(atomic_pool);
 
 	/*
 	 * Set architecture specific coherent pool size only if
 	 * it has not been changed by kernel command line parameter.
 	 */
-	if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
-		atomic_pool.size = size;
+	if (atomic_pool_size == DEFAULT_DMA_COHERENT_POOL_SIZE)
+		atomic_pool_size = size;
 }
 
 /*
  * Initialise the coherent pool for atomic allocations.
  */
 static int __init atomic_pool_init(void)
 {
-	struct dma_pool *pool = &atomic_pool;
 	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
 	gfp_t gfp = GFP_KERNEL | GFP_DMA;
-	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
-	unsigned long *bitmap;
 	struct page *page;
-	struct page **pages;
 	void *ptr;
-	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
-
-	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!bitmap)
-		goto no_bitmap;
 
-	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
-	if (!pages)
-		goto no_pages;
+	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
+	if (!atomic_pool)
+		goto out;
 
 	if (dev_get_cma_area(NULL))
-		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
-					      atomic_pool_init);
+		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
+					      &page, atomic_pool_init);
 	else
-		ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
-					   atomic_pool_init);
+		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
+					   &page, atomic_pool_init);
 	if (ptr) {
-		int i;
-
-		for (i = 0; i < nr_pages; i++)
-			pages[i] = page + i;
-
-		spin_lock_init(&pool->lock);
-		pool->vaddr = ptr;
-		pool->pages = pages;
-		pool->bitmap = bitmap;
-		pool->nr_pages = nr_pages;
-		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
-			(unsigned)pool->size / 1024);
+		int ret;
+
+		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
+					page_to_phys(page),
+					atomic_pool_size, -1);
+		if (ret)
+			goto destroy_genpool;
+
+		gen_pool_set_algo(atomic_pool,
+				gen_pool_first_fit_order_align,
+				(void *)PAGE_SHIFT);
+		pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
+			atomic_pool_size / 1024);
 		return 0;
 	}
 
-	kfree(pages);
-no_pages:
-	kfree(bitmap);
-no_bitmap:
-	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
-	       (unsigned)pool->size / 1024);
+destroy_genpool:
+	gen_pool_destroy(atomic_pool);
+	atomic_pool = NULL;
+out:
+	pr_err("DMA: failed to allocate %zx KiB pool for atomic coherent allocation\n",
+	       atomic_pool_size / 1024);
 	return -ENOMEM;
 }
 /*
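
The new code follows the standard lib/genalloc.c lifecycle: create a pool whose minimum allocation order is PAGE_SHIFT, register the preallocated virtual/physical span, then pick an allocation algorithm. A condensed sketch of that sequence, using hypothetical names (pool, example_pool_setup) and assuming vaddr/phys/size describe an already-mapped coherent buffer:

	#include <linux/genalloc.h>

	static struct gen_pool *pool;	/* stands in for atomic_pool */

	static int example_pool_setup(void *vaddr, phys_addr_t phys, size_t size)
	{
		int ret;

		/* Minimum allocation unit is one page (order PAGE_SHIFT). */
		pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!pool)
			return -ENOMEM;

		/* Register the span so gen_pool_virt_to_phys() works later. */
		ret = gen_pool_add_virt(pool, (unsigned long)vaddr, phys,
					size, -1);
		if (ret) {
			gen_pool_destroy(pool);
			return ret;
		}

		/*
		 * Round each allocation's alignment up to its order, as the
		 * old bitmap code did with (1 << get_order(size)) - 1.
		 */
		gen_pool_set_algo(pool, gen_pool_first_fit_order_align,
				  (void *)PAGE_SHIFT);
		return 0;
	}

gen_pool_first_fit_order_align preserves the old allocator's anti-fragmentation behaviour, so small atomic allocations keep the same alignment guarantees as before.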
@@ -504,76 +487,36 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 
 static void *__alloc_from_pool(size_t size, struct page **ret_page)
 {
-	struct dma_pool *pool = &atomic_pool;
-	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned int pageno;
-	unsigned long flags;
+	unsigned long val;
 	void *ptr = NULL;
-	unsigned long align_mask;
 
-	if (!pool->vaddr) {
+	if (!atomic_pool) {
 		WARN(1, "coherent pool not initialised!\n");
 		return NULL;
 	}
 
-	/*
-	 * Align the region allocation - allocations from pool are rather
-	 * small, so align them to their order in pages, minimum is a page
-	 * size. This helps reduce fragmentation of the DMA space.
-	 */
-	align_mask = (1 << get_order(size)) - 1;
-
-	spin_lock_irqsave(&pool->lock, flags);
-	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
-					    0, count, align_mask);
-	if (pageno < pool->nr_pages) {
-		bitmap_set(pool->bitmap, pageno, count);
-		ptr = pool->vaddr + PAGE_SIZE * pageno;
-		*ret_page = pool->pages[pageno];
-	} else {
-		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
-			    "Please increase it with coherent_pool= kernel parameter!\n",
-			    (unsigned)pool->size / 1024);
+	val = gen_pool_alloc(atomic_pool, size);
+	if (val) {
+		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
+
+		*ret_page = phys_to_page(phys);
+		ptr = (void *)val;
 	}
-	spin_unlock_irqrestore(&pool->lock, flags);
 
 	return ptr;
 }
 
 static bool __in_atomic_pool(void *start, size_t size)
 {
-	struct dma_pool *pool = &atomic_pool;
-	void *end = start + size;
-	void *pool_start = pool->vaddr;
-	void *pool_end = pool->vaddr + pool->size;
-
-	if (start < pool_start || start >= pool_end)
-		return false;
-
-	if (end <= pool_end)
-		return true;
-
-	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
-	     start, end - 1, pool_start, pool_end - 1);
-
-	return false;
+	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}
 
 static int __free_from_pool(void *start, size_t size)
 {
-	struct dma_pool *pool = &atomic_pool;
-	unsigned long pageno, count;
-	unsigned long flags;
-
 	if (!__in_atomic_pool(start, size))
 		return 0;
 
-	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
-	count = size >> PAGE_SHIFT;
-
-	spin_lock_irqsave(&pool->lock, flags);
-	bitmap_clear(pool->bitmap, pageno, count);
-	spin_unlock_irqrestore(&pool->lock, flags);
+	gen_pool_free(atomic_pool, (unsigned long)start, size);
 
 	return 1;
 }
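
With genalloc, the hand-rolled bitmap search, the pool spinlock, and the bounds check all collapse into library calls. gen_pool manages its bitmap with atomic operations and retries rather than locks, so gen_pool_alloc() remains safe to call from atomic context, which is the whole point of this pool. A minimal round-trip sketch under the same hypothetical pool as above (example_alloc/example_free are illustrative names):

	/* Allocate from the pool and report the backing struct page. */
	static void *example_alloc(struct gen_pool *pool, size_t size,
				   struct page **ret_page)
	{
		unsigned long val = gen_pool_alloc(pool, size);

		if (!val)
			return NULL;	/* pool exhausted */

		/* Recover the page via the phys base registered earlier. */
		*ret_page = phys_to_page(gen_pool_virt_to_phys(pool, val));
		return (void *)val;
	}

	/* Free only if [vaddr, vaddr + size) really lies in the pool. */
	static void example_free(struct gen_pool *pool, void *vaddr, size_t size)
	{
		if (addr_in_gen_pool(pool, (unsigned long)vaddr, size))
			gen_pool_free(pool, (unsigned long)vaddr, size);
	}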
@@ -1316,11 +1259,13 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
 
 static struct page **__atomic_get_pages(void *addr)
 {
-	struct dma_pool *pool = &atomic_pool;
-	struct page **pages = pool->pages;
-	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+	struct page *page;
+	phys_addr_t phys;
+
+	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
+	page = phys_to_page(phys);
 
-	return pages + offs;
+	return (struct page **)page;
 }
 
 static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)