 #include <linux/gfp.h>
 #include <linux/export.h>
 #include <linux/slab.h>
+#include <linux/genalloc.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
 #include <linux/vmalloc.h>
@@ -38,6 +39,54 @@ static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
         return prot;
 }
 
+static struct gen_pool *atomic_pool;
+
+#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
+static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
+
+static int __init early_coherent_pool(char *p)
+{
+        atomic_pool_size = memparse(p, &p);
+        return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
+
+static void *__alloc_from_pool(size_t size, struct page **ret_page)
+{
+        unsigned long val;
+        void *ptr = NULL;
+
+        if (!atomic_pool) {
+                WARN(1, "coherent pool not initialised!\n");
+                return NULL;
+        }
+
+        val = gen_pool_alloc(atomic_pool, size);
+        if (val) {
+                phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
+
+                *ret_page = phys_to_page(phys);
+                ptr = (void *)val;
+        }
+
+        return ptr;
+}
+
+static bool __in_atomic_pool(void *start, size_t size)
+{
+        return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
+}
+
+static int __free_from_pool(void *start, size_t size)
+{
+        if (!__in_atomic_pool(start, size))
+                return 0;
+
+        gen_pool_free(atomic_pool, (unsigned long)start, size);
+
+        return 1;
+}
+
 static void *__dma_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_handle, gfp_t flags,
                                   struct dma_attrs *attrs)
@@ -50,7 +99,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
         if (IS_ENABLED(CONFIG_ZONE_DMA) &&
             dev->coherent_dma_mask <= DMA_BIT_MASK(32))
                 flags |= GFP_DMA;
-        if (IS_ENABLED(CONFIG_DMA_CMA)) {
+        if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
                 struct page *page;
 
                 size = PAGE_ALIGN(size);
@@ -70,50 +119,54 @@ static void __dma_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
                                struct dma_attrs *attrs)
 {
+        bool freed;
+        phys_addr_t paddr = dma_to_phys(dev, dma_handle);
+
         if (dev == NULL) {
                 WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                 return;
         }
 
-        if (IS_ENABLED(CONFIG_DMA_CMA)) {
-                phys_addr_t paddr = dma_to_phys(dev, dma_handle);
-
-                dma_release_from_contiguous(dev,
+        freed = dma_release_from_contiguous(dev,
                                         phys_to_page(paddr),
                                         size >> PAGE_SHIFT);
-        } else {
+        if (!freed)
                 swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-        }
 }
 
 static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
                                      dma_addr_t *dma_handle, gfp_t flags,
                                      struct dma_attrs *attrs)
 {
-        struct page *page, **map;
+        struct page *page;
         void *ptr, *coherent_ptr;
-        int order, i;
 
         size = PAGE_ALIGN(size);
-        order = get_order(size);
+
+        if (!(flags & __GFP_WAIT)) {
+                struct page *page = NULL;
+                void *addr = __alloc_from_pool(size, &page);
+
+                if (addr)
+                        *dma_handle = phys_to_dma(dev, page_to_phys(page));
+
+                return addr;
+
+        }
 
         ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
         if (!ptr)
                 goto no_mem;
-        map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
-        if (!map)
-                goto no_map;
 
         /* remove any dirty cache lines on the kernel alias */
         __dma_flush_range(ptr, ptr + size);
 
         /* create a coherent mapping */
         page = virt_to_page(ptr);
-        for (i = 0; i < (size >> PAGE_SHIFT); i++)
-                map[i] = page + i;
-        coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
-                            __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false));
-        kfree(map);
+        coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
+                                __get_dma_pgprot(attrs,
+                                        __pgprot(PROT_NORMAL_NC), false),
+                                NULL);
 
         if (!coherent_ptr)
                 goto no_map;
 
@@ -132,6 +185,8 @@ static void __dma_free_noncoherent(struct device *dev, size_t size,
 {
         void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
 
+        if (__free_from_pool(vaddr, size))
+                return;
         vunmap(vaddr);
         __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
 }
@@ -307,6 +362,67 @@ EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
 
 extern int swiotlb_late_init_with_default_size(size_t default_size);
 
+static int __init atomic_pool_init(void)
+{
+        pgprot_t prot = __pgprot(PROT_NORMAL_NC);
+        unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
+        struct page *page;
+        void *addr;
+        unsigned int pool_size_order = get_order(atomic_pool_size);
+
+        if (dev_get_cma_area(NULL))
+                page = dma_alloc_from_contiguous(NULL, nr_pages,
+                                                 pool_size_order);
+        else
+                page = alloc_pages(GFP_DMA, pool_size_order);
+
+        if (page) {
+                int ret;
+                void *page_addr = page_address(page);
+
+                memset(page_addr, 0, atomic_pool_size);
+                __dma_flush_range(page_addr, page_addr + atomic_pool_size);
+
+                atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
+                if (!atomic_pool)
+                        goto free_page;
+
+                addr = dma_common_contiguous_remap(page, atomic_pool_size,
+                                        VM_USERMAP, prot, atomic_pool_init);
+
+                if (!addr)
+                        goto destroy_genpool;
+
+                ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
+                                        page_to_phys(page),
+                                        atomic_pool_size, -1);
+                if (ret)
+                        goto remove_mapping;
+
+                gen_pool_set_algo(atomic_pool,
+                                  gen_pool_first_fit_order_align,
+                                  (void *)PAGE_SHIFT);
+
+                pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
+                        atomic_pool_size / 1024);
+                return 0;
+        }
+        goto out;
+
+remove_mapping:
+        dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
+destroy_genpool:
+        gen_pool_destroy(atomic_pool);
+        atomic_pool = NULL;
+free_page:
+        if (!dma_release_from_contiguous(NULL, page, nr_pages))
+                __free_pages(page, pool_size_order);
+out:
+        pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
+               atomic_pool_size / 1024);
+        return -ENOMEM;
+}
+
 static int __init swiotlb_late_init(void)
 {
         size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
@@ -315,7 +431,17 @@ static int __init swiotlb_late_init(void)
 
         return swiotlb_late_init_with_default_size(swiotlb_size);
 }
-arch_initcall(swiotlb_late_init);
+
+static int __init arm64_dma_init(void)
+{
+        int ret = 0;
+
+        ret |= swiotlb_late_init();
+        ret |= atomic_pool_init();
+
+        return ret;
+}
+arch_initcall(arm64_dma_init);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES      4096
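The net effect of this patch: on arm64, coherent allocations requested without __GFP_WAIT (e.g. GFP_ATOMIC) no longer take code paths that may sleep. For non-cache-coherent devices, __dma_alloc_noncoherent() serves such requests from the preallocated, non-cacheable gen_pool, and __dma_alloc_coherent() skips the CMA path when the caller cannot block. The pool defaults to 256 KiB and can be resized via the coherent_pool= boot parameter registered above with early_param() (parsed by memparse, so size suffixes such as 1M work). Below is a minimal caller-side sketch, assuming a hypothetical driver that already holds a valid struct device; the helper names are illustrative and not part of this patch.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/*
 * Hypothetical example: allocate a small coherent buffer from a context
 * that cannot sleep. GFP_ATOMIC does not include __GFP_WAIT, so with
 * this patch a non-cache-coherent arm64 device gets the buffer from
 * the preallocated atomic_pool instead of CMA or a vmap-based remap.
 */
static void *alloc_atomic_dma_buf(struct device *dev, size_t len,
                                  dma_addr_t *dma)
{
        return dma_alloc_coherent(dev, len, dma, GFP_ATOMIC);
}

/*
 * The matching free: for pool-backed buffers, __dma_free_noncoherent()
 * recognises the address via __in_atomic_pool() and returns the chunk
 * to the gen_pool.
 */
static void free_atomic_dma_buf(struct device *dev, size_t len,
                                void *cpu_addr, dma_addr_t dma)
{
        dma_free_coherent(dev, len, cpu_addr, dma);
}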