@@ -147,6 +147,22 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
147
147
return page ;
148
148
}
149
149
150
/*
 * Allocate coherent memory from the atomic DMA pool instead of the page
 * allocator.  Used on the non-blocking (atomic) allocation path, where
 * __dma_direct_alloc_pages() cannot be called.
 *
 * On success the kernel virtual address of the buffer is returned and
 * *dma_handle is set to the device-visible DMA address; on failure (pool
 * exhausted or no pool covering the device's addressable range) NULL is
 * returned and *dma_handle is left untouched.
 */
static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	u64 phys_mask;
	void *ret;

	/*
	 * Add zone modifiers (e.g. GFP_DMA/GFP_DMA32) so the pool picked
	 * matches what dev->coherent_dma_mask can address; phys_mask is
	 * filled in as a side effect but not otherwise used here.
	 */
	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					   &phys_mask);
	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
	if (!page)
		return NULL;
	/* Direct mapping: DMA address derived straight from the physical address. */
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;
}
165
+
150
166
void * dma_direct_alloc (struct device * dev , size_t size ,
151
167
dma_addr_t * dma_handle , gfp_t gfp , unsigned long attrs )
152
168
{
@@ -163,17 +179,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
163
179
if (attrs & DMA_ATTR_NO_WARN )
164
180
gfp |= __GFP_NOWARN ;
165
181
166
- if (dma_should_alloc_from_pool (dev , gfp , attrs )) {
167
- u64 phys_mask ;
168
-
169
- gfp |= dma_direct_optimal_gfp_mask (dev , dev -> coherent_dma_mask ,
170
- & phys_mask );
171
- page = dma_alloc_from_pool (dev , size , & ret , gfp ,
172
- dma_coherent_ok );
173
- if (!page )
174
- return NULL ;
175
- goto done ;
176
- }
182
+ if (dma_should_alloc_from_pool (dev , gfp , attrs ))
183
+ return dma_direct_alloc_from_pool (dev , size , dma_handle , gfp );
177
184
178
185
/* we always manually zero the memory once we are done */
179
186
page = __dma_direct_alloc_pages (dev , size , gfp & ~__GFP_ZERO );
@@ -298,13 +305,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
298
305
struct page * page ;
299
306
void * ret ;
300
307
301
- if (dma_should_alloc_from_pool (dev , gfp , 0 )) {
302
- page = dma_alloc_from_pool (dev , size , & ret , gfp ,
303
- dma_coherent_ok );
304
- if (!page )
305
- return NULL ;
306
- goto done ;
307
- }
308
+ if (dma_should_alloc_from_pool (dev , gfp , 0 ))
309
+ return dma_direct_alloc_from_pool (dev , size , dma_handle , gfp );
308
310
309
311
page = __dma_direct_alloc_pages (dev , size , gfp );
310
312
if (!page )
@@ -327,7 +329,6 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
327
329
goto out_free_pages ;
328
330
}
329
331
memset (ret , 0 , size );
330
- done :
331
332
* dma_handle = phys_to_dma_direct (dev , page_to_phys (page ));
332
333
return page ;
333
334
out_free_pages :
0 commit comments