@@ -133,17 +133,18 @@ mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
 
 static int
 mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
-			gfp_t gfp, bool rx)
+			bool rx)
 {
 	enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 	int n_buf = 0;
 
 	spin_lock_bh(&q->lock);
 	while (q->queued < q->n_desc) {
-		void *buf = page_frag_alloc(&q->cache, q->buf_size, gfp);
 		struct mtk_wed_wo_queue_entry *entry;
 		dma_addr_t addr;
+		void *buf;
 
+		buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
 		if (!buf)
 			break;
 
@@ -215,7 +216,7 @@ mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 			mtk_wed_mcu_rx_unsolicited_event(wo, skb);
 	}
 
-	if (mtk_wed_wo_queue_refill(wo, q, GFP_ATOMIC, true)) {
+	if (mtk_wed_wo_queue_refill(wo, q, true)) {
 		u32 index = (q->head - 1) % q->n_desc;
 
 		mtk_wed_wo_queue_kick(wo, q, index);
@@ -432,7 +433,7 @@ mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
 	if (ret)
 		goto error;
 
-	mtk_wed_wo_queue_refill(wo, &wo->q_tx, GFP_KERNEL, false);
+	mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
 	mtk_wed_wo_queue_reset(wo, &wo->q_tx);
 
 	regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
@@ -446,7 +447,7 @@ mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
 	if (ret)
 		goto error;
 
-	mtk_wed_wo_queue_refill(wo, &wo->q_rx, GFP_KERNEL, true);
+	mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
 	mtk_wed_wo_queue_reset(wo, &wo->q_rx);
 
 	/* rx queue irqmask */
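
Taken together, the hunks drop the gfp_t argument from mtk_wed_wo_queue_refill() and hard-code GFP_ATOMIC: the allocation runs while q->lock is held via spin_lock_bh(), so the GFP_KERNEL that the hardware-init callers used to pass could trigger a sleeping allocation in a non-sleepable context. The sketch below is a reconstruction of the post-patch refill loop from the context lines of the first hunk only; the descriptor setup after the allocation and the matching spin_unlock_bh() are not shown in this diff and are assumed/elided here.

	/* Reconstructed (not verbatim) shape of the refill loop after the patch. */
	spin_lock_bh(&q->lock);
	while (q->queued < q->n_desc) {
		struct mtk_wed_wo_queue_entry *entry;
		dma_addr_t addr;
		void *buf;

		/* GFP_ATOMIC is now unconditional: BHs are disabled and the
		 * queue lock is held, so a sleeping allocation is not allowed.
		 */
		buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		/* ... map the buffer and fill entry/addr (outside this hunk) ... */
	}
	spin_unlock_bh(&q->lock);	/* assumed pairing, not visible in the hunk */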