@@ -212,11 +212,12 @@ struct msgdma_device {
212
212
static struct msgdma_sw_desc * msgdma_get_descriptor (struct msgdma_device * mdev )
213
213
{
214
214
struct msgdma_sw_desc * desc ;
215
+ unsigned long flags ;
215
216
216
- spin_lock_bh (& mdev -> lock );
217
+ spin_lock_irqsave (& mdev -> lock , flags );
217
218
desc = list_first_entry (& mdev -> free_list , struct msgdma_sw_desc , node );
218
219
list_del (& desc -> node );
219
- spin_unlock_bh (& mdev -> lock );
220
+ spin_unlock_irqrestore (& mdev -> lock , flags );
220
221
221
222
INIT_LIST_HEAD (& desc -> tx_list );
222
223
@@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
306
307
struct msgdma_device * mdev = to_mdev (tx -> chan );
307
308
struct msgdma_sw_desc * new ;
308
309
dma_cookie_t cookie ;
310
+ unsigned long flags ;
309
311
310
312
new = tx_to_desc (tx );
311
- spin_lock_bh (& mdev -> lock );
313
+ spin_lock_irqsave (& mdev -> lock , flags );
312
314
cookie = dma_cookie_assign (tx );
313
315
314
316
list_add_tail (& new -> node , & mdev -> pending_list );
315
- spin_unlock_bh (& mdev -> lock );
317
+ spin_unlock_irqrestore (& mdev -> lock , flags );
316
318
317
319
return cookie ;
318
320
}
@@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
336
338
struct msgdma_extended_desc * desc ;
337
339
size_t copy ;
338
340
u32 desc_cnt ;
341
+ unsigned long irqflags ;
339
342
340
343
desc_cnt = DIV_ROUND_UP (len , MSGDMA_MAX_TRANS_LEN );
341
344
342
- spin_lock_bh (& mdev -> lock );
345
+ spin_lock_irqsave (& mdev -> lock , irqflags );
343
346
if (desc_cnt > mdev -> desc_free_cnt ) {
344
347
- spin_unlock_bh (& mdev -> lock );
+ spin_unlock_irqrestore (& mdev -> lock , irqflags );
345
348
dev_dbg (mdev -> dev , "mdev %p descs are not available\n" , mdev );
346
349
346
349
return NULL ;
347
350
}
348
351
mdev -> desc_free_cnt -= desc_cnt ;
349
- spin_unlock_bh (& mdev -> lock );
352
+ spin_unlock_irqrestore (& mdev -> lock , irqflags );
350
353
351
354
do {
352
355
/* Allocate and populate the descriptor */
@@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
397
400
u32 desc_cnt = 0 , i ;
398
401
struct scatterlist * sg ;
399
402
u32 stride ;
403
+ unsigned long irqflags ;
400
404
401
405
for_each_sg (sgl , sg , sg_len , i )
402
406
desc_cnt += DIV_ROUND_UP (sg_dma_len (sg ), MSGDMA_MAX_TRANS_LEN );
403
407
404
- spin_lock_bh (& mdev -> lock );
408
+ spin_lock_irqsave (& mdev -> lock , irqflags );
405
409
if (desc_cnt > mdev -> desc_free_cnt ) {
406
410
- spin_unlock_bh (& mdev -> lock );
+ spin_unlock_irqrestore (& mdev -> lock , irqflags );
407
411
dev_dbg (mdev -> dev , "mdev %p descs are not available\n" , mdev );
408
412
408
412
return NULL ;
409
413
}
410
414
mdev -> desc_free_cnt -= desc_cnt ;
411
- spin_unlock_bh (& mdev -> lock );
415
+ spin_unlock_irqrestore (& mdev -> lock , irqflags );
412
416
413
417
avail = sg_dma_len (sgl );
414
418
@@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev)
566
570
static void msgdma_issue_pending (struct dma_chan * chan )
567
571
{
568
572
struct msgdma_device * mdev = to_mdev (chan );
573
+ unsigned long flags ;
569
574
570
- spin_lock_bh (& mdev -> lock );
575
+ spin_lock_irqsave (& mdev -> lock , flags );
571
576
msgdma_start_transfer (mdev );
572
- spin_unlock_bh (& mdev -> lock );
577
+ spin_unlock_irqrestore (& mdev -> lock , flags );
573
578
}
574
579
575
580
/**
@@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev)
634
639
static void msgdma_free_chan_resources (struct dma_chan * dchan )
635
640
{
636
641
struct msgdma_device * mdev = to_mdev (dchan );
642
+ unsigned long flags ;
637
643
638
- spin_lock_bh (& mdev -> lock );
644
+ spin_lock_irqsave (& mdev -> lock , flags );
639
645
msgdma_free_descriptors (mdev );
640
- spin_unlock_bh (& mdev -> lock );
646
+ spin_unlock_irqrestore (& mdev -> lock , flags );
641
647
kfree (mdev -> sw_desq );
642
648
}
643
649
@@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data)
682
688
u32 count ;
683
689
u32 __maybe_unused size ;
684
690
u32 __maybe_unused status ;
691
+ unsigned long flags ;
685
692
686
- spin_lock (& mdev -> lock );
693
+ spin_lock_irqsave (& mdev -> lock , flags );
687
694
688
695
/* Read number of responses that are available */
689
696
count = ioread32 (mdev -> csr + MSGDMA_CSR_RESP_FILL_LEVEL );
@@ -704,7 +711,7 @@ static void msgdma_tasklet(unsigned long data)
704
711
msgdma_chan_desc_cleanup (mdev );
705
712
}
706
713
707
- spin_unlock (& mdev -> lock );
714
+ spin_unlock_irqrestore (& mdev -> lock , flags );
708
715
}
709
716
710
717
/**
0 commit comments