Skip to content

Commit edf1091

Browse files
Sylvain Lesne authored and Vinod Koul committed
dmaengine: altera: fix spinlock usage
Since this lock is acquired in both process and IRQ context, failing to disable IRQs when trying to acquire the lock in process context can lead to deadlocks. Signed-off-by: Sylvain Lesne <[email protected]> Reviewed-by: Stefan Roese <[email protected]> Signed-off-by: Vinod Koul <[email protected]>
1 parent d9ec464 commit edf1091

File tree

1 file changed

+21
-14
lines changed

1 file changed

+21
-14
lines changed

drivers/dma/altera-msgdma.c

Lines changed: 21 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -212,11 +212,12 @@ struct msgdma_device {
212212
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
213213
{
214214
struct msgdma_sw_desc *desc;
215+
unsigned long flags;
215216

216-
spin_lock_bh(&mdev->lock);
217+
spin_lock_irqsave(&mdev->lock, flags);
217218
desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
218219
list_del(&desc->node);
219-
spin_unlock_bh(&mdev->lock);
220+
spin_unlock_irqrestore(&mdev->lock, flags);
220221

221222
INIT_LIST_HEAD(&desc->tx_list);
222223

@@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
306307
struct msgdma_device *mdev = to_mdev(tx->chan);
307308
struct msgdma_sw_desc *new;
308309
dma_cookie_t cookie;
310+
unsigned long flags;
309311

310312
new = tx_to_desc(tx);
311-
spin_lock_bh(&mdev->lock);
313+
spin_lock_irqsave(&mdev->lock, flags);
312314
cookie = dma_cookie_assign(tx);
313315

314316
list_add_tail(&new->node, &mdev->pending_list);
315-
spin_unlock_bh(&mdev->lock);
317+
spin_unlock_irqrestore(&mdev->lock, flags);
316318

317319
return cookie;
318320
}
@@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
336338
struct msgdma_extended_desc *desc;
337339
size_t copy;
338340
u32 desc_cnt;
341+
unsigned long irqflags;
339342

340343
desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);
341344

342-
spin_lock_bh(&mdev->lock);
345+
spin_lock_irqsave(&mdev->lock, irqflags);
343346
if (desc_cnt > mdev->desc_free_cnt) {
344347
spin_unlock_bh(&mdev->lock);
345348
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
346349
return NULL;
347350
}
348351
mdev->desc_free_cnt -= desc_cnt;
349-
spin_unlock_bh(&mdev->lock);
352+
spin_unlock_irqrestore(&mdev->lock, irqflags);
350353

351354
do {
352355
/* Allocate and populate the descriptor */
@@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
397400
u32 desc_cnt = 0, i;
398401
struct scatterlist *sg;
399402
u32 stride;
403+
unsigned long irqflags;
400404

401405
for_each_sg(sgl, sg, sg_len, i)
402406
desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
403407

404-
spin_lock_bh(&mdev->lock);
408+
spin_lock_irqsave(&mdev->lock, irqflags);
405409
if (desc_cnt > mdev->desc_free_cnt) {
406410
spin_unlock_bh(&mdev->lock);
407411
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
408412
return NULL;
409413
}
410414
mdev->desc_free_cnt -= desc_cnt;
411-
spin_unlock_bh(&mdev->lock);
415+
spin_unlock_irqrestore(&mdev->lock, irqflags);
412416

413417
avail = sg_dma_len(sgl);
414418

@@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev)
566570
static void msgdma_issue_pending(struct dma_chan *chan)
567571
{
568572
struct msgdma_device *mdev = to_mdev(chan);
573+
unsigned long flags;
569574

570-
spin_lock_bh(&mdev->lock);
575+
spin_lock_irqsave(&mdev->lock, flags);
571576
msgdma_start_transfer(mdev);
572-
spin_unlock_bh(&mdev->lock);
577+
spin_unlock_irqrestore(&mdev->lock, flags);
573578
}
574579

575580
/**
@@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev)
634639
static void msgdma_free_chan_resources(struct dma_chan *dchan)
635640
{
636641
struct msgdma_device *mdev = to_mdev(dchan);
642+
unsigned long flags;
637643

638-
spin_lock_bh(&mdev->lock);
644+
spin_lock_irqsave(&mdev->lock, flags);
639645
msgdma_free_descriptors(mdev);
640-
spin_unlock_bh(&mdev->lock);
646+
spin_unlock_irqrestore(&mdev->lock, flags);
641647
kfree(mdev->sw_desq);
642648
}
643649

@@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data)
682688
u32 count;
683689
u32 __maybe_unused size;
684690
u32 __maybe_unused status;
691+
unsigned long flags;
685692

686-
spin_lock(&mdev->lock);
693+
spin_lock_irqsave(&mdev->lock, flags);
687694

688695
/* Read number of responses that are available */
689696
count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
@@ -704,7 +711,7 @@ static void msgdma_tasklet(unsigned long data)
704711
msgdma_chan_desc_cleanup(mdev);
705712
}
706713

707-
spin_unlock(&mdev->lock);
714+
spin_unlock_irqrestore(&mdev->lock, flags);
708715
}
709716

710717
/**

0 commit comments

Comments
 (0)