Skip to content

Commit 7a23c5a

Browse files
committed
Merge tag 'dmaengine-fix-4.14-rc5' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine fixes from Vinod Koul: "Here are fixes for this round - fix spinlock usage and fifo response for altera driver - fix ti crossbar race condition - fix edma memcpy align" * tag 'dmaengine-fix-4.14-rc5' of git://git.infradead.org/users/vkoul/slave-dma: dmaengine: altera: fix spinlock usage dmaengine: altera: fix response FIFO emptying dmaengine: ti-dma-crossbar: Fix possible race condition with dma_inuse dmaengine: edma: Align the memcpy acnt array size with the transfer
2 parents e7a36a6 + edf1091 commit 7a23c5a

File tree

3 files changed

+40
-19
lines changed

3 files changed

+40
-19
lines changed

drivers/dma/altera-msgdma.c

Lines changed: 22 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -212,11 +212,12 @@ struct msgdma_device {
212212
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
213213
{
214214
struct msgdma_sw_desc *desc;
215+
unsigned long flags;
215216

216-
spin_lock_bh(&mdev->lock);
217+
spin_lock_irqsave(&mdev->lock, flags);
217218
desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
218219
list_del(&desc->node);
219-
spin_unlock_bh(&mdev->lock);
220+
spin_unlock_irqrestore(&mdev->lock, flags);
220221

221222
INIT_LIST_HEAD(&desc->tx_list);
222223

@@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
306307
struct msgdma_device *mdev = to_mdev(tx->chan);
307308
struct msgdma_sw_desc *new;
308309
dma_cookie_t cookie;
310+
unsigned long flags;
309311

310312
new = tx_to_desc(tx);
311-
spin_lock_bh(&mdev->lock);
313+
spin_lock_irqsave(&mdev->lock, flags);
312314
cookie = dma_cookie_assign(tx);
313315

314316
list_add_tail(&new->node, &mdev->pending_list);
315-
spin_unlock_bh(&mdev->lock);
317+
spin_unlock_irqrestore(&mdev->lock, flags);
316318

317319
return cookie;
318320
}
@@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
336338
struct msgdma_extended_desc *desc;
337339
size_t copy;
338340
u32 desc_cnt;
341+
unsigned long irqflags;
339342

340343
desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);
341344

342-
spin_lock_bh(&mdev->lock);
345+
spin_lock_irqsave(&mdev->lock, irqflags);
343346
if (desc_cnt > mdev->desc_free_cnt) {
344347
spin_unlock_bh(&mdev->lock);
345348
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
346349
return NULL;
347350
}
348351
mdev->desc_free_cnt -= desc_cnt;
349-
spin_unlock_bh(&mdev->lock);
352+
spin_unlock_irqrestore(&mdev->lock, irqflags);
350353

351354
do {
352355
/* Allocate and populate the descriptor */
@@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
397400
u32 desc_cnt = 0, i;
398401
struct scatterlist *sg;
399402
u32 stride;
403+
unsigned long irqflags;
400404

401405
for_each_sg(sgl, sg, sg_len, i)
402406
desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
403407

404-
spin_lock_bh(&mdev->lock);
408+
spin_lock_irqsave(&mdev->lock, irqflags);
405409
if (desc_cnt > mdev->desc_free_cnt) {
406410
spin_unlock_bh(&mdev->lock);
407411
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
408412
return NULL;
409413
}
410414
mdev->desc_free_cnt -= desc_cnt;
411-
spin_unlock_bh(&mdev->lock);
415+
spin_unlock_irqrestore(&mdev->lock, irqflags);
412416

413417
avail = sg_dma_len(sgl);
414418

@@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev)
566570
static void msgdma_issue_pending(struct dma_chan *chan)
567571
{
568572
struct msgdma_device *mdev = to_mdev(chan);
573+
unsigned long flags;
569574

570-
spin_lock_bh(&mdev->lock);
575+
spin_lock_irqsave(&mdev->lock, flags);
571576
msgdma_start_transfer(mdev);
572-
spin_unlock_bh(&mdev->lock);
577+
spin_unlock_irqrestore(&mdev->lock, flags);
573578
}
574579

575580
/**
@@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev)
634639
static void msgdma_free_chan_resources(struct dma_chan *dchan)
635640
{
636641
struct msgdma_device *mdev = to_mdev(dchan);
642+
unsigned long flags;
637643

638-
spin_lock_bh(&mdev->lock);
644+
spin_lock_irqsave(&mdev->lock, flags);
639645
msgdma_free_descriptors(mdev);
640-
spin_unlock_bh(&mdev->lock);
646+
spin_unlock_irqrestore(&mdev->lock, flags);
641647
kfree(mdev->sw_desq);
642648
}
643649

@@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data)
682688
u32 count;
683689
u32 __maybe_unused size;
684690
u32 __maybe_unused status;
691+
unsigned long flags;
685692

686-
spin_lock(&mdev->lock);
693+
spin_lock_irqsave(&mdev->lock, flags);
687694

688695
/* Read number of responses that are available */
689696
count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
@@ -698,13 +705,13 @@ static void msgdma_tasklet(unsigned long data)
698705
* bits. So we need to just drop these values.
699706
*/
700707
size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
701-
status = ioread32(mdev->resp - MSGDMA_RESP_STATUS);
708+
status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
702709

703710
msgdma_complete_descriptor(mdev);
704711
msgdma_chan_desc_cleanup(mdev);
705712
}
706713

707-
spin_unlock(&mdev->lock);
714+
spin_unlock_irqrestore(&mdev->lock, flags);
708715
}
709716

710717
/**

drivers/dma/edma.c

Lines changed: 16 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
11431143
struct edma_desc *edesc;
11441144
struct device *dev = chan->device->dev;
11451145
struct edma_chan *echan = to_edma_chan(chan);
1146-
unsigned int width, pset_len;
1146+
unsigned int width, pset_len, array_size;
11471147

11481148
if (unlikely(!echan || !len))
11491149
return NULL;
11501150

1151+
/* Align the array size (acnt block) with the transfer properties */
1152+
switch (__ffs((src | dest | len))) {
1153+
case 0:
1154+
array_size = SZ_32K - 1;
1155+
break;
1156+
case 1:
1157+
array_size = SZ_32K - 2;
1158+
break;
1159+
default:
1160+
array_size = SZ_32K - 4;
1161+
break;
1162+
}
1163+
11511164
if (len < SZ_64K) {
11521165
/*
11531166
* Transfer size less than 64K can be handled with one paRAM
@@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
11691182
* When the full_length is multibple of 32767 one slot can be
11701183
* used to complete the transfer.
11711184
*/
1172-
width = SZ_32K - 1;
1185+
width = array_size;
11731186
pset_len = rounddown(len, width);
11741187
/* One slot is enough for lengths multiple of (SZ_32K -1) */
11751188
if (unlikely(pset_len == len))
@@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
12171230
}
12181231
dest += pset_len;
12191232
src += pset_len;
1220-
pset_len = width = len % (SZ_32K - 1);
1233+
pset_len = width = len % array_size;
12211234

12221235
ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
12231236
width, pset_len, DMA_MEM_TO_MEM);

drivers/dma/ti-dma-crossbar.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -262,13 +262,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
262262
mutex_lock(&xbar->mutex);
263263
map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
264264
xbar->dma_requests);
265-
mutex_unlock(&xbar->mutex);
266265
if (map->xbar_out == xbar->dma_requests) {
266+
mutex_unlock(&xbar->mutex);
267267
dev_err(&pdev->dev, "Run out of free DMA requests\n");
268268
kfree(map);
269269
return ERR_PTR(-ENOMEM);
270270
}
271271
set_bit(map->xbar_out, xbar->dma_inuse);
272+
mutex_unlock(&xbar->mutex);
272273

273274
map->xbar_in = (u16)dma_spec->args[0];
274275

0 commit comments

Comments
 (0)