Skip to content

Commit 5884e17

Browse files
Varadarajan Narayanan authored and broonie (Mark Brown) committed
spi: qup: allow multiple DMA transactions per spi xfer
Much like the block mode changes, we are breaking up DMA transactions
into 64K chunks so we can reset the QUP engine.

Signed-off-by: Matthew McClintock <[email protected]>
Signed-off-by: Varadarajan Narayanan <[email protected]>
Signed-off-by: Mark Brown <[email protected]>
1 parent a841b24 commit 5884e17

File tree

1 file changed

+66
-26
lines changed

1 file changed

+66
-26
lines changed

drivers/spi/spi-qup.c

Lines changed: 66 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -418,53 +418,93 @@ static void spi_qup_dma_terminate(struct spi_master *master,
418418
dmaengine_terminate_all(master->dma_rx);
419419
}
420420

421+
/*
 * spi_qup_sgl_get_nents_len() - measure the longest scatterlist prefix
 * that fits within @max bytes.
 * @sgl:   scatterlist to walk (DMA-mapped; lengths read via sg_dma_len())
 * @max:   byte budget for one hardware transaction (caller passes
 *         SPI_MAX_XFER, the 64K QUP limit)
 * @nents: out-parameter; set to the number of entries counted into the
 *         returned total
 *
 * Walks @sgl from the front, accumulating entry lengths until adding the
 * next entry would either exceed @max or wrap the u32 accumulator.
 *
 * Return: total DMA length in bytes of the first *@nents entries.  May be
 * 0 (with *@nents == 0) if the very first entry is already larger than
 * @max.
 */
static u32 spi_qup_sgl_get_nents_len(struct scatterlist *sgl, u32 max,
				     u32 *nents)
{
	struct scatterlist *sg;
	u32 total = 0;

	*nents = 0;

	for (sg = sgl; sg; sg = sg_next(sg)) {
		unsigned int len = sg_dma_len(sg);

		/* check for overflow as well as limit */
		if (((total + len) < total) || ((total + len) > max))
			break;

		total += len;
		(*nents)++;
	}

	return total;
}
442+
421443
static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
422444
unsigned long timeout)
423445
{
424446
dma_async_tx_callback rx_done = NULL, tx_done = NULL;
425447
struct spi_master *master = spi->master;
426448
struct spi_qup *qup = spi_master_get_devdata(master);
449+
struct scatterlist *tx_sgl, *rx_sgl;
427450
int ret;
428451

429452
if (xfer->rx_buf)
430453
rx_done = spi_qup_dma_done;
431454
else if (xfer->tx_buf)
432455
tx_done = spi_qup_dma_done;
433456

434-
ret = spi_qup_io_config(spi, xfer);
435-
if (ret)
436-
return ret;
457+
rx_sgl = xfer->rx_sg.sgl;
458+
tx_sgl = xfer->tx_sg.sgl;
437459

438-
/* before issuing the descriptors, set the QUP to run */
439-
ret = spi_qup_set_state(qup, QUP_STATE_RUN);
440-
if (ret) {
441-
dev_warn(qup->dev, "%s(%d): cannot set RUN state\n",
442-
__func__, __LINE__);
443-
return ret;
444-
}
460+
do {
461+
u32 rx_nents, tx_nents;
462+
463+
if (rx_sgl)
464+
qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl,
465+
SPI_MAX_XFER, &rx_nents) / qup->w_size;
466+
if (tx_sgl)
467+
qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl,
468+
SPI_MAX_XFER, &tx_nents) / qup->w_size;
469+
if (!qup->n_words)
470+
return -EIO;
445471

446-
if (xfer->rx_buf) {
447-
ret = spi_qup_prep_sg(master, xfer->rx_sg.sgl,
448-
xfer->rx_sg.nents, DMA_DEV_TO_MEM,
449-
rx_done);
472+
ret = spi_qup_io_config(spi, xfer);
450473
if (ret)
451474
return ret;
452475

453-
dma_async_issue_pending(master->dma_rx);
454-
}
455-
456-
if (xfer->tx_buf) {
457-
ret = spi_qup_prep_sg(master, xfer->tx_sg.sgl,
458-
xfer->tx_sg.nents, DMA_MEM_TO_DEV,
459-
tx_done);
460-
if (ret)
476+
/* before issuing the descriptors, set the QUP to run */
477+
ret = spi_qup_set_state(qup, QUP_STATE_RUN);
478+
if (ret) {
479+
dev_warn(qup->dev, "cannot set RUN state\n");
461480
return ret;
481+
}
482+
if (rx_sgl) {
483+
ret = spi_qup_prep_sg(master, rx_sgl, rx_nents,
484+
DMA_DEV_TO_MEM, rx_done);
485+
if (ret)
486+
return ret;
487+
dma_async_issue_pending(master->dma_rx);
488+
}
462489

463-
dma_async_issue_pending(master->dma_tx);
464-
}
490+
if (tx_sgl) {
491+
ret = spi_qup_prep_sg(master, tx_sgl, tx_nents,
492+
DMA_MEM_TO_DEV, tx_done);
493+
if (ret)
494+
return ret;
495+
496+
dma_async_issue_pending(master->dma_tx);
497+
}
498+
499+
if (!wait_for_completion_timeout(&qup->done, timeout))
500+
return -ETIMEDOUT;
501+
502+
for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl))
503+
;
504+
for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl))
505+
;
465506

466-
if (!wait_for_completion_timeout(&qup->done, timeout))
467-
return -ETIMEDOUT;
507+
} while (rx_sgl || tx_sgl);
468508

469509
return 0;
470510
}

0 commit comments

Comments
 (0)