Skip to content

Commit 51fc7b2

Browse files
lyakhcjb
authored and committed
mmc: tmio-mmc: Improve DMA stability on sh-mobile
On some SDHI tmio implementations the order of DMA and command completion interrupts swaps, which leads to malfunction. This patch postpones DMA activation until the MMC command completion IRQ time.

Signed-off-by: Guennadi Liakhovetski <[email protected]>
Acked-by: Paul Mundt <[email protected]>
Signed-off-by: Chris Ball <[email protected]>
1 parent 4f665cb commit 51fc7b2

File tree

1 file changed

+34
-29
lines changed

1 file changed

+34
-29
lines changed

drivers/mmc/host/tmio_mmc.c

Lines changed: 34 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -485,7 +485,10 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
485485
unsigned int count;
486486
unsigned long flags;
487487

488-
if (!data) {
488+
if (host->chan_tx || host->chan_rx) {
489+
pr_err("PIO IRQ in DMA mode!\n");
490+
return;
491+
} else if (!data) {
489492
pr_debug("Spurious PIO IRQ\n");
490493
return;
491494
}
@@ -648,6 +651,8 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
648651
if (host->data->flags & MMC_DATA_READ) {
649652
if (!host->chan_rx)
650653
enable_mmc_irqs(host, TMIO_MASK_READOP);
654+
else
655+
tasklet_schedule(&host->dma_issue);
651656
} else {
652657
if (!host->chan_tx)
653658
enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
@@ -779,18 +784,6 @@ static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
779784
#endif
780785
}
781786

782-
static void tmio_dma_complete(void *arg)
783-
{
784-
struct tmio_mmc_host *host = arg;
785-
786-
dev_dbg(&host->pdev->dev, "Command completed\n");
787-
788-
if (!host->data)
789-
dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
790-
else
791-
enable_mmc_irqs(host, TMIO_STAT_DATAEND);
792-
}
793-
794787
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
795788
{
796789
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
@@ -817,6 +810,8 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
817810
goto pio;
818811
}
819812

813+
disable_mmc_irqs(host, TMIO_STAT_RXRDY);
814+
820815
/* The only sg element can be unaligned, use our bounce buffer then */
821816
if (!aligned) {
822817
sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
@@ -827,14 +822,11 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
827822
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
828823
if (ret > 0)
829824
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
830-
DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
825+
DMA_FROM_DEVICE, DMA_CTRL_ACK);
831826

832-
if (desc) {
833-
desc->callback = tmio_dma_complete;
834-
desc->callback_param = host;
827+
if (desc)
835828
cookie = dmaengine_submit(desc);
836-
dma_async_issue_pending(chan);
837-
}
829+
838830
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
839831
__func__, host->sg_len, ret, cookie, host->mrq);
840832

@@ -886,6 +878,8 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
886878
goto pio;
887879
}
888880

881+
disable_mmc_irqs(host, TMIO_STAT_TXRQ);
882+
889883
/* The only sg element can be unaligned, use our bounce buffer then */
890884
if (!aligned) {
891885
unsigned long flags;
@@ -900,13 +894,11 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
900894
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
901895
if (ret > 0)
902896
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
903-
DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
897+
DMA_TO_DEVICE, DMA_CTRL_ACK);
904898

905-
if (desc) {
906-
desc->callback = tmio_dma_complete;
907-
desc->callback_param = host;
899+
if (desc)
908900
cookie = dmaengine_submit(desc);
909-
}
901+
910902
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
911903
__func__, host->sg_len, ret, cookie, host->mrq);
912904

@@ -947,17 +939,30 @@ static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
947939
static void tmio_issue_tasklet_fn(unsigned long priv)
948940
{
949941
struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
950-
struct dma_chan *chan = host->chan_tx;
942+
struct dma_chan *chan = NULL;
943+
944+
spin_lock_irq(&host->lock);
945+
946+
if (host && host->data) {
947+
if (host->data->flags & MMC_DATA_READ)
948+
chan = host->chan_rx;
949+
else
950+
chan = host->chan_tx;
951+
}
952+
953+
spin_unlock_irq(&host->lock);
951954

952-
dma_async_issue_pending(chan);
955+
enable_mmc_irqs(host, TMIO_STAT_DATAEND);
956+
957+
if (chan)
958+
dma_async_issue_pending(chan);
953959
}
954960

955961
static void tmio_tasklet_fn(unsigned long arg)
956962
{
957963
struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
958-
unsigned long flags;
959964

960-
spin_lock_irqsave(&host->lock, flags);
965+
spin_lock_irq(&host->lock);
961966

962967
if (!host->data)
963968
goto out;
@@ -973,7 +978,7 @@ static void tmio_tasklet_fn(unsigned long arg)
973978

974979
tmio_mmc_do_data_irq(host);
975980
out:
976-
spin_unlock_irqrestore(&host->lock, flags);
981+
spin_unlock_irq(&host->lock);
977982
}
978983

979984
/* It might be necessary to make filter MFD specific */

0 commit comments

Comments
 (0)