
Commit da6b736

Merge tag 'dmaengine-fix-4.5-rc5' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
 "A few fixes for drivers, nothing major here. Fixes are: ioatdma fix to
  restart channels, new ID for wildcat PCH, residue fix for edma, disable
  irq for non-cyclic in dw"

* tag 'dmaengine-fix-4.5-rc5' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: dw: disable BLOCK IRQs for non-cyclic xfer
  dmaengine: edma: fix residue race for cyclic
  dmaengine: dw: pci: add ID for WildcatPoint PCH
  dmaengine: IOATDMA: fix timer code that continues to restart channels during idle

2 parents: 37aa4da + ee1cdcd

4 files changed (+76, -18 lines)

drivers/dma/dw/core.c

Lines changed: 10 additions & 5 deletions
@@ -156,7 +156,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
 
 	/* Enable interrupts */
 	channel_set_bit(dw, MASK.XFER, dwc->mask);
-	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
 	channel_set_bit(dw, MASK.ERROR, dwc->mask);
 
 	dwc->initialized = true;
@@ -588,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 
 		spin_unlock_irqrestore(&dwc->lock, flags);
 	}
+
+	/* Re-enable interrupts */
+	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
 }
 
 /* ------------------------------------------------------------------------- */
@@ -618,11 +620,8 @@ static void dw_dma_tasklet(unsigned long data)
 			dwc_scan_descriptors(dw, dwc);
 	}
 
-	/*
-	 * Re-enable interrupts.
-	 */
+	/* Re-enable interrupts */
 	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
-	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
 }
 
@@ -1261,6 +1260,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 int dw_dma_cyclic_start(struct dma_chan *chan)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	struct dw_dma *dw = to_dw_dma(chan->device);
 	unsigned long flags;
 
 	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
@@ -1269,7 +1269,12 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
 	}
 
 	spin_lock_irqsave(&dwc->lock, flags);
+
+	/* Enable interrupts to perform cyclic transfer */
+	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+
	dwc_dostart(dwc, dwc->cdesc->desc[0]);
+
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	return 0;
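The fix hinges on when the per-block (BLOCK) interrupt is unmasked: dwc_initialize() no longer enables it for every channel, dw_dma_cyclic_start() enables it only when a cyclic transfer begins, and dwc_handle_cyclic() re-arms it after each serviced period. A minimal userspace sketch of that masking scheme follows; the enum, struct chan, and helper names are illustrative stand-ins, not the driver's real API:

#include <stdbool.h>
#include <stdio.h>

enum { IRQ_XFER = 1 << 0, IRQ_BLOCK = 1 << 1, IRQ_ERROR = 1 << 2 };

struct chan {
	unsigned int irq_mask;
	bool cyclic;
};

/* Mirrors dwc_initialize() after the fix: BLOCK stays masked by default. */
static void chan_init(struct chan *c)
{
	c->irq_mask = IRQ_XFER | IRQ_ERROR;
}

/* Mirrors dw_dma_cyclic_start(): only cyclic transfers unmask BLOCK. */
static void chan_cyclic_start(struct chan *c)
{
	c->cyclic = true;
	c->irq_mask |= IRQ_BLOCK;
}

/* Mirrors dwc_handle_cyclic(): re-arm BLOCK after servicing a period. */
static void chan_handle_block(struct chan *c)
{
	c->irq_mask &= ~IRQ_BLOCK;	/* assume hardware masked it on entry */
	/* ... service the completed period here ... */
	c->irq_mask |= IRQ_BLOCK;	/* re-enable for the next period */
}

int main(void)
{
	struct chan c = { 0, false };

	chan_init(&c);
	printf("slave xfer:  BLOCK %smasked\n",
	       (c.irq_mask & IRQ_BLOCK) ? "un" : "");
	chan_cyclic_start(&c);
	chan_handle_block(&c);
	printf("cyclic xfer: BLOCK %smasked\n",
	       (c.irq_mask & IRQ_BLOCK) ? "un" : "");
	return 0;
}

The upshot is that ordinary slave and memcpy transfers never take a per-block interrupt, which that transfer type never needs; only cyclic transfers pay for it.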

drivers/dma/dw/pci.c

Lines changed: 4 additions & 0 deletions
@@ -108,6 +108,10 @@ static const struct pci_device_id dw_pci_id_table[] = {
 
 	/* Haswell */
 	{ PCI_VDEVICE(INTEL, 0x9c60) },
+
+	/* Broadwell */
+	{ PCI_VDEVICE(INTEL, 0x9ce0) },
+
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
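For context, dw_pci_id_table is a sentinel-terminated match table: the PCI core walks it until it hits the empty { } entry, so adding the one Broadwell/WildcatPoint ID is all it takes for the driver to bind to that PCH. A rough userspace model of that lookup, with an illustrative struct pci_id and match() helper standing in for the kernel's types:

#include <stddef.h>
#include <stdio.h>

struct pci_id { unsigned short vendor, device; };

#define PCI_VENDOR_ID_INTEL 0x8086

static const struct pci_id id_table[] = {
	{ PCI_VENDOR_ID_INTEL, 0x9c60 },	/* Haswell */
	{ PCI_VENDOR_ID_INTEL, 0x9ce0 },	/* Broadwell, the new entry */
	{ 0, 0 }				/* sentinel, like { } above */
};

static const struct pci_id *match(unsigned short vendor, unsigned short device)
{
	const struct pci_id *id;

	for (id = id_table; id->vendor; id++)
		if (id->vendor == vendor && id->device == device)
			return id;
	return NULL;
}

int main(void)
{
	printf("0x9ce0: %s\n", match(PCI_VENDOR_ID_INTEL, 0x9ce0) ?
	       "driver binds" : "no match");
	return 0;
}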

drivers/dma/edma.c

Lines changed: 40 additions & 1 deletion
@@ -113,6 +113,9 @@
 #define GET_NUM_REGN(x)	((x & 0x300000) >> 20)  /* bits 20-21 */
 #define CHMAP_EXIST		BIT(24)
 
+/* CCSTAT register */
+#define EDMA_CCSTAT_ACTV	BIT(4)
+
 /*
  * Max of 20 segments per channel to conserve PaRAM slots
  * Also note that MAX_NR_SG should be atleast the no.of periods
@@ -1680,9 +1683,20 @@ static void edma_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&echan->vchan.lock, flags);
 }
 
+/*
+ * This limit exists to avoid a possible infinite loop when waiting for proof
+ * that a particular transfer is completed. This limit can be hit if there
+ * are large bursts to/from slow devices or the CPU is never able to catch
+ * the DMA hardware idle. On an AM335x transfering 48 bytes from the UART
+ * RX-FIFO, as many as 55 loops have been seen.
+ */
+#define EDMA_MAX_TR_WAIT_LOOPS 1000
+
 static u32 edma_residue(struct edma_desc *edesc)
 {
 	bool dst = edesc->direction == DMA_DEV_TO_MEM;
+	int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
+	struct edma_chan *echan = edesc->echan;
 	struct edma_pset *pset = edesc->pset;
 	dma_addr_t done, pos;
 	int i;
@@ -1691,7 +1705,32 @@ static u32 edma_residue(struct edma_desc *edesc)
 	 * We always read the dst/src position from the first RamPar
 	 * pset. That's the one which is active now.
 	 */
-	pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst);
+	pos = edma_get_position(echan->ecc, echan->slot[0], dst);
+
+	/*
+	 * "pos" may represent a transfer request that is still being
+	 * processed by the EDMACC or EDMATC. We will busy wait until
+	 * any one of the situations occurs:
+	 * 1. the DMA hardware is idle
+	 * 2. a new transfer request is setup
+	 * 3. we hit the loop limit
+	 */
+	while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
+		/* check if a new transfer request is setup */
+		if (edma_get_position(echan->ecc,
+				      echan->slot[0], dst) != pos) {
+			break;
+		}
+
+		if (!--loop_count) {
+			dev_dbg_ratelimited(echan->vchan.chan.device->dev,
+				"%s: timeout waiting for PaRAM update\n",
+				__func__);
+			break;
+		}
+
+		cpu_relax();
+	}
 
 	/*
	 * Cyclic is simple. Just subtract pset[0].addr from pos.
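The new busy-wait is the heart of the residue fix: a position read is only trusted once the controller is seen idle, or the position has moved (meaning a new transfer request was set up), or a loop budget expires so the wait can never spin forever. A self-contained sketch of the same bounded-polling pattern, with hw_active() and read_position() as fake stand-ins for the CCSTAT and PaRAM register reads:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_WAIT_LOOPS 1000	/* mirrors EDMA_MAX_TR_WAIT_LOOPS */

/* Fake hardware: reports "active" for the first few polls, then idles. */
static int polls;
static bool hw_active(void) { return polls < 5; }
static uint32_t read_position(void) { polls++; return 0x1000; }

static uint32_t stable_position(void)
{
	int budget = MAX_WAIT_LOOPS;
	uint32_t pos = read_position();

	while (hw_active()) {
		if (read_position() != pos)	/* a new transfer was set up */
			break;
		if (!--budget) {		/* bail out, don't spin forever */
			fprintf(stderr, "timeout waiting for position\n");
			break;
		}
		/* cpu_relax() would sit here in the kernel version */
	}
	return pos;
}

int main(void)
{
	printf("pos = %#x after %d polls\n",
	       (unsigned int)stable_position(), polls);
	return 0;
}

The loop limit trades a perfectly precise residue for a guaranteed upper bound on time spent in the read path, which is the right call for a value that is only a snapshot anyway.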

drivers/dma/ioat/dma.c

Lines changed: 22 additions & 12 deletions
@@ -861,32 +861,42 @@ void ioat_timer_event(unsigned long data)
 		return;
 	}
 
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+
+	/* handle the no-actives case */
+	if (!ioat_ring_active(ioat_chan)) {
+		spin_lock_bh(&ioat_chan->prep_lock);
+		check_active(ioat_chan);
+		spin_unlock_bh(&ioat_chan->prep_lock);
+		spin_unlock_bh(&ioat_chan->cleanup_lock);
+		return;
+	}
+
 	/* if we haven't made progress and we have already
 	 * acknowledged a pending completion once, then be more
 	 * forceful with a restart
 	 */
-	spin_lock_bh(&ioat_chan->cleanup_lock);
 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
 		__cleanup(ioat_chan, phys_complete);
 	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
+		u32 chanerr;
+
+		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+		dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
+		dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
+			 status, chanerr);
+		dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n",
+			 ioat_ring_active(ioat_chan));
+
 		spin_lock_bh(&ioat_chan->prep_lock);
 		ioat_restart_channel(ioat_chan);
 		spin_unlock_bh(&ioat_chan->prep_lock);
 		spin_unlock_bh(&ioat_chan->cleanup_lock);
 		return;
-	} else {
+	} else
 		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
-		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
-	}
 
-	if (ioat_ring_active(ioat_chan))
-		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
-	else {
-		spin_lock_bh(&ioat_chan->prep_lock);
-		check_active(ioat_chan);
-		spin_unlock_bh(&ioat_chan->prep_lock);
-	}
+	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
 	spin_unlock_bh(&ioat_chan->cleanup_lock);
 }
 
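The reshuffle moves the idle check to the top of ioat_timer_event(), so a quiet channel is parked via check_active() and never falls through to the restart path, and the whole body now runs under cleanup_lock taken once at entry (with prep_lock nested inside it, preserving the lock order). A simplified userspace model of the new control flow, using pthread mutexes in place of the BH spinlocks and stub predicates for the hardware checks:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t cleanup_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t prep_lock = PTHREAD_MUTEX_INITIALIZER;

static bool ring_active(void)   { return false; }	/* stub: idle ring */
static bool made_progress(void) { return false; }	/* stub */
static bool acked_once;

static void timer_event(void)
{
	pthread_mutex_lock(&cleanup_lock);

	/* the fix: handle the no-actives case first and leave */
	if (!ring_active()) {
		pthread_mutex_lock(&prep_lock);
		printf("idle: check_active(), no restart\n");
		pthread_mutex_unlock(&prep_lock);
		pthread_mutex_unlock(&cleanup_lock);
		return;
	}

	if (made_progress()) {
		/* __cleanup() equivalent would run here */
	} else if (acked_once) {
		/* second stall in a row: restart under prep_lock */
		pthread_mutex_lock(&prep_lock);
		printf("restarting channel\n");
		pthread_mutex_unlock(&prep_lock);
		pthread_mutex_unlock(&cleanup_lock);
		return;
	} else {
		acked_once = true;	/* be patient the first time */
	}

	/* only still-active channels re-arm the timer here */
	pthread_mutex_unlock(&cleanup_lock);
}

int main(void)
{
	timer_event();
	return 0;
}

Because idle channels return before the restart branch is ever reached, the bug in the subject line, a timer that kept restarting a channel with nothing queued, cannot recur, and the added dev_warn() lines give CHANSTS/CHANERR context when a genuine restart does happen.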
