Skip to content

Commit 6fba14a

Browse files
committed
Merge tag 'dmaengine-fix-6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine
Pull dmaengine fixes from Vinod Koul:

 - Revert pl330 issue_pending waits until WFP state due to regression
   reported in Bluetooth loading

 - Xilinx driver fixes for synchronization, buffer offsets, locking and
   kdoc

 - idxd fixes for spinlock and preventing the migration of the perf
   context to an invalid target

 - idma driver fix for interrupt handling when powered off

 - Tegra driver residual calculation fix

 - Owl driver register access fix

* tag 'dmaengine-fix-6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
  dmaengine: idxd: Fix oops during rmmod on single-CPU platforms
  dmaengine: xilinx: xdma: Clarify kdoc in XDMA driver
  dmaengine: xilinx: xdma: Fix synchronization issue
  dmaengine: xilinx: xdma: Fix wrong offsets in the buffers addresses in dma descriptor
  dma: xilinx_dpdma: Fix locking
  dmaengine: idxd: Convert spinlock to mutex to lock evl workqueue
  idma64: Don't try to serve interrupts when device is powered off
  dmaengine: tegra186: Fix residual calculation
  dmaengine: owl: fix register access functions
  dmaengine: Revert "dmaengine: pl330: issue_pending waits until WFP state"
2 parents 63407d3 + f221033 commit 6fba14a

File tree

14 files changed

+64
-42
lines changed

14 files changed

+64
-42
lines changed

drivers/dma/idma64.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -171,6 +171,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
171171
u32 status_err;
172172
unsigned short i;
173173

174+
/* Since IRQ may be shared, check if DMA controller is powered on */
175+
if (status == GENMASK(31, 0))
176+
return IRQ_NONE;
177+
174178
dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
175179

176180
/* Check if we have any interrupt from the DMA controller */

drivers/dma/idxd/cdev.c

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -342,7 +342,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
342342
if (!evl)
343343
return;
344344

345-
spin_lock(&evl->lock);
345+
mutex_lock(&evl->lock);
346346
status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
347347
t = status.tail;
348348
h = status.head;
@@ -354,9 +354,8 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
354354
set_bit(h, evl->bmap);
355355
h = (h + 1) % size;
356356
}
357-
spin_unlock(&evl->lock);
358-
359357
drain_workqueue(wq->wq);
358+
mutex_unlock(&evl->lock);
360359
}
361360

362361
static int idxd_cdev_release(struct inode *node, struct file *filep)

drivers/dma/idxd/debugfs.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
6666
if (!evl || !evl->log)
6767
return 0;
6868

69-
spin_lock(&evl->lock);
69+
mutex_lock(&evl->lock);
7070

7171
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
7272
t = evl_status.tail;
@@ -87,7 +87,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
8787
dump_event_entry(idxd, s, i, &count, processed);
8888
}
8989

90-
spin_unlock(&evl->lock);
90+
mutex_unlock(&evl->lock);
9191
return 0;
9292
}
9393

drivers/dma/idxd/device.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -775,7 +775,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
775775
goto err_alloc;
776776
}
777777

778-
spin_lock(&evl->lock);
778+
mutex_lock(&evl->lock);
779779
evl->log = addr;
780780
evl->dma = dma_addr;
781781
evl->log_size = size;
@@ -796,7 +796,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
796796
gencfg.evl_en = 1;
797797
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
798798

799-
spin_unlock(&evl->lock);
799+
mutex_unlock(&evl->lock);
800800
return 0;
801801

802802
err_alloc:
@@ -819,7 +819,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
819819
if (!gencfg.evl_en)
820820
return;
821821

822-
spin_lock(&evl->lock);
822+
mutex_lock(&evl->lock);
823823
gencfg.evl_en = 0;
824824
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
825825

@@ -836,7 +836,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
836836
evl_dma = evl->dma;
837837
evl->log = NULL;
838838
evl->size = IDXD_EVL_SIZE_MIN;
839-
spin_unlock(&evl->lock);
839+
mutex_unlock(&evl->lock);
840840

841841
dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
842842
}

drivers/dma/idxd/idxd.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -293,7 +293,7 @@ struct idxd_driver_data {
293293

294294
struct idxd_evl {
295295
/* Lock to protect event log access. */
296-
spinlock_t lock;
296+
struct mutex lock;
297297
void *log;
298298
dma_addr_t dma;
299299
/* Total size of event log = number of entries * entry size. */

drivers/dma/idxd/init.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -354,7 +354,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
354354
if (!evl)
355355
return -ENOMEM;
356356

357-
spin_lock_init(&evl->lock);
357+
mutex_init(&evl->lock);
358358
evl->size = IDXD_EVL_SIZE_MIN;
359359

360360
idxd_name = dev_name(idxd_confdev(idxd));

drivers/dma/idxd/irq.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -363,7 +363,7 @@ static void process_evl_entries(struct idxd_device *idxd)
363363
evl_status.bits = 0;
364364
evl_status.int_pending = 1;
365365

366-
spin_lock(&evl->lock);
366+
mutex_lock(&evl->lock);
367367
/* Clear interrupt pending bit */
368368
iowrite32(evl_status.bits_upper32,
369369
idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
@@ -380,7 +380,7 @@ static void process_evl_entries(struct idxd_device *idxd)
380380

381381
evl_status.head = h;
382382
iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
383-
spin_unlock(&evl->lock);
383+
mutex_unlock(&evl->lock);
384384
}
385385

386386
irqreturn_t idxd_misc_thread(int vec, void *data)

drivers/dma/idxd/perfmon.c

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -528,14 +528,11 @@ static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
528528
return 0;
529529

530530
target = cpumask_any_but(cpu_online_mask, cpu);
531-
532531
/* migrate events if there is a valid target */
533-
if (target < nr_cpu_ids)
532+
if (target < nr_cpu_ids) {
534533
cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
535-
else
536-
target = -1;
537-
538-
perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
534+
perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
535+
}
539536

540537
return 0;
541538
}

drivers/dma/owl-dma.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -250,7 +250,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
250250
else
251251
regval &= ~val;
252252

253-
writel(val, pchan->base + reg);
253+
writel(regval, pchan->base + reg);
254254
}
255255

256256
static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
@@ -274,7 +274,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
274274
else
275275
regval &= ~val;
276276

277-
writel(val, od->base + reg);
277+
writel(regval, od->base + reg);
278278
}
279279

280280
static void dma_writel(struct owl_dma *od, u32 reg, u32 data)

drivers/dma/pl330.c

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1053,9 +1053,6 @@ static bool _trigger(struct pl330_thread *thrd)
10531053

10541054
thrd->req_running = idx;
10551055

1056-
if (desc->rqtype == DMA_MEM_TO_DEV || desc->rqtype == DMA_DEV_TO_MEM)
1057-
UNTIL(thrd, PL330_STATE_WFP);
1058-
10591056
return true;
10601057
}
10611058

drivers/dma/tegra186-gpc-dma.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -746,6 +746,9 @@ static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
746746
bytes_xfer = dma_desc->bytes_xfer +
747747
sg_req[dma_desc->sg_idx].len - (wcount * 4);
748748

749+
if (dma_desc->bytes_req == bytes_xfer)
750+
return 0;
751+
749752
residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);
750753

751754
return residual;

drivers/dma/xilinx/xdma-regs.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -117,6 +117,9 @@ struct xdma_hw_desc {
117117
CHAN_CTRL_IE_WRITE_ERROR | \
118118
CHAN_CTRL_IE_DESC_ERROR)
119119

120+
/* bits of the channel status register */
121+
#define XDMA_CHAN_STATUS_BUSY BIT(0)
122+
120123
#define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START
121124

122125
#define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \

drivers/dma/xilinx/xdma.c

Lines changed: 27 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,8 @@ struct xdma_chan {
7171
enum dma_transfer_direction dir;
7272
struct dma_slave_config cfg;
7373
u32 irq;
74+
struct completion last_interrupt;
75+
bool stop_requested;
7476
};
7577

7678
/**
@@ -376,6 +378,8 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
376378
return ret;
377379

378380
xchan->busy = true;
381+
xchan->stop_requested = false;
382+
reinit_completion(&xchan->last_interrupt);
379383

380384
return 0;
381385
}
@@ -387,21 +391,14 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
387391
static int xdma_xfer_stop(struct xdma_chan *xchan)
388392
{
389393
int ret;
390-
u32 val;
391394
struct xdma_device *xdev = xchan->xdev_hdl;
392395

393396
/* clear run stop bit to prevent any further auto-triggering */
394397
ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
395398
CHAN_CTRL_RUN_STOP);
396399
if (ret)
397400
return ret;
398-
399-
/* Clear the channel status register */
400-
ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
401-
if (ret)
402-
return ret;
403-
404-
return 0;
401+
return ret;
405402
}
406403

407404
/**
@@ -474,6 +471,8 @@ static int xdma_alloc_channels(struct xdma_device *xdev,
474471
xchan->xdev_hdl = xdev;
475472
xchan->base = base + i * XDMA_CHAN_STRIDE;
476473
xchan->dir = dir;
474+
xchan->stop_requested = false;
475+
init_completion(&xchan->last_interrupt);
477476

478477
ret = xdma_channel_init(xchan);
479478
if (ret)
@@ -521,6 +520,7 @@ static int xdma_terminate_all(struct dma_chan *chan)
521520
spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
522521

523522
xdma_chan->busy = false;
523+
xdma_chan->stop_requested = true;
524524
vd = vchan_next_desc(&xdma_chan->vchan);
525525
if (vd) {
526526
list_del(&vd->node);
@@ -542,17 +542,26 @@ static int xdma_terminate_all(struct dma_chan *chan)
542542
static void xdma_synchronize(struct dma_chan *chan)
543543
{
544544
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
545+
struct xdma_device *xdev = xdma_chan->xdev_hdl;
546+
int st = 0;
547+
548+
/* If the engine continues running, wait for the last interrupt */
549+
regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st);
550+
if (st & XDMA_CHAN_STATUS_BUSY)
551+
wait_for_completion_timeout(&xdma_chan->last_interrupt, msecs_to_jiffies(1000));
545552

546553
vchan_synchronize(&xdma_chan->vchan);
547554
}
548555

549556
/**
550-
* xdma_fill_descs - Fill hardware descriptors with contiguous memory block addresses
551-
* @sw_desc: tx descriptor state container
552-
* @src_addr: Value for a ->src_addr field of a first descriptor
553-
* @dst_addr: Value for a ->dst_addr field of a first descriptor
554-
* @size: Total size of a contiguous memory block
555-
* @filled_descs_num: Number of filled hardware descriptors for corresponding sw_desc
557+
* xdma_fill_descs() - Fill hardware descriptors for one contiguous memory chunk.
558+
* More than one descriptor will be used if the size is bigger
559+
* than XDMA_DESC_BLEN_MAX.
560+
* @sw_desc: Descriptor container
561+
* @src_addr: First value for the ->src_addr field
562+
* @dst_addr: First value for the ->dst_addr field
563+
* @size: Size of the contiguous memory block
564+
* @filled_descs_num: Index of the first descriptor to take care of in @sw_desc
556565
*/
557566
static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
558567
u64 dst_addr, u32 size, u32 filled_descs_num)
@@ -704,7 +713,7 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
704713
desc_num = 0;
705714
for (i = 0; i < periods; i++) {
706715
desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
707-
addr += i * period_size;
716+
addr += period_size;
708717
}
709718

710719
tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
@@ -876,6 +885,9 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
876885
u32 st;
877886
bool repeat_tx;
878887

888+
if (xchan->stop_requested)
889+
complete(&xchan->last_interrupt);
890+
879891
spin_lock(&xchan->vchan.lock);
880892

881893
/* get submitted request */

drivers/dma/xilinx/xilinx_dpdma.c

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -214,7 +214,8 @@ struct xilinx_dpdma_tx_desc {
214214
* @running: true if the channel is running
215215
* @first_frame: flag for the first frame of stream
216216
* @video_group: flag if multi-channel operation is needed for video channels
217-
* @lock: lock to access struct xilinx_dpdma_chan
217+
* @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
218+
* @vchan.lock, if both are to be held.
218219
* @desc_pool: descriptor allocation pool
219220
* @err_task: error IRQ bottom half handler
220221
* @desc: References to descriptors being processed
@@ -1097,12 +1098,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
10971098
* Complete the active descriptor, if any, promote the pending
10981099
* descriptor to active, and queue the next transfer, if any.
10991100
*/
1101+
spin_lock(&chan->vchan.lock);
11001102
if (chan->desc.active)
11011103
vchan_cookie_complete(&chan->desc.active->vdesc);
11021104
chan->desc.active = pending;
11031105
chan->desc.pending = NULL;
11041106

11051107
xilinx_dpdma_chan_queue_transfer(chan);
1108+
spin_unlock(&chan->vchan.lock);
11061109

11071110
out:
11081111
spin_unlock_irqrestore(&chan->lock, flags);
@@ -1264,10 +1267,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
12641267
struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
12651268
unsigned long flags;
12661269

1267-
spin_lock_irqsave(&chan->vchan.lock, flags);
1270+
spin_lock_irqsave(&chan->lock, flags);
1271+
spin_lock(&chan->vchan.lock);
12681272
if (vchan_issue_pending(&chan->vchan))
12691273
xilinx_dpdma_chan_queue_transfer(chan);
1270-
spin_unlock_irqrestore(&chan->vchan.lock, flags);
1274+
spin_unlock(&chan->vchan.lock);
1275+
spin_unlock_irqrestore(&chan->lock, flags);
12711276
}
12721277

12731278
static int xilinx_dpdma_config(struct dma_chan *dchan,
@@ -1495,7 +1500,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
14951500
XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
14961501

14971502
spin_lock_irqsave(&chan->lock, flags);
1503+
spin_lock(&chan->vchan.lock);
14981504
xilinx_dpdma_chan_queue_transfer(chan);
1505+
spin_unlock(&chan->vchan.lock);
14991506
spin_unlock_irqrestore(&chan->lock, flags);
15001507
}
15011508

0 commit comments

Comments
 (0)