@@ -198,8 +198,10 @@ static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
 {
         struct dma_async_tx_descriptor *tx_desc;
         struct virt_dma_desc *vd;
+        struct pt_device *pt;
         unsigned long flags;
 
+        pt = chan->pt;
         /* Loop over descriptors until one is found with commands */
         do {
                 if (desc) {
@@ -217,7 +219,7 @@ static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
 
         spin_lock_irqsave(&chan->vc.lock, flags);
 
-        if (desc) {
+        if (pt->ver != AE4_DMA_VERSION && desc) {
                 if (desc->status != DMA_COMPLETE) {
                         if (desc->status != DMA_ERROR)
                                 desc->status = DMA_COMPLETE;
@@ -235,7 +237,7 @@ static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
 
         spin_unlock_irqrestore(&chan->vc.lock, flags);
 
-        if (tx_desc) {
+        if (pt->ver != AE4_DMA_VERSION && tx_desc) {
                 dmaengine_desc_get_callback_invoke(tx_desc, NULL);
                 dma_run_dependencies(tx_desc);
                 vchan_vdesc_fini(vd);
@@ -245,23 +247,58 @@ static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
         return NULL;
 }
 
+static inline bool ae4_core_queue_full(struct pt_cmd_queue *cmd_q)
+{
+        u32 front_wi = readl(cmd_q->reg_control + AE4_WR_IDX_OFF);
+        u32 rear_ri = readl(cmd_q->reg_control + AE4_RD_IDX_OFF);
+
+        if (((MAX_CMD_QLEN + front_wi - rear_ri) % MAX_CMD_QLEN) >= (MAX_CMD_QLEN - 1))
+                return true;
+
+        return false;
+}
+
 static void pt_cmd_callback(void *data, int err)
 {
         struct pt_dma_desc *desc = data;
+        struct ae4_cmd_queue *ae4cmd_q;
         struct dma_chan *dma_chan;
         struct pt_dma_chan *chan;
+        struct ae4_device *ae4;
+        struct pt_device *pt;
         int ret;
 
         if (err == -EINPROGRESS)
                 return;
 
         dma_chan = desc->vd.tx.chan;
         chan = to_pt_chan(dma_chan);
+        pt = chan->pt;
 
         if (err)
                 desc->status = DMA_ERROR;
 
         while (true) {
+                if (pt->ver == AE4_DMA_VERSION) {
+                        ae4 = container_of(pt, struct ae4_device, pt);
+                        ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+
+                        if (ae4cmd_q->q_cmd_count >= (CMD_Q_LEN - 1) ||
+                            ae4_core_queue_full(&ae4cmd_q->cmd_q)) {
+                                wake_up(&ae4cmd_q->q_w);
+
+                                if (wait_for_completion_timeout(&ae4cmd_q->cmp,
+                                                                msecs_to_jiffies(AE4_TIME_OUT))
+                                                                == 0) {
+                                        dev_err(pt->dev, "TIMEOUT %d:\n", ae4cmd_q->id);
+                                        break;
+                                }
+
+                                reinit_completion(&ae4cmd_q->cmp);
+                                continue;
+                        }
+                }
+
                 /* Check for DMA descriptor completion */
                 desc = pt_handle_active_desc(chan, desc);
 
@@ -296,6 +333,49 @@ static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
         return desc;
 }
 
+static void pt_cmd_callback_work(void *data, int err)
+{
+        struct dma_async_tx_descriptor *tx_desc;
+        struct pt_dma_desc *desc = data;
+        struct dma_chan *dma_chan;
+        struct virt_dma_desc *vd;
+        struct pt_dma_chan *chan;
+        unsigned long flags;
+
+        dma_chan = desc->vd.tx.chan;
+        chan = to_pt_chan(dma_chan);
+
+        if (err == -EINPROGRESS)
+                return;
+
+        tx_desc = &desc->vd.tx;
+        vd = &desc->vd;
+
+        if (err)
+                desc->status = DMA_ERROR;
+
+        spin_lock_irqsave(&chan->vc.lock, flags);
+        if (desc) {
+                if (desc->status != DMA_COMPLETE) {
+                        if (desc->status != DMA_ERROR)
+                                desc->status = DMA_COMPLETE;
+
+                        dma_cookie_complete(tx_desc);
+                        dma_descriptor_unmap(tx_desc);
+                } else {
+                        tx_desc = NULL;
+                }
+        }
+        spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+        if (tx_desc) {
+                dmaengine_desc_get_callback_invoke(tx_desc, NULL);
+                dma_run_dependencies(tx_desc);
+                list_del(&desc->vd.node);
+                vchan_vdesc_fini(vd);
+        }
+}
+
 static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
                                           dma_addr_t dst,
                                           dma_addr_t src,
@@ -327,6 +407,7 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
         desc->len = len;
 
         if (pt->ver == AE4_DMA_VERSION) {
+                pt_cmd->pt_cmd_callback = pt_cmd_callback_work;
                 ae4 = container_of(pt, struct ae4_device, pt);
                 ae4cmd_q = &ae4->ae4cmd_q[chan->id];
                 mutex_lock(&ae4cmd_q->cmd_lock);
@@ -367,13 +448,16 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
 {
         struct pt_dma_chan *chan = to_pt_chan(dma_chan);
         struct pt_dma_desc *desc;
+        struct pt_device *pt;
         unsigned long flags;
         bool engine_is_idle = true;
 
+        pt = chan->pt;
+
         spin_lock_irqsave(&chan->vc.lock, flags);
 
         desc = pt_next_dma_desc(chan);
-        if (desc)
+        if (desc && pt->ver != AE4_DMA_VERSION)
                 engine_is_idle = false;
 
         vchan_issue_pending(&chan->vc);
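
For context on the queue-full test added above: ae4_core_queue_full() treats the hardware write and read indices as positions in a circular command ring and reports the ring as full once the computed occupancy reaches MAX_CMD_QLEN - 1. A minimal user-space sketch of that arithmetic, with MAX_CMD_QLEN assumed to be 32 for illustration and the AE4_WR_IDX_OFF/AE4_RD_IDX_OFF register reads replaced by plain arguments, is:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed ring depth for this sketch; the real value comes from the ae4dma headers. */
#define MAX_CMD_QLEN 32

/* Same circular-buffer occupancy test as ae4_core_queue_full(), minus the MMIO reads. */
static bool queue_full(uint32_t front_wi, uint32_t rear_ri)
{
        return ((MAX_CMD_QLEN + front_wi - rear_ri) % MAX_CMD_QLEN) >= (MAX_CMD_QLEN - 1);
}

int main(void)
{
        /* Write index one slot behind the read index after wrap-around: ring is full. */
        printf("wi=2 ri=3 -> %d\n", queue_full(2, 3));   /* prints 1 */
        /* Equal indices: ring is empty, so submission may proceed. */
        printf("wi=5 ri=5 -> %d\n", queue_full(5, 5));   /* prints 0 */
        return 0;
}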