@@ -93,7 +93,24 @@ static void pt_do_cleanup(struct virt_dma_desc *vd)
 	kmem_cache_free(pt->dma_desc_cache, desc);
 }
 
-static int pt_dma_start_desc(struct pt_dma_desc *desc)
+static struct pt_cmd_queue *pt_get_cmd_queue(struct pt_device *pt, struct pt_dma_chan *chan)
+{
+	struct ae4_cmd_queue *ae4cmd_q;
+	struct pt_cmd_queue *cmd_q;
+	struct ae4_device *ae4;
+
+	if (pt->ver == AE4_DMA_VERSION) {
+		ae4 = container_of(pt, struct ae4_device, pt);
+		ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+		cmd_q = &ae4cmd_q->cmd_q;
+	} else {
+		cmd_q = &pt->cmd_q;
+	}
+
+	return cmd_q;
+}
+
+static int pt_dma_start_desc(struct pt_dma_desc *desc, struct pt_dma_chan *chan)
 {
 	struct pt_passthru_engine *pt_engine;
 	struct pt_device *pt;
@@ -104,7 +121,9 @@ static int pt_dma_start_desc(struct pt_dma_desc *desc)
 
 	pt_cmd = &desc->pt_cmd;
 	pt = pt_cmd->pt;
-	cmd_q = &pt->cmd_q;
+
+	cmd_q = pt_get_cmd_queue(pt, chan);
+
 	pt_engine = &pt_cmd->passthru;
 
 	pt->tdata.cmd = pt_cmd;
@@ -199,7 +218,7 @@ static void pt_cmd_callback(void *data, int err)
 		if (!desc)
 			break;
 
-		ret = pt_dma_start_desc(desc);
+		ret = pt_dma_start_desc(desc, chan);
 		if (!ret)
 			break;
 
@@ -234,15 +253,18 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
 {
 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
 	struct pt_passthru_engine *pt_engine;
+	struct pt_device *pt = chan->pt;
+	struct ae4_cmd_queue *ae4cmd_q;
 	struct pt_dma_desc *desc;
+	struct ae4_device *ae4;
 	struct pt_cmd *pt_cmd;
 
 	desc = pt_alloc_dma_desc(chan, flags);
 	if (!desc)
 		return NULL;
 
 	pt_cmd = &desc->pt_cmd;
-	pt_cmd->pt = chan->pt;
+	pt_cmd->pt = pt;
 	pt_engine = &pt_cmd->passthru;
 	pt_cmd->engine = PT_ENGINE_PASSTHRU;
 	pt_engine->src_dma = src;
@@ -253,6 +275,14 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
 
 	desc->len = len;
 
+	if (pt->ver == AE4_DMA_VERSION) {
+		ae4 = container_of(pt, struct ae4_device, pt);
+		ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+		mutex_lock(&ae4cmd_q->cmd_lock);
+		list_add_tail(&pt_cmd->entry, &ae4cmd_q->cmd);
+		mutex_unlock(&ae4cmd_q->cmd_lock);
+	}
+
 	return desc;
 }
 
@@ -310,8 +340,11 @@ static enum dma_status
 pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 	     struct dma_tx_state *txstate)
 {
-	struct pt_device *pt = to_pt_chan(c)->pt;
-	struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+	struct pt_dma_chan *chan = to_pt_chan(c);
+	struct pt_device *pt = chan->pt;
+	struct pt_cmd_queue *cmd_q;
+
+	cmd_q = pt_get_cmd_queue(pt, chan);
 
 	pt_check_status_trans(pt, cmd_q);
 	return dma_cookie_status(c, cookie, txstate);
@@ -320,10 +353,13 @@ pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 static int pt_pause(struct dma_chan *dma_chan)
 {
 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+	struct pt_device *pt = chan->pt;
+	struct pt_cmd_queue *cmd_q;
 	unsigned long flags;
 
 	spin_lock_irqsave(&chan->vc.lock, flags);
-	pt_stop_queue(&chan->pt->cmd_q);
+	cmd_q = pt_get_cmd_queue(pt, chan);
+	pt_stop_queue(cmd_q);
 	spin_unlock_irqrestore(&chan->vc.lock, flags);
 
 	return 0;
@@ -333,10 +369,13 @@ static int pt_resume(struct dma_chan *dma_chan)
 {
 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
 	struct pt_dma_desc *desc = NULL;
+	struct pt_device *pt = chan->pt;
+	struct pt_cmd_queue *cmd_q;
 	unsigned long flags;
 
 	spin_lock_irqsave(&chan->vc.lock, flags);
-	pt_start_queue(&chan->pt->cmd_q);
+	cmd_q = pt_get_cmd_queue(pt, chan);
+	pt_start_queue(cmd_q);
 	desc = pt_next_dma_desc(chan);
 	spin_unlock_irqrestore(&chan->vc.lock, flags);
 
@@ -350,11 +389,17 @@ static int pt_resume(struct dma_chan *dma_chan)
 static int pt_terminate_all(struct dma_chan *dma_chan)
 {
 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+	struct pt_device *pt = chan->pt;
+	struct pt_cmd_queue *cmd_q;
 	unsigned long flags;
-	struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q;
 	LIST_HEAD(head);
 
-	iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+	cmd_q = pt_get_cmd_queue(pt, chan);
+	if (pt->ver == AE4_DMA_VERSION)
+		pt_stop_queue(cmd_q);
+	else
+		iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+
 	spin_lock_irqsave(&chan->vc.lock, flags);
 	vchan_get_all_descriptors(&chan->vc, &head);
 	spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -367,14 +412,24 @@ static int pt_terminate_all(struct dma_chan *dma_chan)
 
 int pt_dmaengine_register(struct pt_device *pt)
 {
-	struct pt_dma_chan *chan;
 	struct dma_device *dma_dev = &pt->dma_dev;
-	char *cmd_cache_name;
+	struct ae4_cmd_queue *ae4cmd_q = NULL;
+	struct ae4_device *ae4 = NULL;
+	struct pt_dma_chan *chan;
 	char *desc_cache_name;
-	int ret;
+	char *cmd_cache_name;
+	int ret, i;
+
+	if (pt->ver == AE4_DMA_VERSION)
+		ae4 = container_of(pt, struct ae4_device, pt);
+
+	if (ae4)
+		pt->pt_dma_chan = devm_kcalloc(pt->dev, ae4->cmd_q_count,
+					       sizeof(*pt->pt_dma_chan), GFP_KERNEL);
+	else
+		pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
+					       GFP_KERNEL);
 
-	pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
-				       GFP_KERNEL);
 	if (!pt->pt_dma_chan)
 		return -ENOMEM;
 
@@ -416,9 +471,6 @@ int pt_dmaengine_register(struct pt_device *pt)
 
 	INIT_LIST_HEAD(&dma_dev->channels);
 
-	chan = pt->pt_dma_chan;
-	chan->pt = pt;
-
 	/* Set base and prep routines */
 	dma_dev->device_free_chan_resources = pt_free_chan_resources;
 	dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
@@ -430,8 +482,21 @@ int pt_dmaengine_register(struct pt_device *pt)
 	dma_dev->device_terminate_all = pt_terminate_all;
 	dma_dev->device_synchronize = pt_synchronize;
 
-	chan->vc.desc_free = pt_do_cleanup;
-	vchan_init(&chan->vc, dma_dev);
+	if (ae4) {
+		for (i = 0; i < ae4->cmd_q_count; i++) {
+			chan = pt->pt_dma_chan + i;
+			ae4cmd_q = &ae4->ae4cmd_q[i];
+			chan->id = ae4cmd_q->id;
+			chan->pt = pt;
+			chan->vc.desc_free = pt_do_cleanup;
+			vchan_init(&chan->vc, dma_dev);
+		}
+	} else {
+		chan = pt->pt_dma_chan;
+		chan->pt = pt;
+		chan->vc.desc_free = pt_do_cleanup;
+		vchan_init(&chan->vc, dma_dev);
+	}
 
 	ret = dma_async_device_register(dma_dev);
 	if (ret)