 #define OMAP242X_DMA_EXT_DMAREQ4	16
 #define OMAP242X_DMA_EXT_DMAREQ5	64
 
+struct tusb_dma_data {
+	int			ch;
+	s8			dmareq;
+	s8			sync_dev;
+};
+
 struct tusb_omap_dma_ch {
 	struct musb		*musb;
 	void __iomem		*tbase;
@@ -39,9 +45,7 @@ struct tusb_omap_dma_ch {
 	u8			tx;
 	struct musb_hw_ep	*hw_ep;
 
-	int			ch;
-	s8			dmareq;
-	s8			sync_dev;
+	struct tusb_dma_data	dma_data;
 
 	struct tusb_omap_dma	*tusb_dma;
 
@@ -58,9 +62,7 @@ struct tusb_omap_dma {
 	struct dma_controller		controller;
 	void __iomem			*tbase;
 
-	int				ch;
-	s8				dmareq;
-	s8				sync_dev;
+	struct tusb_dma_data		dma_data;
 	unsigned			multichannel:1;
 };
 
@@ -119,9 +121,9 @@ static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
 	spin_lock_irqsave(&musb->lock, flags);
 
 	if (tusb_dma->multichannel)
-		ch = chdat->ch;
+		ch = chdat->dma_data.ch;
 	else
-		ch = tusb_dma->ch;
+		ch = tusb_dma->dma_data.ch;
 
 	if (ch_status != OMAP_DMA_BLOCK_IRQ)
 		printk(KERN_ERR "TUSB DMA error status: %i\n", ch_status);
@@ -140,8 +142,7 @@ static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
 	/* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */
 	if (unlikely(remaining > chdat->transfer_len)) {
 		dev_dbg(musb->controller, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n",
-			chdat->tx ? "tx" : "rx", chdat->ch,
-			remaining);
+			chdat->tx ? "tx" : "rx", ch, remaining);
 		remaining = 0;
 	}
 
@@ -220,9 +221,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
 	int src_burst, dst_burst;
 	u16 csr;
 	u32 psize;
-	int ch;
-	s8 dmareq;
-	s8 sync_dev;
+	struct tusb_dma_data *dma_data;
 
 	if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz))
 		return false;
@@ -249,7 +248,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
 	dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining);
 	if (dma_remaining) {
 		dev_dbg(musb->controller, "Busy %s dma ch%i, not using: %08x\n",
-			chdat->tx ? "tx" : "rx", chdat->ch,
+			chdat->tx ? "tx" : "rx", chdat->dma_data.ch,
 			dma_remaining);
 		return false;
 	}
@@ -262,26 +261,23 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
 	chdat->transfer_packet_sz = packet_sz;
 
 	if (tusb_dma->multichannel) {
-		ch = chdat->ch;
-		dmareq = chdat->dmareq;
-		sync_dev = chdat->sync_dev;
+		dma_data = &chdat->dma_data;
 	} else {
+		dma_data = &tusb_dma->dma_data;
+
 		if (tusb_omap_use_shared_dmareq(chdat) != 0) {
 			dev_dbg(musb->controller, "could not get dma for ep%i\n", chdat->epnum);
 			return false;
 		}
-		if (tusb_dma->ch < 0) {
+		if (dma_data->ch < 0) {
 			/* REVISIT: This should get blocked earlier, happens
 			 * with MSC ErrorRecoveryTest
 			 */
 			WARN_ON(1);
 			return false;
 		}
 
-		ch = tusb_dma->ch;
-		dmareq = tusb_dma->dmareq;
-		sync_dev = tusb_dma->sync_dev;
-		omap_set_dma_callback(ch, tusb_omap_dma_cb, channel);
+		omap_set_dma_callback(dma_data->ch, tusb_omap_dma_cb, channel);
 	}
 
 	chdat->packet_sz = packet_sz;
@@ -312,7 +308,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
 
 	dev_dbg(musb->controller, "ep%i %s dma ch%i dma: %pad len: %u(%u) packet_sz: %i(%i)\n",
 		chdat->epnum, chdat->tx ? "tx" : "rx",
-		ch, &dma_addr, chdat->transfer_len, len,
+		dma_data->ch, &dma_addr, chdat->transfer_len, len,
 		chdat->transfer_packet_sz, packet_sz);
 
 	/*
@@ -329,7 +325,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
 		dma_params.dst_ei	= 1;
 		dma_params.dst_fi	= -31;	/* Loop 32 byte window */
 
-		dma_params.trigger	= sync_dev;
+		dma_params.trigger	= dma_data->sync_dev;
 		dma_params.sync_mode	= OMAP_DMA_SYNC_FRAME;
 		dma_params.src_or_dst_synch = 0;	/* Dest sync */
 
@@ -346,7 +342,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
 		dma_params.dst_ei	= 0;
 		dma_params.dst_fi	= 0;
 
-		dma_params.trigger	= sync_dev;
+		dma_params.trigger	= dma_data->sync_dev;
 		dma_params.sync_mode	= OMAP_DMA_SYNC_FRAME;
 		dma_params.src_or_dst_synch = 1;	/* Source sync */
 
@@ -360,10 +356,10 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
 		((dma_addr & 0x3) == 0) ? "sync" : "async",
 		dma_params.src_start, dma_params.dst_start);
 
-	omap_set_dma_params(ch, &dma_params);
-	omap_set_dma_src_burst_mode(ch, src_burst);
-	omap_set_dma_dest_burst_mode(ch, dst_burst);
-	omap_set_dma_write_mode(ch, OMAP_DMA_WRITE_LAST_NON_POSTED);
+	omap_set_dma_params(dma_data->ch, &dma_params);
+	omap_set_dma_src_burst_mode(dma_data->ch, src_burst);
+	omap_set_dma_dest_burst_mode(dma_data->ch, dst_burst);
+	omap_set_dma_write_mode(dma_data->ch, OMAP_DMA_WRITE_LAST_NON_POSTED);
 
 	/*
 	 * Prepare MUSB for DMA transfer
@@ -386,7 +382,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
 	/*
 	 * Start DMA transfer
 	 */
-	omap_start_dma(ch);
+	omap_start_dma(dma_data->ch);
 
 	if (chdat->tx) {
 		/* Send transfer_packet_sz packets at a time */
@@ -415,16 +411,17 @@ static int tusb_omap_dma_abort(struct dma_channel *channel)
 {
 	struct tusb_omap_dma_ch *chdat = to_chdat(channel);
 	struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
+	struct tusb_dma_data *dma_data = &tusb_dma->dma_data;
 
 	if (!tusb_dma->multichannel) {
-		if (tusb_dma->ch >= 0) {
-			omap_stop_dma(tusb_dma->ch);
-			omap_free_dma(tusb_dma->ch);
-			tusb_dma->ch = -1;
+		if (dma_data->ch >= 0) {
+			omap_stop_dma(dma_data->ch);
+			omap_free_dma(dma_data->ch);
+			dma_data->ch = -1;
 		}
 
-		tusb_dma->dmareq = -1;
-		tusb_dma->sync_dev = -1;
+		dma_data->dmareq = -1;
+		dma_data->sync_dev = -1;
 	}
 
 	channel->status = MUSB_DMA_STATUS_FREE;
@@ -462,8 +459,8 @@ static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat)
 	reg |= ((1 << 4) << (dmareq_nr * 5));
 	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
 
-	chdat->dmareq = dmareq_nr;
-	chdat->sync_dev = sync_dev[chdat->dmareq];
+	chdat->dma_data.dmareq = dmareq_nr;
+	chdat->dma_data.sync_dev = sync_dev[chdat->dma_data.dmareq];
 
 	return 0;
 }
@@ -472,15 +469,15 @@ static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat)
 {
 	u32 reg;
 
-	if (!chdat || chdat->dmareq < 0)
+	if (!chdat || chdat->dma_data.dmareq < 0)
 		return;
 
 	reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
-	reg &= ~(0x1f << (chdat->dmareq * 5));
+	reg &= ~(0x1f << (chdat->dma_data.dmareq * 5));
 	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
 
-	chdat->dmareq = -1;
-	chdat->sync_dev = -1;
+	chdat->dma_data.dmareq = -1;
+	chdat->dma_data.sync_dev = -1;
 }
 
 static struct dma_channel *dma_channel_pool[MAX_DMAREQ];
@@ -492,11 +489,13 @@ tusb_omap_dma_allocate(struct dma_controller *c,
 {
 	int ret, i;
 	const char		*dev_name;
+	void			*cb_data;
 	struct tusb_omap_dma	*tusb_dma;
 	struct musb		*musb;
 	void __iomem		*tbase;
 	struct dma_channel	*channel = NULL;
 	struct tusb_omap_dma_ch	*chdat = NULL;
+	struct tusb_dma_data	*dma_data = NULL;
 	u32			reg;
 
 	tusb_dma = container_of(c, struct tusb_omap_dma, controller);
@@ -529,56 +528,62 @@ tusb_omap_dma_allocate(struct dma_controller *c,
 	if (!channel)
 		return NULL;
 
-	if (tx) {
-		chdat->tx = 1;
-		dev_name = "TUSB transmit";
-	} else {
-		chdat->tx = 0;
-		dev_name = "TUSB receive";
-	}
-
 	chdat->musb = tusb_dma->controller.musb;
 	chdat->tbase = tusb_dma->tbase;
 	chdat->hw_ep = hw_ep;
 	chdat->epnum = hw_ep->epnum;
-	chdat->dmareq = -1;
 	chdat->completed_len = 0;
 	chdat->tusb_dma = tusb_dma;
+	if (tx)
+		chdat->tx = 1;
+	else
+		chdat->tx = 0;
 
 	channel->max_len = 0x7fffffff;
 	channel->desired_mode = 0;
 	channel->actual_len = 0;
 
 	if (tusb_dma->multichannel) {
+		dma_data = &chdat->dma_data;
 		ret = tusb_omap_dma_allocate_dmareq(chdat);
 		if (ret != 0)
 			goto free_dmareq;
 
-		ret = omap_request_dma(chdat->sync_dev, dev_name,
-				tusb_omap_dma_cb, channel, &chdat->ch);
-		if (ret != 0)
-			goto free_dmareq;
-	} else if (tusb_dma->ch == -1) {
-		tusb_dma->dmareq = 0;
-		tusb_dma->sync_dev = OMAP24XX_DMA_EXT_DMAREQ0;
-
+		if (chdat->tx)
+			dev_name = "TUSB transmit";
+		else
+			dev_name = "TUSB receive";
+		cb_data = channel;
+	} else if (tusb_dma->dma_data.ch == -1) {
+		dma_data = &tusb_dma->dma_data;
+		dma_data->dmareq = 0;
+		dma_data->sync_dev = OMAP24XX_DMA_EXT_DMAREQ0;
+
+		dev_name = "TUSB shared";
 		/* Callback data gets set later in the shared dmareq case */
-		ret = omap_request_dma(tusb_dma->sync_dev, "TUSB shared",
-				tusb_omap_dma_cb, NULL, &tusb_dma->ch);
+		cb_data = NULL;
+
+		chdat->dma_data.dmareq = -1;
+		chdat->dma_data.ch = -1;
+		chdat->dma_data.sync_dev = -1;
+	}
+
+	if (dma_data) {
+		ret = omap_request_dma(dma_data->sync_dev, dev_name,
+				       tusb_omap_dma_cb, cb_data,
+				       &dma_data->ch);
 		if (ret != 0)
 			goto free_dmareq;
-
-		chdat->dmareq = -1;
-		chdat->ch = -1;
+	} else {
+		/* Already allocated shared, single DMA channel. */
+		dma_data = &tusb_dma->dma_data;
 	}
 
 	dev_dbg(musb->controller, "ep%i %s dma: %s dma%i dmareq%i sync%i\n",
 		chdat->epnum,
 		chdat->tx ? "tx" : "rx",
-		chdat->ch >= 0 ? "dedicated" : "shared",
-		chdat->ch >= 0 ? chdat->ch : tusb_dma->ch,
-		chdat->dmareq >= 0 ? chdat->dmareq : tusb_dma->dmareq,
-		chdat->sync_dev >= 0 ? chdat->sync_dev : tusb_dma->sync_dev);
+		chdat->dma_data.ch >= 0 ? "dedicated" : "shared",
+		dma_data->ch, dma_data->dmareq, dma_data->sync_dev);
 
 	return channel;
 
@@ -598,7 +603,8 @@ static void tusb_omap_dma_release(struct dma_channel *channel)
 	void __iomem		*tbase = musb->ctrl_base;
 	u32			reg;
 
-	dev_dbg(musb->controller, "ep%i ch%i\n", chdat->epnum, chdat->ch);
+	dev_dbg(musb->controller, "ep%i ch%i\n", chdat->epnum,
+		chdat->dma_data.ch);
 
 	reg = musb_readl(tbase, TUSB_DMA_INT_MASK);
 	if (chdat->tx)
@@ -616,13 +622,13 @@ static void tusb_omap_dma_release(struct dma_channel *channel)
 
 	channel->status = MUSB_DMA_STATUS_UNKNOWN;
 
-	if (chdat->ch >= 0) {
-		omap_stop_dma(chdat->ch);
-		omap_free_dma(chdat->ch);
-		chdat->ch = -1;
+	if (chdat->dma_data.ch >= 0) {
+		omap_stop_dma(chdat->dma_data.ch);
+		omap_free_dma(chdat->dma_data.ch);
+		chdat->dma_data.ch = -1;
 	}
 
-	if (chdat->dmareq >= 0)
+	if (chdat->dma_data.dmareq >= 0)
 		tusb_omap_dma_free_dmareq(chdat);
 
 	channel = NULL;
@@ -642,8 +648,8 @@ void tusb_dma_controller_destroy(struct dma_controller *c)
 		}
 	}
 
-	if (tusb_dma && !tusb_dma->multichannel && tusb_dma->ch >= 0)
-		omap_free_dma(tusb_dma->ch);
+	if (tusb_dma && !tusb_dma->multichannel && tusb_dma->dma_data.ch >= 0)
+		omap_free_dma(tusb_dma->dma_data.ch);
 
 	kfree(tusb_dma);
 }
@@ -673,9 +679,9 @@ tusb_dma_controller_create(struct musb *musb, void __iomem *base)
 	tusb_dma->controller.musb = musb;
 	tusb_dma->tbase = musb->ctrl_base;
 
-	tusb_dma->ch = -1;
-	tusb_dma->dmareq = -1;
-	tusb_dma->sync_dev = -1;
+	tusb_dma->dma_data.ch = -1;
+	tusb_dma->dma_data.dmareq = -1;
+	tusb_dma->dma_data.sync_dev = -1;
 
 	tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate;
 	tusb_dma->controller.channel_release = tusb_omap_dma_release;