@@ -104,25 +104,14 @@ struct msgdma_extended_desc {
104
104
#define MSGDMA_DESC_STRIDE_WR 0x00010000
105
105
#define MSGDMA_DESC_STRIDE_RW 0x00010001
106
106
107
- /**
108
- * struct msgdma_csr - mSGDMA dispatcher control and status register map
109
- * @status: Read/Clear
110
- * @control: Read/Write
111
- * @rw_fill_level: bit 31:16 - write fill level
112
- * bit 15:00 - read fill level
113
- * @resp_fill_level: bit 15:00 - response FIFO fill level
114
- * @rw_seq_num: bit 31:16 - write sequence number
115
- * bit 15:00 - read sequence number
116
- * @pad: reserved
117
- */
118
- struct msgdma_csr {
119
- u32 status ;
120
- u32 control ;
121
- u32 rw_fill_level ;
122
- u32 resp_fill_level ;
123
- u32 rw_seq_num ;
124
- u32 pad [3 ];
125
- };
107
+ /* mSGDMA dispatcher control and status register map */
108
+ #define MSGDMA_CSR_STATUS 0x00 /* Read / Clear */
109
+ #define MSGDMA_CSR_CONTROL 0x04 /* Read / Write */
110
+ #define MSGDMA_CSR_RW_FILL_LEVEL 0x08 /* 31:16 - write fill level */
111
+ /* 15:00 - read fill level */
112
+ #define MSGDMA_CSR_RESP_FILL_LEVEL 0x0c /* response FIFO fill level */
113
+ #define MSGDMA_CSR_RW_SEQ_NUM 0x10 /* 31:16 - write seq number */
114
+ /* 15:00 - read seq number */
126
115
127
116
/* mSGDMA CSR status register bit definitions */
128
117
#define MSGDMA_CSR_STAT_BUSY BIT(0)
@@ -157,10 +146,8 @@ struct msgdma_csr {
157
146
#define MSGDMA_CSR_SEQ_NUM_GET (v ) (((v) & 0xffff0000) >> 16)
158
147
159
148
/* mSGDMA response register map */
160
- struct msgdma_response {
161
- u32 bytes_transferred ;
162
- u32 status ;
163
- };
149
+ #define MSGDMA_RESP_BYTES_TRANSFERRED 0x00
150
+ #define MSGDMA_RESP_STATUS 0x04
164
151
165
152
/* mSGDMA response register bit definitions */
166
153
#define MSGDMA_RESP_EARLY_TERM BIT(8)
@@ -204,13 +191,13 @@ struct msgdma_device {
204
191
int irq ;
205
192
206
193
/* mSGDMA controller */
207
- struct msgdma_csr * csr ;
194
+ void __iomem * csr ;
208
195
209
196
/* mSGDMA descriptors */
210
- struct msgdma_extended_desc * desc ;
197
+ void __iomem * desc ;
211
198
212
199
/* mSGDMA response */
213
- struct msgdma_response * resp ;
200
+ void __iomem * resp ;
214
201
};
215
202
216
203
#define to_mdev (chan ) container_of(chan, struct msgdma_device, dmachan)
@@ -484,35 +471,36 @@ static void msgdma_reset(struct msgdma_device *mdev)
484
471
int ret ;
485
472
486
473
/* Reset mSGDMA */
487
- iowrite32 (MSGDMA_CSR_STAT_MASK , & mdev -> csr -> status );
488
- iowrite32 (MSGDMA_CSR_CTL_RESET , & mdev -> csr -> control );
474
+ iowrite32 (MSGDMA_CSR_STAT_MASK , mdev -> csr + MSGDMA_CSR_STATUS );
475
+ iowrite32 (MSGDMA_CSR_CTL_RESET , mdev -> csr + MSGDMA_CSR_CONTROL );
489
476
490
- ret = readl_poll_timeout (& mdev -> csr -> status , val ,
477
+ ret = readl_poll_timeout (mdev -> csr + MSGDMA_CSR_STATUS , val ,
491
478
(val & MSGDMA_CSR_STAT_RESETTING ) == 0 ,
492
479
1 , 10000 );
493
480
if (ret )
494
481
dev_err (mdev -> dev , "DMA channel did not reset\n" );
495
482
496
483
/* Clear all status bits */
497
- iowrite32 (MSGDMA_CSR_STAT_MASK , & mdev -> csr -> status );
484
+ iowrite32 (MSGDMA_CSR_STAT_MASK , mdev -> csr + MSGDMA_CSR_STATUS );
498
485
499
486
/* Enable the DMA controller including interrupts */
500
487
iowrite32 (MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY |
501
- MSGDMA_CSR_CTL_GLOBAL_INTR , & mdev -> csr -> control );
488
+ MSGDMA_CSR_CTL_GLOBAL_INTR , mdev -> csr + MSGDMA_CSR_CONTROL );
502
489
503
490
mdev -> idle = true;
504
491
};
505
492
506
493
static void msgdma_copy_one (struct msgdma_device * mdev ,
507
494
struct msgdma_sw_desc * desc )
508
495
{
509
- struct msgdma_extended_desc * hw_desc = mdev -> desc ;
496
+ void __iomem * hw_desc = mdev -> desc ;
510
497
511
498
/*
512
499
* Check if the DESC FIFO it not full. If its full, we need to wait
513
500
* for at least one entry to become free again
514
501
*/
515
- while (ioread32 (& mdev -> csr -> status ) & MSGDMA_CSR_STAT_DESC_BUF_FULL )
502
+ while (ioread32 (mdev -> csr + MSGDMA_CSR_STATUS ) &
503
+ MSGDMA_CSR_STAT_DESC_BUF_FULL )
516
504
mdelay (1 );
517
505
518
506
/*
@@ -524,12 +512,14 @@ static void msgdma_copy_one(struct msgdma_device *mdev,
524
512
* sure this control word is written last by single coding it and
525
513
* adding some write-barriers here.
526
514
*/
527
- memcpy (hw_desc , & desc -> hw_desc , sizeof (desc -> hw_desc ) - sizeof (u32 ));
515
+ memcpy ((void __force * )hw_desc , & desc -> hw_desc ,
516
+ sizeof (desc -> hw_desc ) - sizeof (u32 ));
528
517
529
518
/* Write control word last to flush this descriptor into the FIFO */
530
519
mdev -> idle = false;
531
520
wmb ();
532
- iowrite32 (desc -> hw_desc .control , & hw_desc -> control );
521
+ iowrite32 (desc -> hw_desc .control , hw_desc +
522
+ offsetof(struct msgdma_extended_desc , control ));
533
523
wmb ();
534
524
}
535
525
@@ -690,13 +680,13 @@ static void msgdma_tasklet(unsigned long data)
690
680
{
691
681
struct msgdma_device * mdev = (struct msgdma_device * )data ;
692
682
u32 count ;
693
- u32 size ;
694
- u32 status ;
683
+ u32 __maybe_unused size ;
684
+ u32 __maybe_unused status ;
695
685
696
686
spin_lock (& mdev -> lock );
697
687
698
688
/* Read number of responses that are available */
699
- count = ioread32 (& mdev -> csr -> resp_fill_level );
689
+ count = ioread32 (mdev -> csr + MSGDMA_CSR_RESP_FILL_LEVEL );
700
690
dev_dbg (mdev -> dev , "%s (%d): response count=%d\n" ,
701
691
__func__ , __LINE__ , count );
702
692
@@ -707,8 +697,8 @@ static void msgdma_tasklet(unsigned long data)
707
697
* have any real values, like transferred bytes or error
708
698
* bits. So we need to just drop these values.
709
699
*/
710
- size = ioread32 (& mdev -> resp -> bytes_transferred );
711
- status = ioread32 (& mdev -> resp -> status );
700
+ size = ioread32 (mdev -> resp + MSGDMA_RESP_BYTES_TRANSFERRED );
701
+ status = ioread32 (mdev -> resp + MSGDMA_RESP_STATUS );
712
702
713
703
msgdma_complete_descriptor (mdev );
714
704
msgdma_chan_desc_cleanup (mdev );
@@ -729,7 +719,7 @@ static irqreturn_t msgdma_irq_handler(int irq, void *data)
729
719
struct msgdma_device * mdev = data ;
730
720
u32 status ;
731
721
732
- status = ioread32 (& mdev -> csr -> status );
722
+ status = ioread32 (mdev -> csr + MSGDMA_CSR_STATUS );
733
723
if ((status & MSGDMA_CSR_STAT_BUSY ) == 0 ) {
734
724
/* Start next transfer if the DMA controller is idle */
735
725
spin_lock (& mdev -> lock );
@@ -741,7 +731,7 @@ static irqreturn_t msgdma_irq_handler(int irq, void *data)
741
731
tasklet_schedule (& mdev -> irq_tasklet );
742
732
743
733
/* Clear interrupt in mSGDMA controller */
744
- iowrite32 (MSGDMA_CSR_STAT_IRQ , & mdev -> csr -> status );
734
+ iowrite32 (MSGDMA_CSR_STAT_IRQ , mdev -> csr + MSGDMA_CSR_STATUS );
745
735
746
736
return IRQ_HANDLED ;
747
737
}
@@ -809,17 +799,17 @@ static int msgdma_probe(struct platform_device *pdev)
809
799
mdev -> dev = & pdev -> dev ;
810
800
811
801
/* Map CSR space */
812
- ret = request_and_map (pdev , "csr" , & dma_res , ( void * * ) & mdev -> csr );
802
+ ret = request_and_map (pdev , "csr" , & dma_res , & mdev -> csr );
813
803
if (ret )
814
804
return ret ;
815
805
816
806
/* Map (extended) descriptor space */
817
- ret = request_and_map (pdev , "desc" , & dma_res , ( void * * ) & mdev -> desc );
807
+ ret = request_and_map (pdev , "desc" , & dma_res , & mdev -> desc );
818
808
if (ret )
819
809
return ret ;
820
810
821
811
/* Map response space */
822
- ret = request_and_map (pdev , "resp" , & dma_res , ( void * * ) & mdev -> resp );
812
+ ret = request_and_map (pdev , "resp" , & dma_res , & mdev -> resp );
823
813
if (ret )
824
814
return ret ;
825
815
0 commit comments