@@ -12,18 +12,18 @@
 
 #include <linux/types.h>
 #include <linux/mm.h>
-#include <linux/export.h>
 #include <linux/string.h>
-#include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
 #include <linux/cache.h>
 #include <asm/cacheflush.h>
 
-static inline void __dma_sync_for_device(void *vaddr, size_t size,
-		enum dma_data_direction direction)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
 {
-	switch (direction) {
+	void *vaddr = phys_to_virt(paddr);
+
+	switch (dir) {
 	case DMA_FROM_DEVICE:
 		invalidate_dcache_range((unsigned long)vaddr,
 			(unsigned long)(vaddr + size));
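The file-local helper becomes the globally visible arch_sync_dma_for_device() hook, keyed on a physical address instead of a kernel virtual one, so the prototype now matches what the generic dma-noncoherent code expects. For orientation, a minimal sketch of how the common code ends up calling it; this paraphrases the lib/dma-noncoherent.c of this era under the assumption of a linear dma-to-phys mapping, and is not the verbatim kernel source:

static void dma_noncoherent_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	/*
	 * Sketch: translate the bus address back to a physical address,
	 * then let the architecture write back/invalidate its caches.
	 */
	arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
}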
@@ -42,10 +42,12 @@ static inline void __dma_sync_for_device(void *vaddr, size_t size,
 	}
 }
 
-static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
-		enum dma_data_direction direction)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
 {
-	switch (direction) {
+	void *vaddr = phys_to_virt(paddr);
+
+	switch (dir) {
 	case DMA_BIDIRECTIONAL:
 	case DMA_FROM_DEVICE:
 		invalidate_dcache_range((unsigned long)vaddr,
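The CPU-side helper gets the same treatment. Drivers are unaffected: the usual streaming-DMA calls now reach these two hooks through the generic ops. A hypothetical receive path, where my_receive, my_dev, buf and len are illustrative names and not part of this patch:

#include <linux/dma-mapping.h>

static int my_receive(struct device *my_dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/*
	 * Maps the buffer; on nios2 this invalidates the dcache range
	 * through arch_sync_dma_for_device() with DMA_FROM_DEVICE.
	 */
	handle = dma_map_single(my_dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(my_dev, handle))
		return -ENOMEM;

	/* ... let the device DMA into buf ... */

	/*
	 * Unmaps and invalidates again via arch_sync_dma_for_cpu() so the
	 * CPU does not read stale cache lines.
	 */
	dma_unmap_single(my_dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}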
@@ -58,8 +60,8 @@ static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
 	}
 }
 
-static void *nios2_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp, unsigned long attrs)
 {
 	void *ret;
 
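The coherent allocator keeps its body and only takes the generic entry-point name; arch_dma_free() follows in the next hunk. Again nothing changes for callers: dma_alloc_coherent() now lands here through the common code, and on nios2 the returned pointer is an uncached alias of the pages (hence the CAC_ADDR() conversion on the free side below). A hypothetical caller, with my_alloc_ring and my_dev as illustrative names:

#include <linux/dma-mapping.h>

static void *my_alloc_ring(struct device *my_dev, dma_addr_t *handle)
{
	/*
	 * Reaches arch_dma_alloc(); no explicit sync is ever needed on
	 * this buffer because the mapping is uncached.
	 */
	return dma_alloc_coherent(my_dev, PAGE_SIZE, handle, GFP_KERNEL);
}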
@@ -80,125 +82,10 @@ static void *nios2_dma_alloc(struct device *dev, size_t size,
 	return ret;
 }
 
-static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, unsigned long attrs)
 {
 	unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);
 
 	free_pages(addr, get_order(size));
 }
-
-static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction direction,
-		unsigned long attrs)
-{
-	int i;
-
-	for_each_sg(sg, sg, nents, i) {
-		void *addr = sg_virt(sg);
-
-		if (!addr)
-			continue;
-
-		sg->dma_address = sg_phys(sg);
-
-		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-			continue;
-
-		__dma_sync_for_device(addr, sg->length, direction);
-	}
-
-	return nents;
-}
-
-static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction,
-		unsigned long attrs)
-{
-	void *addr = page_address(page) + offset;
-
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		__dma_sync_for_device(addr, size, direction);
-
-	return page_to_phys(page) + offset;
-}
-
-static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-		size_t size, enum dma_data_direction direction,
-		unsigned long attrs)
-{
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		__dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
-}
-
-static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nhwentries, enum dma_data_direction direction,
-		unsigned long attrs)
-{
-	void *addr;
-	int i;
-
-	if (direction == DMA_TO_DEVICE)
-		return;
-
-	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-		return;
-
-	for_each_sg(sg, sg, nhwentries, i) {
-		addr = sg_virt(sg);
-		if (addr)
-			__dma_sync_for_cpu(addr, sg->length, direction);
-	}
-}
-
-static void nios2_dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-	__dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
-}
-
-static void nios2_dma_sync_single_for_device(struct device *dev,
-		dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-	__dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
-}
-
-static void nios2_dma_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sg, int nelems,
-		enum dma_data_direction direction)
-{
-	int i;
-
-	/* Make sure that gcc doesn't leave the empty loop body. */
-	for_each_sg(sg, sg, nelems, i)
-		__dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
-}
-
-static void nios2_dma_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sg, int nelems,
-		enum dma_data_direction direction)
-{
-	int i;
-
-	/* Make sure that gcc doesn't leave the empty loop body. */
-	for_each_sg(sg, sg, nelems, i)
-		__dma_sync_for_device(sg_virt(sg), sg->length, direction);
-
-}
-
-const struct dma_map_ops nios2_dma_ops = {
-	.alloc			= nios2_dma_alloc,
-	.free			= nios2_dma_free,
-	.map_page		= nios2_dma_map_page,
-	.unmap_page		= nios2_dma_unmap_page,
-	.map_sg			= nios2_dma_map_sg,
-	.unmap_sg		= nios2_dma_unmap_sg,
-	.sync_single_for_device	= nios2_dma_sync_single_for_device,
-	.sync_single_for_cpu	= nios2_dma_sync_single_for_cpu,
-	.sync_sg_for_cpu	= nios2_dma_sync_sg_for_cpu,
-	.sync_sg_for_device	= nios2_dma_sync_sg_for_device,
-};
-EXPORT_SYMBOL(nios2_dma_ops);
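Everything below arch_dma_free() is the payoff of the conversion: the scatterlist walkers (note the old for_each_sg(sg, sg, ...) trick of reusing the list head as the iterator), the page map/unmap wrappers, the four sync_* methods and the exported nios2_dma_ops table all duplicated logic that the generic implementation now provides, so the whole block goes away. The arch instead selects the generic ops in Kconfig (DMA_NONCOHERENT_OPS plus the ARCH_HAS_SYNC_DMA_FOR_CPU/DEVICE symbols, added in this same series). The common replacement loop looks roughly like this; a paraphrase of the era's lib/dma-noncoherent.c, not the verbatim source:

static void dma_noncoherent_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	/*
	 * A distinct iterator replaces the for_each_sg(sg, sg, ...) form,
	 * and sg_phys() feeds the hook a physical address directly.
	 */
	for_each_sg(sgl, sg, nents, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}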