@@ -17,7 +17,7 @@
 
 #define NOT_COHERENT_CACHE
 
-static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
 				       dma_addr_t *dma_handle, gfp_t flag,
 				       unsigned long attrs)
 {
@@ -42,7 +42,7 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 #endif
 }
 
-static void dma_direct_free_coherent(struct device *dev, size_t size,
+static void dma_nommu_free_coherent(struct device *dev, size_t size,
 				    void *vaddr, dma_addr_t dma_handle,
 				    unsigned long attrs)
 {
@@ -69,7 +69,7 @@ static inline void __dma_sync(unsigned long paddr,
 	}
 }
 
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
+static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
 			    int nents, enum dma_data_direction direction,
 			    unsigned long attrs)
 {
@@ -89,12 +89,12 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 	return nents;
 }
 
-static int dma_direct_dma_supported(struct device *dev, u64 mask)
+static int dma_nommu_dma_supported(struct device *dev, u64 mask)
 {
 	return 1;
 }
 
-static inline dma_addr_t dma_direct_map_page(struct device *dev,
+static inline dma_addr_t dma_nommu_map_page(struct device *dev,
 					    struct page *page,
 					    unsigned long offset,
 					    size_t size,
@@ -106,7 +106,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
 	return page_to_phys(page) + offset;
 }
 
-static inline void dma_direct_unmap_page(struct device *dev,
+static inline void dma_nommu_unmap_page(struct device *dev,
 					dma_addr_t dma_address,
 					size_t size,
 					enum dma_data_direction direction,
@@ -122,7 +122,7 @@ static inline void dma_direct_unmap_page(struct device *dev,
 }
 
 static inline void
-dma_direct_sync_single_for_cpu(struct device *dev,
+dma_nommu_sync_single_for_cpu(struct device *dev,
 			      dma_addr_t dma_handle, size_t size,
 			      enum dma_data_direction direction)
 {
@@ -136,7 +136,7 @@ dma_direct_sync_single_for_cpu(struct device *dev,
 }
 
 static inline void
-dma_direct_sync_single_for_device(struct device *dev,
+dma_nommu_sync_single_for_device(struct device *dev,
 				 dma_addr_t dma_handle, size_t size,
 				 enum dma_data_direction direction)
 {
@@ -150,7 +150,7 @@ dma_direct_sync_single_for_device(struct device *dev,
 }
 
 static inline void
-dma_direct_sync_sg_for_cpu(struct device *dev,
+dma_nommu_sync_sg_for_cpu(struct device *dev,
 			  struct scatterlist *sgl, int nents,
 			  enum dma_data_direction direction)
 {
@@ -164,7 +164,7 @@ dma_direct_sync_sg_for_cpu(struct device *dev,
 }
 
 static inline void
-dma_direct_sync_sg_for_device(struct device *dev,
+dma_nommu_sync_sg_for_device(struct device *dev,
 			     struct scatterlist *sgl, int nents,
 			     enum dma_data_direction direction)
 {
@@ -178,7 +178,7 @@ dma_direct_sync_sg_for_device(struct device *dev,
 }
 
 static
-int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
 			    void *cpu_addr, dma_addr_t handle, size_t size,
 			    unsigned long attrs)
 {
@@ -204,20 +204,20 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
 #endif
 }
 
-const struct dma_map_ops dma_direct_ops = {
-	.alloc			= dma_direct_alloc_coherent,
-	.free			= dma_direct_free_coherent,
-	.mmap			= dma_direct_mmap_coherent,
-	.map_sg			= dma_direct_map_sg,
-	.dma_supported		= dma_direct_dma_supported,
-	.map_page		= dma_direct_map_page,
-	.unmap_page		= dma_direct_unmap_page,
-	.sync_single_for_cpu	= dma_direct_sync_single_for_cpu,
-	.sync_single_for_device	= dma_direct_sync_single_for_device,
-	.sync_sg_for_cpu	= dma_direct_sync_sg_for_cpu,
-	.sync_sg_for_device	= dma_direct_sync_sg_for_device,
+const struct dma_map_ops dma_nommu_ops = {
+	.alloc			= dma_nommu_alloc_coherent,
+	.free			= dma_nommu_free_coherent,
+	.mmap			= dma_nommu_mmap_coherent,
+	.map_sg			= dma_nommu_map_sg,
+	.dma_supported		= dma_nommu_dma_supported,
+	.map_page		= dma_nommu_map_page,
+	.unmap_page		= dma_nommu_unmap_page,
+	.sync_single_for_cpu	= dma_nommu_sync_single_for_cpu,
+	.sync_single_for_device	= dma_nommu_sync_single_for_device,
+	.sync_sg_for_cpu	= dma_nommu_sync_sg_for_cpu,
+	.sync_sg_for_device	= dma_nommu_sync_sg_for_device,
 };
-EXPORT_SYMBOL(dma_direct_ops);
+EXPORT_SYMBOL(dma_nommu_ops);
 
 /* Number of entries preallocated for DMA-API debugging */
 #define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)
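The diff is a mechanical rename: the arch-local "direct" (1:1, no-IOMMU) struct dma_map_ops and all of its callbacks move from a dma_direct_ prefix to dma_nommu_, with no behavioural change to any callback. As a rough illustration of how such an ops table is consumed, a sketch follows; it is not part of this commit. set_dma_ops() is the generic helper for attaching a struct dma_map_ops to a device, and my_bus_setup_dma() is a hypothetical hook name.

/*
 * Minimal sketch, assuming set_dma_ops() is available as in
 * <linux/dma-mapping.h> of this era; the hook name below is
 * hypothetical and not from this commit.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

extern const struct dma_map_ops dma_nommu_ops;

static void my_bus_setup_dma(struct device *dev)
{
	/*
	 * After this, dma_map_page(), dma_map_sg(), dma_alloc_coherent()
	 * and friends on this device dispatch through the dma_nommu_*
	 * callbacks defined in the ops table above.
	 */
	set_dma_ops(dev, &dma_nommu_ops);
}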