
Commit cef55b5

Merge tag 'dma-mapping-4.13-2' of git://git.infradead.org/users/hch/dma-mapping
Pull dma mapping fixes from Christoph Hellwig:

 "split the global dma coherent pool from the per-device pool.

  This fixes a regression in the earlier 4.13 pull requests where the
  global pool would override a per-device CMA pool (Vladimir Murzin)"

* tag 'dma-mapping-4.13-2' of git://git.infradead.org/users/hch/dma-mapping:
  ARM: NOMMU: Wire-up default DMA interface
  dma-coherent: introduce interface for default DMA pool
2 parents 25f6a53 + 878ec36

8 files changed: +180, -81 lines
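Before the per-file diffs, the shape of the fix in one place: dev_get_coherent_memory() now returns only the per-device pool, so a per-device region (for example a per-device CMA area) can no longer be shadowed by the global pool, and users of the global pool must ask for it by name. A minimal sketch, assuming hypothetical wrapper names (sketch_dev_alloc, sketch_global_alloc); only the dma_*_coherent helpers are from this merge:

	#include <linux/dma-mapping.h>

	/*
	 * Sketch only: stand-ins for the per-arch allocation paths.
	 * After this merge the two pools are separate entry points.
	 */

	/* Typical per-arch path: consult the per-device pool, never the global one. */
	static void *sketch_dev_alloc(struct device *dev, size_t size,
				      dma_addr_t *handle)
	{
		void *ret;

		/*
		 * Nonzero return means "handled": either a pool hit, or an
		 * exclusive pool that is exhausted (then *ret is NULL).
		 */
		if (dma_alloc_from_dev_coherent(dev, size, handle, &ret))
			return ret;

		return NULL;	/* caller continues with the generic allocator */
	}

	/* NOMMU-style path: the global (default) pool is requested explicitly. */
	static void *sketch_global_alloc(size_t size, dma_addr_t *handle)
	{
		return dma_alloc_from_global_coherent(size, handle);
	}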

arch/arc/mm/dma.c

Lines changed: 1 addition & 1 deletion

@@ -117,7 +117,7 @@ static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
 	if (off < count && user_count <= (count - off)) {
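The dma_mmap_from_coherent() -> dma_mmap_from_dev_coherent() rename above recurs verbatim in the arm, arm64, mips and dma_common_mmap() hunks below. For reference, the complete split interface, with prototypes read off the definitions in drivers/base/dma-coherent.c further down (the header-side declarations belong to this merge, but their diff is not shown on this page):

	/* Per-device pool: consulted by the per-arch alloc/free/mmap paths. */
	int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
					dma_addr_t *dma_handle, void **ret);
	int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
	int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
				       void *vaddr, size_t size, int *ret);

	/* Global (default) pool: used explicitly, e.g. by the ARM NOMMU ops. */
	void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
	int dma_release_from_global_coherent(int order, void *vaddr);
	int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
					  size_t size, int *ret);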

arch/arm/mm/dma-mapping-nommu.c

Lines changed: 36 additions & 9 deletions

@@ -40,21 +40,30 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
 
 {
 	const struct dma_map_ops *ops = &dma_noop_ops;
+	void *ret;
 
 	/*
-	 * We are here because:
+	 * Try generic allocator first if we are advertised that
+	 * consistency is not required.
+	 */
+
+	if (attrs & DMA_ATTR_NON_CONSISTENT)
+		return ops->alloc(dev, size, dma_handle, gfp, attrs);
+
+	ret = dma_alloc_from_global_coherent(size, dma_handle);
+
+	/*
+	 * dma_alloc_from_global_coherent() may fail because:
+	 *
 	 * - no consistent DMA region has been defined, so we can't
 	 *   continue.
 	 * - there is no space left in consistent DMA region, so we
 	 *   only can fallback to generic allocator if we are
 	 *   advertised that consistency is not required.
 	 */
 
-	if (attrs & DMA_ATTR_NON_CONSISTENT)
-		return ops->alloc(dev, size, dma_handle, gfp, attrs);
-
-	WARN_ON_ONCE(1);
-	return NULL;
+	WARN_ON_ONCE(ret == NULL);
+	return ret;
 }
 
 static void arm_nommu_dma_free(struct device *dev, size_t size,
@@ -63,14 +72,31 @@ static void arm_nommu_dma_free(struct device *dev, size_t size,
 {
 	const struct dma_map_ops *ops = &dma_noop_ops;
 
-	if (attrs & DMA_ATTR_NON_CONSISTENT)
+	if (attrs & DMA_ATTR_NON_CONSISTENT) {
 		ops->free(dev, size, cpu_addr, dma_addr, attrs);
-	else
-		WARN_ON_ONCE(1);
+	} else {
+		int ret = dma_release_from_global_coherent(get_order(size),
+							   cpu_addr);
+
+		WARN_ON_ONCE(ret == 0);
+	}
 
 	return;
 }
 
+static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			      unsigned long attrs)
+{
+	int ret;
+
+	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
+		return ret;
+
+	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+
 static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
 				  enum dma_data_direction dir)
 {
@@ -173,6 +199,7 @@ static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist
 const struct dma_map_ops arm_nommu_dma_ops = {
 	.alloc			= arm_nommu_dma_alloc,
 	.free			= arm_nommu_dma_free,
+	.mmap			= arm_nommu_dma_mmap,
 	.map_page		= arm_nommu_dma_map_page,
 	.unmap_page		= arm_nommu_dma_unmap_page,
 	.map_sg			= arm_nommu_dma_map_sg,
arch/arm/mm/dma-mapping.c

Lines changed: 1 addition & 1 deletion

@@ -851,7 +851,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long pfn = dma_to_pfn(dev, dma_addr);
 	unsigned long off = vma->vm_pgoff;
 
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
 	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {

arch/arm64/mm/dma-mapping.c

Lines changed: 2 additions & 2 deletions

@@ -329,7 +329,7 @@ static int __swiotlb_mmap(struct device *dev,
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
 					     is_device_dma_coherent(dev));
 
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
 	return __swiotlb_mmap_pfn(vma, pfn, size);
@@ -706,7 +706,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
 					     is_device_dma_coherent(dev));
 
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
 	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {

arch/mips/mm/dma-default.c

Lines changed: 1 addition & 1 deletion

@@ -232,7 +232,7 @@ static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	else
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
 	if (off < count && user_count <= (count - off)) {

drivers/base/dma-coherent.c

Lines changed: 106 additions & 58 deletions

@@ -25,7 +25,7 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de
 {
 	if (dev && dev->dma_mem)
 		return dev->dma_mem;
-	return dma_coherent_default_memory;
+	return NULL;
 }
 
 static inline dma_addr_t dma_get_device_base(struct device *dev,
@@ -165,34 +165,15 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
 }
 EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
 
-/**
- * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
- *
- * @dev:	device from which we allocate memory
- * @size:	size of requested memory area
- * @dma_handle:	This will be filled with the correct dma handle
- * @ret:	This pointer will be filled with the virtual address
- *		to allocated area.
- *
- * This function should be only called from per-arch dma_alloc_coherent()
- * to support allocation from per-device coherent memory pools.
- *
- * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
- */
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
-				       dma_addr_t *dma_handle, void **ret)
+static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
+		ssize_t size, dma_addr_t *dma_handle)
 {
-	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
 	int order = get_order(size);
 	unsigned long flags;
 	int pageno;
 	int dma_memory_map;
+	void *ret;
 
-	if (!mem)
-		return 0;
-
-	*ret = NULL;
 	spin_lock_irqsave(&mem->spinlock, flags);
 
 	if (unlikely(size > (mem->size << PAGE_SHIFT)))
@@ -203,47 +184,71 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 		goto err;
 
 	/*
-	 * Memory was found in the per-device area.
+	 * Memory was found in the coherent area.
 	 */
-	*dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
-	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
+	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+	ret = mem->virt_base + (pageno << PAGE_SHIFT);
 	dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
 	spin_unlock_irqrestore(&mem->spinlock, flags);
 	if (dma_memory_map)
-		memset(*ret, 0, size);
+		memset(ret, 0, size);
 	else
-		memset_io(*ret, 0, size);
+		memset_io(ret, 0, size);
 
-	return 1;
+	return ret;
 
 err:
 	spin_unlock_irqrestore(&mem->spinlock, flags);
+	return NULL;
+}
+
+/**
+ * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
+ * @dev:	device from which we allocate memory
+ * @size:	size of requested memory area
+ * @dma_handle:	This will be filled with the correct dma handle
+ * @ret:	This pointer will be filled with the virtual address
+ *		to allocated area.
+ *
+ * This function should be only called from per-arch dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+ */
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+		dma_addr_t *dma_handle, void **ret)
+{
+	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+	if (!mem)
+		return 0;
+
+	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+	if (*ret)
+		return 1;
+
 	/*
 	 * In the case where the allocation can not be satisfied from the
 	 * per-device area, try to fall back to generic memory if the
 	 * constraints allow it.
 	 */
 	return mem->flags & DMA_MEMORY_EXCLUSIVE;
 }
-EXPORT_SYMBOL(dma_alloc_from_coherent);
+EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
 
-/**
- * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
- * @dev:	device from which the memory was allocated
- * @order:	the order of pages allocated
- * @vaddr:	virtual address of allocated pages
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, releases that memory.
- *
- * Returns 1 if we correctly released the memory, or 0 if
- * dma_release_coherent() should proceed with releasing memory from
- * generic pools.
- */
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
 {
-	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+	if (!dma_coherent_default_memory)
+		return NULL;
+
+	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
+			dma_handle);
+}
 
+static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
+				       int order, void *vaddr)
+{
 	if (mem && vaddr >= mem->virt_base && vaddr <
 		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
 		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
@@ -256,28 +261,39 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dma_release_from_coherent);
 
 /**
- * dma_mmap_from_coherent() - try to mmap the memory allocated from
- * per-device coherent memory pool to userspace
+ * dma_release_from_dev_coherent() - free memory to device coherent memory pool
  * @dev:	device from which the memory was allocated
- * @vma:	vm_area for the userspace memory
- * @vaddr:	cpu address returned by dma_alloc_from_coherent
- * @size:	size of the memory buffer allocated by dma_alloc_from_coherent
- * @ret:	result from remap_pfn_range()
+ * @order:	the order of pages allocated
+ * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, maps that memory to the provided vma.
+ * coherent memory pool and if so, releases that memory.
 *
- * Returns 1 if we correctly mapped the memory, or 0 if the caller should
- * proceed with mapping memory from generic pools.
+ * Returns 1 if we correctly released the memory, or 0 if the caller should
+ * proceed with releasing memory from generic pools.
 */
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
-			   void *vaddr, size_t size, int *ret)
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
 {
 	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
 
+	return __dma_release_from_coherent(mem, order, vaddr);
+}
+EXPORT_SYMBOL(dma_release_from_dev_coherent);
+
+int dma_release_from_global_coherent(int order, void *vaddr)
+{
+	if (!dma_coherent_default_memory)
+		return 0;
+
+	return __dma_release_from_coherent(dma_coherent_default_memory, order,
+			vaddr);
+}
+
+static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
+		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
+{
 	if (mem && vaddr >= mem->virt_base && vaddr + size <=
 		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
 		unsigned long off = vma->vm_pgoff;
@@ -296,7 +312,39 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dma_mmap_from_coherent);
+
+/**
+ * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
+ * @dev:	device from which the memory was allocated
+ * @vma:	vm_area for the userspace memory
+ * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
+ * @size:	size of the memory buffer allocated
+ * @ret:	result from remap_pfn_range()
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller should
+ * proceed with mapping memory from generic pools.
+ */
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
+			   void *vaddr, size_t size, int *ret)
+{
+	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
+}
+EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
+
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
+				   size_t size, int *ret)
+{
+	if (!dma_coherent_default_memory)
+		return 0;
+
+	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
+			vaddr, size, ret);
+}
 
 /*
 * Support for reserved memory regions defined in device tree
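The per-device variants keep the old 0/1 "handled" contract, so existing callers only needed the rename. A sketch of the release side as a caller would use it; my_arch_dma_free() is a hypothetical stand-in for the common free path (which presumably lives in include/linux/dma-mapping.h, among the changed files whose diff is not shown on this page):

	#include <linux/dma-mapping.h>

	/* Hypothetical arch free path; only the release helper is from this merge. */
	static void my_arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
				     dma_addr_t dma_handle, unsigned long attrs)
	{
		/* Returns 1 when cpu_addr came from dev's coherent pool. */
		if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
			return;

		/* Otherwise fall through to the generic free path. */
	}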

drivers/base/dma-mapping.c

Lines changed: 1 addition & 1 deletion

@@ -235,7 +235,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
 	if (off < count && user_count <= (count - off)) {
