
Commit f3ed88a

Merge branch 'fixes-for-v3.18' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping
Pull CMA and DMA-mapping fixes from Marek Szyprowski:
 "This contains important fixes for recently introduced highmem support
  for default contiguous memory region used for dma-mapping subsystem"

* 'fixes-for-v3.18' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  mm, cma: make parameters order consistent in func declaration and definition
  mm: cma: Use %pa to print physical addresses
  mm: cma: Ensure that reservations never cross the low/high mem boundary
  mm: cma: Always consider a 0 base address reservation as dynamic
  mm: cma: Don't crash on allocation if CMA area can't be activated
2 parents: ce1928d + dda02fd

2 files changed: 48 additions, 28 deletions

include/linux/cma.h

Lines changed: 4 additions & 4 deletions
@@ -18,12 +18,12 @@ struct cma;
 extern phys_addr_t cma_get_base(struct cma *cma);
 extern unsigned long cma_get_size(struct cma *cma);
 
-extern int __init cma_declare_contiguous(phys_addr_t size,
-			phys_addr_t base, phys_addr_t limit,
+extern int __init cma_declare_contiguous(phys_addr_t base,
+			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
 			bool fixed, struct cma **res_cma);
-extern int cma_init_reserved_mem(phys_addr_t size,
-					phys_addr_t base, int order_per_bit,
+extern int cma_init_reserved_mem(phys_addr_t base,
+					phys_addr_t size, int order_per_bit,
 					struct cma **res_cma);
 extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
 extern bool cma_release(struct cma *cma, struct page *pages, int count);
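
With the corrected prototype, callers pass base before size. Below is a minimal sketch of a boot-time reservation followed by a runtime allocation; the 16 MiB size, the zero base/limit/alignment values, and the example_cma name are illustrative assumptions, not part of this commit:

	#include <linux/cma.h>
	#include <linux/sizes.h>

	static struct cma *example_cma;

	/* Early boot: reserve a dynamically placed 16 MiB area
	 * (base = 0, no limit, default alignment, fixed = false). */
	static int __init example_cma_reserve(void)
	{
		return cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false,
					      &example_cma);
	}

	/* Runtime: take four pages from the area and give them back. */
	static void example_cma_use(void)
	{
		struct page *pages = cma_alloc(example_cma, 4, 0);

		if (pages)
			cma_release(example_cma, pages, 4);
	}

Note that with the "0 base address is dynamic" fix in this series, passing base = 0 forces fixed = false inside cma_declare_contiguous() anyway, so the explicit false here is just for clarity.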

mm/cma.c

Lines changed: 44 additions & 24 deletions
@@ -124,6 +124,7 @@ static int __init cma_activate_area(struct cma *cma)
 
 err:
 	kfree(cma->bitmap);
+	cma->count = 0;
 	return -EINVAL;
 }
 
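The one-line fix above pairs with the guard at the top of cma_alloc(): once cma->count is zeroed on the activation error path, allocation attempts against the failed area return NULL instead of walking the just-freed bitmap. A paraphrased sketch of that guard (from the same file, not part of this diff):

	struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
	{
		/* A region whose activation failed has count == 0; bail
		 * out before touching the already-freed bitmap. */
		if (!cma || !cma->count)
			return NULL;

		/* ... bitmap search and alloc_contig_range() follow ... */
	}
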

@@ -217,9 +218,8 @@ int __init cma_declare_contiguous(phys_addr_t base,
 	phys_addr_t highmem_start = __pa(high_memory);
 	int ret = 0;
 
-	pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
-		__func__, (unsigned long)size, (unsigned long)base,
-		(unsigned long)limit, (unsigned long)alignment);
+	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
+		__func__, &size, &base, &limit, &alignment);
 
 	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
 		pr_err("Not enough slots for CMA reserved regions!\n");
@@ -244,52 +244,72 @@ int __init cma_declare_contiguous(phys_addr_t base,
 	size = ALIGN(size, alignment);
 	limit &= ~(alignment - 1);
 
+	if (!base)
+		fixed = false;
+
 	/* size should be aligned with order_per_bit */
 	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
 		return -EINVAL;
 
 	/*
-	 * adjust limit to avoid crossing low/high memory boundary for
-	 * automatically allocated regions
+	 * If allocating at a fixed base the request region must not cross the
+	 * low/high memory boundary.
 	 */
-	if (((limit == 0 || limit > memblock_end) &&
-	     (memblock_end - size < highmem_start &&
-	      memblock_end > highmem_start)) ||
-	    (!fixed && limit > highmem_start && limit - size < highmem_start)) {
-		limit = highmem_start;
-	}
-
-	if (fixed && base < highmem_start && base+size > highmem_start) {
+	if (fixed && base < highmem_start && base + size > highmem_start) {
 		ret = -EINVAL;
-		pr_err("Region at %08lx defined on low/high memory boundary (%08lx)\n",
-			(unsigned long)base, (unsigned long)highmem_start);
+		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
+			&base, &highmem_start);
 		goto err;
 	}
 
+	/*
+	 * If the limit is unspecified or above the memblock end, its effective
+	 * value will be the memblock end. Set it explicitly to simplify further
+	 * checks.
+	 */
+	if (limit == 0 || limit > memblock_end)
+		limit = memblock_end;
+
 	/* Reserve memory */
-	if (base && fixed) {
+	if (fixed) {
 		if (memblock_is_region_reserved(base, size) ||
 		    memblock_reserve(base, size) < 0) {
 			ret = -EBUSY;
 			goto err;
 		}
 	} else {
-		phys_addr_t addr = memblock_alloc_range(size, alignment, base,
-							limit);
+		phys_addr_t addr = 0;
+
+		/*
+		 * All pages in the reserved area must come from the same zone.
+		 * If the requested region crosses the low/high memory boundary,
+		 * try allocating from high memory first and fall back to low
+		 * memory in case of failure.
+		 */
+		if (base < highmem_start && limit > highmem_start) {
+			addr = memblock_alloc_range(size, alignment,
+						    highmem_start, limit);
+			limit = highmem_start;
+		}
+
 		if (!addr) {
-			ret = -ENOMEM;
-			goto err;
-		} else {
-			base = addr;
+			addr = memblock_alloc_range(size, alignment, base,
+						    limit);
+			if (!addr) {
+				ret = -ENOMEM;
+				goto err;
+			}
 		}
+
+		base = addr;
 	}
 
 	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
 	if (ret)
 		goto err;
 
-	pr_info("Reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
-		(unsigned long)base);
+	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
+		&base);
 	return 0;
 
 err:
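
To make the high-then-low fallback concrete, consider a hypothetical 32-bit layout where lowmem ends at 0x30000000 and memblock ends at 0x80000000; all sizes and addresses below are illustrative, not taken from the commit:

	/* Hypothetical layout: lowmem ends at 0x30000000,
	 * memblock ends at 0x80000000. */
	phys_addr_t size = SZ_64M, alignment = SZ_4M;
	phys_addr_t base = 0, limit = 0x80000000;
	phys_addr_t highmem_start = 0x30000000, addr = 0;

	if (base < highmem_start && limit > highmem_start) {
		/* first try: highmem only, [0x30000000, 0x80000000) */
		addr = memblock_alloc_range(size, alignment,
					    highmem_start, limit);
		limit = highmem_start;	/* clamp so the retry stays low */
	}
	if (!addr)
		/* fallback: lowmem only, [0, 0x30000000) */
		addr = memblock_alloc_range(size, alignment, base, limit);

Either branch leaves the whole reservation on one side of the boundary, which is what keeps every page of the CMA area in a single zone.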
