Commit e1171ac

Merge tag 'xtensa-20180225' of git://github.com/jcmvbkbc/linux-xtensa
Pull Xtensa fixes from Max Filippov:
 "Two fixes for reserved memory/DMA buffers allocation in high memory
  on xtensa architecture:

   - fix memory accounting when reserved memory is in high memory
     region

   - fix DMA allocation from high memory"

* tag 'xtensa-20180225' of git://github.com/jcmvbkbc/linux-xtensa:
  xtensa: support DMA buffers in high memory
  xtensa: fix high memory/reserved memory collision
2 parents: c23a757 + 6137e41
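
For orientation (not part of the commit): the driver-visible entry point these fixes sit behind is the generic DMA API, which on xtensa ends up in xtensa_dma_alloc()/xtensa_dma_free() below. A minimal sketch of a caller follows; the demo_* function names and the device/size parameters are illustrative, only dma_alloc_coherent()/dma_free_coherent() are the real API:

#include <linux/dma-mapping.h>

/* Illustrative caller: after this merge, the coherent buffer handed
 * back here may be backed by CMA pages in high memory, remapped
 * uncached, rather than being restricted to the KSEG lowmem window. */
static void *demo_get_dma_buffer(struct device *dev, size_t size,
				 dma_addr_t *handle)
{
	/* *handle receives the bus address to program into the device */
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}

static void demo_put_dma_buffer(struct device *dev, size_t size,
				void *cpu_addr, dma_addr_t handle)
{
	dma_free_coherent(dev, size, cpu_addr, handle);
}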

2 files changed, 93 insertions(+), 17 deletions(-)

arch/xtensa/kernel/pci-dma.c

Lines changed: 30 additions & 10 deletions
@@ -16,6 +16,7 @@
  */
 
 #include <linux/dma-contiguous.h>
+#include <linux/dma-direct.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
@@ -123,7 +124,7 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 			    unsigned long attrs)
 {
 	unsigned long ret;
-	unsigned long uncached = 0;
+	unsigned long uncached;
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct page *page = NULL;
 
@@ -144,15 +145,27 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
-	ret = (unsigned long)page_address(page);
+	*handle = phys_to_dma(dev, page_to_phys(page));
 
-	/* We currently don't support coherent memory outside KSEG */
+#ifdef CONFIG_MMU
+	if (PageHighMem(page)) {
+		void *p;
 
+		p = dma_common_contiguous_remap(page, size, VM_MAP,
+						pgprot_noncached(PAGE_KERNEL),
+						__builtin_return_address(0));
+		if (!p) {
+			if (!dma_release_from_contiguous(dev, page, count))
+				__free_pages(page, get_order(size));
+		}
+		return p;
+	}
+#endif
+	ret = (unsigned long)page_address(page);
 	BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
 	       ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
 
 	uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
-	*handle = virt_to_bus((void *)ret);
 	__invalidate_dcache_range(ret, size);
 
 	return (void *)uncached;
@@ -161,13 +174,20 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
 			    dma_addr_t dma_handle, unsigned long attrs)
 {
-	unsigned long addr = (unsigned long)vaddr +
-		XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
-	struct page *page = virt_to_page(addr);
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-	BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
-	       addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
+	unsigned long addr = (unsigned long)vaddr;
+	struct page *page;
+
+	if (addr >= XCHAL_KSEG_BYPASS_VADDR &&
+	    addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE) {
+		addr += XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+		page = virt_to_page(addr);
+	} else {
+#ifdef CONFIG_MMU
+		dma_common_free_remap(vaddr, size, VM_MAP);
+#endif
+		page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
+	}
 
 	if (!dma_release_from_contiguous(dev, page, count))
 		__free_pages(page, get_order(size));
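
A note on the free path above (an observation on the diff, not part of the commit): xtensa_dma_free() can no longer assume vaddr is a KSEG bypass address, so it classifies the pointer first. The test subtracts the base before comparing against the size, which stays correct even if base + size would wrap around. A standalone user-space sketch with made-up constants (the real values are the core-specific XCHAL_* macros):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical KSEG layout constants for illustration only; the real
 * ones come from the Xtensa core's XCHAL_* configuration. */
#define KSEG_BYPASS_VADDR 0xd8000000UL
#define KSEG_SIZE         0x08000000UL	/* 128 MiB */

/* Mirrors the test in xtensa_dma_free(): subtract-then-compare checks
 * "base <= addr < base + size" without risking overflow in base + size. */
static bool in_kseg_bypass(unsigned long addr)
{
	return addr >= KSEG_BYPASS_VADDR &&
	       addr - KSEG_BYPASS_VADDR < KSEG_SIZE;
}

int main(void)
{
	printf("%d\n", in_kseg_bypass(0xd8001000UL));	/* 1: KSEG uncached alias */
	printf("%d\n", in_kseg_bypass(0xc0001000UL));	/* 0: highmem vmalloc remap */
	return 0;
}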

arch/xtensa/mm/init.c

Lines changed: 63 additions & 7 deletions
@@ -79,19 +79,75 @@ void __init zones_init(void)
 	free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
 }
 
+#ifdef CONFIG_HIGHMEM
+static void __init free_area_high(unsigned long pfn, unsigned long end)
+{
+	for (; pfn < end; pfn++)
+		free_highmem_page(pfn_to_page(pfn));
+}
+
+static void __init free_highpages(void)
+{
+	unsigned long max_low = max_low_pfn;
+	struct memblock_region *mem, *res;
+
+	reset_all_zones_managed_pages();
+	/* set highmem page free */
+	for_each_memblock(memory, mem) {
+		unsigned long start = memblock_region_memory_base_pfn(mem);
+		unsigned long end = memblock_region_memory_end_pfn(mem);
+
+		/* Ignore complete lowmem entries */
+		if (end <= max_low)
+			continue;
+
+		if (memblock_is_nomap(mem))
+			continue;
+
+		/* Truncate partial highmem entries */
+		if (start < max_low)
+			start = max_low;
+
+		/* Find and exclude any reserved regions */
+		for_each_memblock(reserved, res) {
+			unsigned long res_start, res_end;
+
+			res_start = memblock_region_reserved_base_pfn(res);
+			res_end = memblock_region_reserved_end_pfn(res);
+
+			if (res_end < start)
+				continue;
+			if (res_start < start)
+				res_start = start;
+			if (res_start > end)
+				res_start = end;
+			if (res_end > end)
+				res_end = end;
+			if (res_start != start)
+				free_area_high(start, res_start);
+			start = res_end;
+			if (start == end)
+				break;
+		}
+
+		/* And now free anything which remains */
+		if (start < end)
+			free_area_high(start, end);
+	}
+}
+#else
+static void __init free_highpages(void)
+{
+}
+#endif
+
 /*
  * Initialize memory pages.
  */
 
 void __init mem_init(void)
 {
-#ifdef CONFIG_HIGHMEM
-	unsigned long tmp;
-
-	reset_all_zones_managed_pages();
-	for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
-		free_highmem_page(pfn_to_page(tmp));
-#endif
+	free_highpages();
 
 	max_mapnr = max_pfn - ARCH_PFN_OFFSET;
 	high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
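
To make the interval arithmetic in free_highpages() above concrete (again an illustration, not part of the commit): each reserved region is clamped to the current highmem memblock, the free pages before it are released, and scanning resumes past its end, so reserved pfns are never handed to the page allocator. A user-space sketch with made-up pfn values:

#include <stdio.h>

/* Stand-in for free_area_high(): just report the freed pfn range. */
static void free_range(unsigned long start, unsigned long end)
{
	printf("free pfns [%lu, %lu)\n", start, end);
}

int main(void)
{
	unsigned long start = 100, end = 200;	/* highmem memblock */
	unsigned long rs = 150, re = 160;	/* reserved region inside it */

	/* Clamp the reserved region to [start, end), as the patch does */
	if (re >= start) {
		if (rs < start)
			rs = start;
		if (rs > end)
			rs = end;
		if (re > end)
			re = end;
		if (rs != start)
			free_range(start, rs);	/* frees [100, 150) */
		start = re;			/* resume scanning at 160 */
	}
	if (start < end)
		free_range(start, end);		/* frees [160, 200) */
	return 0;
}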
