Commit 80b29b6

Merge tag 'csky-for-linus-5.4-rc1' of git://github.com/c-sky/csky-linux

Pull csky updates from Guo Ren:
 "This round of csky subsystem just some fixups:

   - Fix mb() synchronization problem
   - Fix dma_alloc_coherent with PAGE_SO attribute
   - Fix cache_op failed when cross memory ZONEs
   - Optimize arch_sync_dma_for_cpu/device with dma_inv_range
   - Fix ioremap function losing
   - Fix arch_get_unmapped_area() implementation
   - Fix defer cache flush for 610
   - Support kernel non-aligned access
   - Fix 610 vipt cache flush mechanism
   - Fix add zero_fp fixup perf backtrace panic
   - Move static keyword to the front of declaration
   - Fix csky_pmu.max_period assignment
   - Use generic free_initrd_mem()
   - entry: Remove unneeded need_resched() loop"

* tag 'csky-for-linus-5.4-rc1' of git://github.com/c-sky/csky-linux:
  csky: Move static keyword to the front of declaration
  csky: entry: Remove unneeded need_resched() loop
  csky: Fixup csky_pmu.max_period assignment
  csky: Fixup add zero_fp fixup perf backtrace panic
  csky: Use generic free_initrd_mem()
  csky: Fixup 610 vipt cache flush mechanism
  csky: Support kernel non-aligned access
  csky: Fixup defer cache flush for 610
  csky: Fixup arch_get_unmapped_area() implementation
  csky: Fixup ioremap function losing
  csky: Optimize arch_sync_dma_for_cpu/device with dma_inv_range
  csky/dma: Fixup cache_op failed when cross memory ZONEs
  csky: Fixup dma_alloc_coherent with PAGE_SO attribute
  csky: Fixup mb() synchronization problem
2 parents: cef0aa0 + 9af032a

17 files changed: +291, -212 lines

arch/csky/abiv1/alignment.c (45 additions, 17 deletions)

@@ -5,8 +5,10 @@
 #include <linux/uaccess.h>
 #include <linux/ptrace.h>
 
-static int align_enable = 1;
-static int align_count;
+static int align_kern_enable = 1;
+static int align_usr_enable = 1;
+static int align_kern_count = 0;
+static int align_usr_count = 0;
 
 static inline uint32_t get_ptreg(struct pt_regs *regs, uint32_t rx)
 {
@@ -32,9 +34,6 @@ static int ldb_asm(uint32_t addr, uint32_t *valp)
 	uint32_t val;
 	int err;
 
-	if (!access_ok((void *)addr, 1))
-		return 1;
-
 	asm volatile (
 	"movi	%0, 0\n"
 	"1:\n"
@@ -67,9 +66,6 @@ static int stb_asm(uint32_t addr, uint32_t val)
 {
 	int err;
 
-	if (!access_ok((void *)addr, 1))
-		return 1;
-
 	asm volatile (
 	"movi	%0, 0\n"
 	"1:\n"
@@ -203,8 +199,6 @@ static int stw_c(struct pt_regs *regs, uint32_t rz, uint32_t addr)
 	if (stb_asm(addr, byte3))
 		return 1;
 
-	align_count++;
-
 	return 0;
 }
 
@@ -226,14 +220,34 @@ void csky_alignment(struct pt_regs *regs)
 	uint32_t addr = 0;
 
 	if (!user_mode(regs))
+		goto kernel_area;
+
+	if (!align_usr_enable) {
+		pr_err("%s user disabled.\n", __func__);
 		goto bad_area;
+	}
+
+	align_usr_count++;
 
 	ret = get_user(tmp, (uint16_t *)instruction_pointer(regs));
 	if (ret) {
 		pr_err("%s get_user failed.\n", __func__);
 		goto bad_area;
 	}
 
+	goto good_area;
+
+kernel_area:
+	if (!align_kern_enable) {
+		pr_err("%s kernel disabled.\n", __func__);
+		goto bad_area;
+	}
+
+	align_kern_count++;
+
+	tmp = *(uint16_t *)instruction_pointer(regs);
+
+good_area:
 	opcode = (uint32_t)tmp;
 
 	rx = opcode & 0xf;
@@ -286,18 +300,32 @@ void csky_alignment(struct pt_regs *regs)
 	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr);
 }
 
-static struct ctl_table alignment_tbl[4] = {
+static struct ctl_table alignment_tbl[5] = {
+	{
+		.procname = "kernel_enable",
+		.data = &align_kern_enable,
+		.maxlen = sizeof(align_kern_enable),
+		.mode = 0666,
+		.proc_handler = &proc_dointvec
+	},
+	{
+		.procname = "user_enable",
+		.data = &align_usr_enable,
+		.maxlen = sizeof(align_usr_enable),
+		.mode = 0666,
+		.proc_handler = &proc_dointvec
+	},
 	{
-		.procname = "enable",
-		.data = &align_enable,
-		.maxlen = sizeof(align_enable),
+		.procname = "kernel_count",
+		.data = &align_kern_count,
+		.maxlen = sizeof(align_kern_count),
 		.mode = 0666,
 		.proc_handler = &proc_dointvec
 	},
 	{
-		.procname = "count",
-		.data = &align_count,
-		.maxlen = sizeof(align_count),
+		.procname = "user_count",
+		.data = &align_usr_count,
+		.maxlen = sizeof(align_usr_count),
 		.mode = 0666,
 		.proc_handler = &proc_dointvec
 	},
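
Note that alignment_tbl grows from four slots to five because a ctl_table array needs a zero terminator after its live entries, and the two old knobs became four. How the table reaches procfs is outside this hunk; as a hedged sketch of the usual registration pattern (the "csky_alignment" parent name, proc path, and initcall below are assumptions for illustration, not taken from this commit):

#include <linux/init.h>
#include <linux/sysctl.h>

/* Sketch only: the real registration lives elsewhere in alignment.c. */
static struct ctl_table alignment_root[] = {
	{
		.procname	= "csky_alignment",	/* assumed name */
		.mode		= 0555,
		.child		= alignment_tbl,
	},
	{}
};

static int __init alignment_sysctl_init(void)
{
	/* Would expose kernel_enable, user_enable, kernel_count and
	 * user_count as integer files under /proc/sys/csky_alignment/
	 * (the actual path depends on the real parent table). */
	register_sysctl_table(alignment_root);
	return 0;
}
arch_initcall(alignment_sysctl_init);

Under such a layout, writing 0 to the user_enable file would make csky_alignment() send unaligned user accesses down the bad_area path while the kernel-mode fixup stays active.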

arch/csky/abiv1/cacheflush.c (47 additions, 23 deletions)

@@ -11,42 +11,66 @@
 #include <asm/cacheflush.h>
 #include <asm/cachectl.h>
 
+#define PG_dcache_clean		PG_arch_1
+
 void flush_dcache_page(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
-	unsigned long addr;
+	struct address_space *mapping;
 
-	if (mapping && !mapping_mapped(mapping)) {
-		set_bit(PG_arch_1, &(page)->flags);
+	if (page == ZERO_PAGE(0))
 		return;
-	}
 
-	/*
-	 * We could delay the flush for the !page_mapping case too. But that
-	 * case is for exec env/arg pages and those are %99 certainly going to
-	 * get faulted into the tlb (and thus flushed) anyways.
-	 */
-	addr = (unsigned long) page_address(page);
-	dcache_wb_range(addr, addr + PAGE_SIZE);
+	mapping = page_mapping_file(page);
+
+	if (mapping && !page_mapcount(page))
+		clear_bit(PG_dcache_clean, &page->flags);
+	else {
+		dcache_wbinv_all();
+		if (mapping)
+			icache_inv_all();
+		set_bit(PG_dcache_clean, &page->flags);
+	}
 }
+EXPORT_SYMBOL(flush_dcache_page);
 
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-		      pte_t *pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+		      pte_t *ptep)
 {
-	unsigned long addr;
+	unsigned long pfn = pte_pfn(*ptep);
 	struct page *page;
-	unsigned long pfn;
 
-	pfn = pte_pfn(*pte);
-	if (unlikely(!pfn_valid(pfn)))
+	if (!pfn_valid(pfn))
 		return;
 
 	page = pfn_to_page(pfn);
-	addr = (unsigned long) page_address(page);
+	if (page == ZERO_PAGE(0))
+		return;
+
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+		dcache_wbinv_all();
 
-	if (vma->vm_flags & VM_EXEC ||
-	    pages_do_alias(addr, address & PAGE_MASK))
-		cache_wbinv_all();
+	if (page_mapping_file(page)) {
+		if (vma->vm_flags & VM_EXEC)
+			icache_inv_all();
+	}
+}
+
+void flush_kernel_dcache_page(struct page *page)
+{
+	struct address_space *mapping;
+
+	mapping = page_mapping_file(page);
+
+	if (!mapping || mapping_mapped(mapping))
+		dcache_wbinv_all();
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end)
+{
+	dcache_wbinv_all();
 
-	clear_bit(PG_arch_1, &(page)->flags);
+	if (vma->vm_flags & VM_EXEC)
+		icache_inv_all();
 }
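
The rewritten flush_dcache_page() is the standard deferred-flush handshake for aliasing VIPT caches: if the page-cache page has no user mapping yet, the flush is postponed by clearing PG_dcache_clean, and update_mmu_cache() does the real writeback the first time the page is faulted into user space. A minimal sketch of the calling convention the new EXPORT_SYMBOL enables (fill_page is a hypothetical helper, not part of this commit):

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical module code writing into a page cache page through the
 * kernel mapping; calling flush_dcache_page() afterwards keeps user
 * mappings and the I-cache coherent on this VIPT cache. */
static void fill_page(struct page *page, const void *src, size_t len)
{
	void *kaddr = kmap_atomic(page);

	memcpy(kaddr, src, len);
	kunmap_atomic(kaddr);

	/* Per the hunk above: either defers (clears PG_dcache_clean when
	 * the page is unmapped) or performs dcache_wbinv_all() now. */
	flush_dcache_page(page);
}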

arch/csky/abiv1/inc/abi/cacheflush.h (31 additions, 14 deletions)

@@ -4,46 +4,63 @@
 #ifndef __ABI_CSKY_CACHEFLUSH_H
 #define __ABI_CSKY_CACHEFLUSH_H
 
-#include <linux/compiler.h>
+#include <linux/mm.h>
 #include <asm/string.h>
 #include <asm/cache.h>
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-#define flush_cache_mm(mm)			cache_wbinv_all()
+#define flush_cache_mm(mm)			dcache_wbinv_all()
 #define flush_cache_page(vma, page, pfn)	cache_wbinv_all()
 #define flush_cache_dup_mm(mm)			cache_wbinv_all()
 
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+extern void flush_kernel_dcache_page(struct page *);
+
+#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
+#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
+
+static inline void flush_kernel_vmap_range(void *addr, int size)
+{
+	dcache_wbinv_all();
+}
+static inline void invalidate_kernel_vmap_range(void *addr, int size)
+{
+	dcache_wbinv_all();
+}
+
+#define ARCH_HAS_FLUSH_ANON_PAGE
+static inline void flush_anon_page(struct vm_area_struct *vma,
+			 struct page *page, unsigned long vmaddr)
+{
+	if (PageAnon(page))
+		cache_wbinv_all();
+}
+
 /*
  * if (current_mm != vma->mm) cache_wbinv_range(start, end) will be broken.
  * Use cache_wbinv_all() here and need to be improved in future.
  */
-#define flush_cache_range(vma, start, end)	cache_wbinv_all()
-#define flush_cache_vmap(start, end)		cache_wbinv_range(start, end)
-#define flush_cache_vunmap(start, end)		cache_wbinv_range(start, end)
+extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+#define flush_cache_vmap(start, end)		cache_wbinv_all()
+#define flush_cache_vunmap(start, end)		cache_wbinv_all()
 
-#define flush_icache_page(vma, page)		cache_wbinv_all()
+#define flush_icache_page(vma, page)		do {} while (0);
 #define flush_icache_range(start, end)		cache_wbinv_range(start, end)
 
-#define flush_icache_user_range(vma, pg, adr, len) \
-				cache_wbinv_range(adr, adr + len)
+#define flush_icache_user_range(vma,page,addr,len) \
+	flush_dcache_page(page)
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 do { \
-	cache_wbinv_all(); \
 	memcpy(dst, src, len); \
-	cache_wbinv_all(); \
 } while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 do { \
-	cache_wbinv_all(); \
 	memcpy(dst, src, len); \
 	cache_wbinv_all(); \
 } while (0)
 
-#define flush_dcache_mmap_lock(mapping)		do {} while (0)
-#define flush_dcache_mmap_unlock(mapping)	do {} while (0)
-
 #endif /* __ABI_CSKY_CACHEFLUSH_H */
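
copy_to_user_page() keeps only the post-copy cache_wbinv_all(): the destination is written through the kernel mapping, so nothing needs flushing beforehand, but the result must be pushed out so the user mapping (and the I-cache, for text pages) observes it. A sketch of the main caller, condensed from mm/memory.c's __access_remote_vm() (illustrative only; poke_remote is not a real kernel function):

#include <linux/highmem.h>
#include <linux/mm.h>

/* Condensed from __access_remote_vm(): how ptrace-style writes into
 * another task's memory end up in copy_to_user_page(). */
static int poke_remote(struct vm_area_struct *vma, struct page *page,
		       unsigned long addr, void *buf, int bytes, int offset)
{
	void *maddr = kmap(page);

	copy_to_user_page(vma, page, addr,
			  maddr + offset, buf, bytes); /* post-copy wbinv */
	set_page_dirty_lock(page);
	kunmap(page);

	return bytes;
}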

arch/csky/abiv1/inc/abi/page.h (3 additions, 2 deletions)

@@ -1,13 +1,14 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
 
-extern unsigned long shm_align_mask;
+#include <asm/shmparam.h>
+
 extern void flush_dcache_page(struct page *page);
 
 static inline unsigned long pages_do_alias(unsigned long addr1,
 					   unsigned long addr2)
 {
-	return (addr1 ^ addr2) & shm_align_mask;
+	return (addr1 ^ addr2) & (SHMLBA-1);
 }
 
 static inline void clear_user_page(void *addr, unsigned long vaddr,
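
With the mask now derived from the compile-time SHMLBA constant instead of a runtime shm_align_mask variable, pages_do_alias() reports whether two virtual addresses fall into different cache colours. A worked example, assuming SHMLBA == 0x4000, i.e. a 16 KiB aliasing window (the real value comes from asm/shmparam.h):

/* (SHMLBA - 1) == 0x3fff under the assumed value:
 *
 *   pages_do_alias(0x1000, 0x3000) == 0x2000  // different colours:
 *                                             // aliasing possible
 *   pages_do_alias(0x1000, 0x5000) == 0x0     // same colour: safe
 *
 * A non-zero result means two virtual mappings of the same physical
 * page can occupy different sets of the VIPT data cache, so they must
 * be flushed explicitly.
 */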
