 /* Keep includes the same across arches. */

 #include <linux/mm.h>

+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+
 /*
  * The cache doesn't need to be flushed when TLB entries change when
  * the cache is mapped to physical memory, not virtual memory.
  */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-#define flush_icache_range(start, end)		do { } while (0)
-#define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
-#define flush_cache_vmap(start, end)		do { } while (0)
-#define flush_cache_vunmap(start, end)		do { } while (0)
+static inline void flush_cache_all(void)
+{
+}
+
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_cache_dup_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_cache_range(struct vm_area_struct *vma,
+				     unsigned long start,
+				     unsigned long end)
+{
+}
+
+static inline void flush_cache_page(struct vm_area_struct *vma,
+				    unsigned long vmaddr,
+				    unsigned long pfn)
+{
+}
+
+static inline void flush_dcache_page(struct page *page)
+{
+}
+
+static inline void flush_dcache_mmap_lock(struct address_space *mapping)
+{
+}
+
+static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
+{
+}
+
+static inline void flush_icache_range(unsigned long start, unsigned long end)
+{
+}
+
+static inline void flush_icache_page(struct vm_area_struct *vma,
+				     struct page *page)
+{
+}
+
+static inline void flush_icache_user_range(struct vm_area_struct *vma,
+					   struct page *page,
+					   unsigned long addr, int len)
+{
+}
+
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+}
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+}

 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do { \
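The diff above replaces no-op cache-flush macros with empty static inline functions. A minimal standalone sketch of the practical difference follows; it is not kernel code: struct page below is a dummy stand-in rather than the kernel's definition, and bad_flush_dcache_page is a hypothetical name used only to represent the old macro style. The point it illustrates is that an empty macro discards its arguments before the compiler ever sees them, while a static inline stub still compiles to nothing but keeps full identifier and type checking.

/*
 * Standalone sketch (not kernel code): empty static inline stubs
 * versus empty macros for no-op cache-flush hooks.
 */

/* Dummy stand-in for the kernel's struct page, for illustration only. */
struct page {
	int dummy;
};

/*
 * Old style (hypothetical name): the macro body never uses its
 * parameter, so the argument is dropped by the preprocessor and is
 * never parsed or type-checked.
 */
#define bad_flush_dcache_page(page) do { } while (0)

/*
 * New style, as in the diff above: still a no-op the compiler can
 * eliminate, but the argument is evaluated for validity and type.
 */
static inline void flush_dcache_page(struct page *page)
{
}

int main(void)
{
	struct page pg;

	/*
	 * Compiles even though "npg" does not exist anywhere: the
	 * preprocessor throws the argument tokens away before the
	 * compiler sees them.
	 */
	bad_flush_dcache_page(&npg);

	/*
	 * Type-checked: passing anything other than a struct page *
	 * here draws a compiler diagnostic.
	 */
	flush_dcache_page(&pg);

	return 0;
}

ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE stays a plain #define in the diff, presumably because it is a constant that must remain visible to the preprocessor, which an inline function cannot provide.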