
Commit c296d4d

Qian Cai authored and torvalds committed
asm-generic: fix a compilation warning
Fix this compilation warning on x86 by making flush_cache_vmap() inline.

lib/ioremap.c: In function 'ioremap_page_range':
lib/ioremap.c:214:16: warning: variable 'start' set but not used [-Wunused-but-set-variable]
  unsigned long start;
                ^~~~~

While at it, convert all other similar functions to inline for consistency.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Qian Cai <[email protected]>
Reviewed-by: Andrew Morton <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 3a7f0ad commit c296d4d
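For context, the reason the inline conversion silences gcc here: an empty macro discards its arguments during preprocessing, so a local variable whose only use is being passed to the macro ends up "set but not used", while an empty static inline function still evaluates its arguments, which counts as a use. The sketch below illustrates that difference; the names and the simplified callers are illustrative stand-ins, not the actual code in lib/ioremap.c or this header.

/* Sketch only -- illustrative names, not kernel code.
 * Old style: an empty macro discards its arguments at preprocessing time.
 */
#define flush_cache_vmap_as_macro(start, end) do { } while (0)

/* New style: an empty inline function still evaluates its arguments. */
static inline void flush_cache_vmap_as_inline(unsigned long start,
					      unsigned long end)
{
}

/* Once the macro expands to nothing, 'start' is assigned but never read,
 * so gcc reports -Wunused-but-set-variable for this caller.
 */
void demo_with_macro(unsigned long addr, unsigned long end)
{
	unsigned long start;

	start = addr;
	flush_cache_vmap_as_macro(start, end);
}

/* Passing 'start' to the inline function counts as a use; no warning. */
void demo_with_inline(unsigned long addr, unsigned long end)
{
	unsigned long start;

	start = addr;
	flush_cache_vmap_as_inline(start, end);
}

Building the first caller with gcc -Wall should reproduce the warning from the commit message; the second compiles cleanly.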

1 file changed: +60, -14 lines changed


include/asm-generic/cacheflush.h

Lines changed: 60 additions & 14 deletions
@@ -5,24 +5,70 @@
 /* Keep includes the same across arches. */
 #include <linux/mm.h>
 
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+
 /*
  * The cache doesn't need to be flushed when TLB entries change when
  * the cache is mapped to physical memory, not virtual memory
  */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_icache_range(start, end) do { } while (0)
-#define flush_icache_page(vma,pg) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
+static inline void flush_cache_all(void)
+{
+}
+
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_cache_dup_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_cache_range(struct vm_area_struct *vma,
+				     unsigned long start,
+				     unsigned long end)
+{
+}
+
+static inline void flush_cache_page(struct vm_area_struct *vma,
+				    unsigned long vmaddr,
+				    unsigned long pfn)
+{
+}
+
+static inline void flush_dcache_page(struct page *page)
+{
+}
+
+static inline void flush_dcache_mmap_lock(struct address_space *mapping)
+{
+}
+
+static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
+{
+}
+
+static inline void flush_icache_range(unsigned long start, unsigned long end)
+{
+}
+
+static inline void flush_icache_page(struct vm_area_struct *vma,
+				     struct page *page)
+{
+}
+
+static inline void flush_icache_user_range(struct vm_area_struct *vma,
+					   struct page *page,
+					   unsigned long addr, int len)
+{
+}
+
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+}
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+}
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do { \
