Skip to content

Commit bb6e04a

Browse files
arndb authored and akpm00 committed
kasan: use internal prototypes matching gcc-13 builtins
gcc-13 warns about function definitions for builtin interfaces that have a different prototype, e.g.: In file included from kasan_test.c:31: kasan.h:574:6: error: conflicting types for built-in function '__asan_register_globals'; expected 'void(void *, long int)' [-Werror=builtin-declaration-mismatch] 574 | void __asan_register_globals(struct kasan_global *globals, size_t size); kasan.h:577:6: error: conflicting types for built-in function '__asan_alloca_poison'; expected 'void(void *, long int)' [-Werror=builtin-declaration-mismatch] 577 | void __asan_alloca_poison(unsigned long addr, size_t size); kasan.h:580:6: error: conflicting types for built-in function '__asan_load1'; expected 'void(void *)' [-Werror=builtin-declaration-mismatch] 580 | void __asan_load1(unsigned long addr); kasan.h:581:6: error: conflicting types for built-in function '__asan_store1'; expected 'void(void *)' [-Werror=builtin-declaration-mismatch] 581 | void __asan_store1(unsigned long addr); kasan.h:643:6: error: conflicting types for built-in function '__hwasan_tag_memory'; expected 'void(void *, unsigned char, long int)' [-Werror=builtin-declaration-mismatch] 643 | void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size); The two problems are: - Addresses are passed as 'unsigned long' in the kernel, but gcc-13 expects a 'void *'. - Sizes are meant to use a signed ssize_t rather than size_t. Change all the prototypes to match these. Using 'void *' consistently for addresses gets rid of a couple of type casts, so push that down to the leaf functions where possible. This now passes all randconfig builds on arm, arm64 and x86, but I have not tested it on the other architectures that support kasan, since they tend to fail randconfig builds in other ways. This might fail if any of the 32-bit architectures expect a 'long' instead of 'int' for the size argument.
The __asan_allocas_unpoison() function prototype is somewhat weird, since it uses a pointer for 'stack_top' and a size_t for 'stack_bottom'. This looks like it is meant to be 'addr' and 'size' like the others, but the implementation clearly treats them as 'top' and 'bottom'. Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Arnd Bergmann <[email protected]> Cc: Alexander Potapenko <[email protected]> Cc: Andrey Konovalov <[email protected]> Cc: Andrey Ryabinin <[email protected]> Cc: Dmitry Vyukov <[email protected]> Cc: Marco Elver <[email protected]> Cc: Vincenzo Frascino <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent fb646a4 commit bb6e04a

File tree

12 files changed

+164
-165
lines changed

12 files changed

+164
-165
lines changed

arch/arm64/kernel/traps.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1044,7 +1044,7 @@ static int kasan_handler(struct pt_regs *regs, unsigned long esr)
10441044
bool recover = esr & KASAN_ESR_RECOVER;
10451045
bool write = esr & KASAN_ESR_WRITE;
10461046
size_t size = KASAN_ESR_SIZE(esr);
1047-
u64 addr = regs->regs[0];
1047+
void *addr = (void *)regs->regs[0];
10481048
u64 pc = regs->pc;
10491049

10501050
kasan_report(addr, size, write, pc);

arch/arm64/mm/fault.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -317,7 +317,7 @@ static void report_tag_fault(unsigned long addr, unsigned long esr,
317317
* find out access size.
318318
*/
319319
bool is_write = !!(esr & ESR_ELx_WNR);
320-
kasan_report(addr, 0, is_write, regs->pc);
320+
kasan_report((void *)addr, 0, is_write, regs->pc);
321321
}
322322
#else
323323
/* Tag faults aren't enabled without CONFIG_KASAN_HW_TAGS. */

include/linux/kasan.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -343,7 +343,7 @@ static inline void *kasan_reset_tag(const void *addr)
343343
* @is_write: whether the bad access is a write or a read
344344
* @ip: instruction pointer for the accessibility check or the bad access itself
345345
*/
346-
bool kasan_report(unsigned long addr, size_t size,
346+
bool kasan_report(const void *addr, size_t size,
347347
bool is_write, unsigned long ip);
348348

349349
#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

mm/kasan/common.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -445,7 +445,7 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
445445
bool __kasan_check_byte(const void *address, unsigned long ip)
446446
{
447447
if (!kasan_byte_accessible(address)) {
448-
kasan_report((unsigned long)address, 1, false, ip);
448+
kasan_report(address, 1, false, ip);
449449
return false;
450450
}
451451
return true;

mm/kasan/generic.c

Lines changed: 36 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -40,39 +40,39 @@
4040
* depending on memory access size X.
4141
*/
4242

43-
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
43+
static __always_inline bool memory_is_poisoned_1(const void *addr)
4444
{
45-
s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);
45+
s8 shadow_value = *(s8 *)kasan_mem_to_shadow(addr);
4646

4747
if (unlikely(shadow_value)) {
48-
s8 last_accessible_byte = addr & KASAN_GRANULE_MASK;
48+
s8 last_accessible_byte = (unsigned long)addr & KASAN_GRANULE_MASK;
4949
return unlikely(last_accessible_byte >= shadow_value);
5050
}
5151

5252
return false;
5353
}
5454

55-
static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
55+
static __always_inline bool memory_is_poisoned_2_4_8(const void *addr,
5656
unsigned long size)
5757
{
58-
u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);
58+
u8 *shadow_addr = (u8 *)kasan_mem_to_shadow(addr);
5959

6060
/*
6161
* Access crosses 8(shadow size)-byte boundary. Such access maps
6262
* into 2 shadow bytes, so we need to check them both.
6363
*/
64-
if (unlikely(((addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
64+
if (unlikely((((unsigned long)addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
6565
return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
6666

6767
return memory_is_poisoned_1(addr + size - 1);
6868
}
6969

70-
static __always_inline bool memory_is_poisoned_16(unsigned long addr)
70+
static __always_inline bool memory_is_poisoned_16(const void *addr)
7171
{
72-
u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
72+
u16 *shadow_addr = (u16 *)kasan_mem_to_shadow(addr);
7373

7474
/* Unaligned 16-bytes access maps into 3 shadow bytes. */
75-
if (unlikely(!IS_ALIGNED(addr, KASAN_GRANULE_SIZE)))
75+
if (unlikely(!IS_ALIGNED((unsigned long)addr, KASAN_GRANULE_SIZE)))
7676
return *shadow_addr || memory_is_poisoned_1(addr + 15);
7777

7878
return *shadow_addr;
@@ -120,26 +120,25 @@ static __always_inline unsigned long memory_is_nonzero(const void *start,
120120
return bytes_is_nonzero(start, (end - start) % 8);
121121
}
122122

123-
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
124-
size_t size)
123+
static __always_inline bool memory_is_poisoned_n(const void *addr, size_t size)
125124
{
126125
unsigned long ret;
127126

128-
ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
129-
kasan_mem_to_shadow((void *)addr + size - 1) + 1);
127+
ret = memory_is_nonzero(kasan_mem_to_shadow(addr),
128+
kasan_mem_to_shadow(addr + size - 1) + 1);
130129

131130
if (unlikely(ret)) {
132-
unsigned long last_byte = addr + size - 1;
133-
s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);
131+
const void *last_byte = addr + size - 1;
132+
s8 *last_shadow = (s8 *)kasan_mem_to_shadow(last_byte);
134133

135134
if (unlikely(ret != (unsigned long)last_shadow ||
136-
((long)(last_byte & KASAN_GRANULE_MASK) >= *last_shadow)))
135+
(((long)last_byte & KASAN_GRANULE_MASK) >= *last_shadow)))
137136
return true;
138137
}
139138
return false;
140139
}
141140

142-
static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
141+
static __always_inline bool memory_is_poisoned(const void *addr, size_t size)
143142
{
144143
if (__builtin_constant_p(size)) {
145144
switch (size) {
@@ -159,7 +158,7 @@ static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
159158
return memory_is_poisoned_n(addr, size);
160159
}
161160

162-
static __always_inline bool check_region_inline(unsigned long addr,
161+
static __always_inline bool check_region_inline(const void *addr,
163162
size_t size, bool write,
164163
unsigned long ret_ip)
165164
{
@@ -172,7 +171,7 @@ static __always_inline bool check_region_inline(unsigned long addr,
172171
if (unlikely(addr + size < addr))
173172
return !kasan_report(addr, size, write, ret_ip);
174173

175-
if (unlikely(!addr_has_metadata((void *)addr)))
174+
if (unlikely(!addr_has_metadata(addr)))
176175
return !kasan_report(addr, size, write, ret_ip);
177176

178177
if (likely(!memory_is_poisoned(addr, size)))
@@ -181,7 +180,7 @@ static __always_inline bool check_region_inline(unsigned long addr,
181180
return !kasan_report(addr, size, write, ret_ip);
182181
}
183182

184-
bool kasan_check_range(unsigned long addr, size_t size, bool write,
183+
bool kasan_check_range(const void *addr, size_t size, bool write,
185184
unsigned long ret_ip)
186185
{
187186
return check_region_inline(addr, size, write, ret_ip);
@@ -221,36 +220,37 @@ static void register_global(struct kasan_global *global)
221220
KASAN_GLOBAL_REDZONE, false);
222221
}
223222

224-
void __asan_register_globals(struct kasan_global *globals, size_t size)
223+
void __asan_register_globals(void *ptr, ssize_t size)
225224
{
226225
int i;
226+
struct kasan_global *globals = ptr;
227227

228228
for (i = 0; i < size; i++)
229229
register_global(&globals[i]);
230230
}
231231
EXPORT_SYMBOL(__asan_register_globals);
232232

233-
void __asan_unregister_globals(struct kasan_global *globals, size_t size)
233+
void __asan_unregister_globals(void *ptr, ssize_t size)
234234
{
235235
}
236236
EXPORT_SYMBOL(__asan_unregister_globals);
237237

238238
#define DEFINE_ASAN_LOAD_STORE(size) \
239-
void __asan_load##size(unsigned long addr) \
239+
void __asan_load##size(void *addr) \
240240
{ \
241241
check_region_inline(addr, size, false, _RET_IP_); \
242242
} \
243243
EXPORT_SYMBOL(__asan_load##size); \
244244
__alias(__asan_load##size) \
245-
void __asan_load##size##_noabort(unsigned long); \
245+
void __asan_load##size##_noabort(void *); \
246246
EXPORT_SYMBOL(__asan_load##size##_noabort); \
247-
void __asan_store##size(unsigned long addr) \
247+
void __asan_store##size(void *addr) \
248248
{ \
249249
check_region_inline(addr, size, true, _RET_IP_); \
250250
} \
251251
EXPORT_SYMBOL(__asan_store##size); \
252252
__alias(__asan_store##size) \
253-
void __asan_store##size##_noabort(unsigned long); \
253+
void __asan_store##size##_noabort(void *); \
254254
EXPORT_SYMBOL(__asan_store##size##_noabort)
255255

256256
DEFINE_ASAN_LOAD_STORE(1);
@@ -259,32 +259,32 @@ DEFINE_ASAN_LOAD_STORE(4);
259259
DEFINE_ASAN_LOAD_STORE(8);
260260
DEFINE_ASAN_LOAD_STORE(16);
261261

262-
void __asan_loadN(unsigned long addr, size_t size)
262+
void __asan_loadN(void *addr, ssize_t size)
263263
{
264264
kasan_check_range(addr, size, false, _RET_IP_);
265265
}
266266
EXPORT_SYMBOL(__asan_loadN);
267267

268268
__alias(__asan_loadN)
269-
void __asan_loadN_noabort(unsigned long, size_t);
269+
void __asan_loadN_noabort(void *, ssize_t);
270270
EXPORT_SYMBOL(__asan_loadN_noabort);
271271

272-
void __asan_storeN(unsigned long addr, size_t size)
272+
void __asan_storeN(void *addr, ssize_t size)
273273
{
274274
kasan_check_range(addr, size, true, _RET_IP_);
275275
}
276276
EXPORT_SYMBOL(__asan_storeN);
277277

278278
__alias(__asan_storeN)
279-
void __asan_storeN_noabort(unsigned long, size_t);
279+
void __asan_storeN_noabort(void *, ssize_t);
280280
EXPORT_SYMBOL(__asan_storeN_noabort);
281281

282282
/* to shut up compiler complaints */
283283
void __asan_handle_no_return(void) {}
284284
EXPORT_SYMBOL(__asan_handle_no_return);
285285

286286
/* Emitted by compiler to poison alloca()ed objects. */
287-
void __asan_alloca_poison(unsigned long addr, size_t size)
287+
void __asan_alloca_poison(void *addr, ssize_t size)
288288
{
289289
size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
290290
size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
@@ -295,7 +295,7 @@ void __asan_alloca_poison(unsigned long addr, size_t size)
295295
KASAN_ALLOCA_REDZONE_SIZE);
296296
const void *right_redzone = (const void *)(addr + rounded_up_size);
297297

298-
WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));
298+
WARN_ON(!IS_ALIGNED((unsigned long)addr, KASAN_ALLOCA_REDZONE_SIZE));
299299

300300
kasan_unpoison((const void *)(addr + rounded_down_size),
301301
size - rounded_down_size, false);
@@ -307,18 +307,18 @@ void __asan_alloca_poison(unsigned long addr, size_t size)
307307
EXPORT_SYMBOL(__asan_alloca_poison);
308308

309309
/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
310-
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
310+
void __asan_allocas_unpoison(void *stack_top, ssize_t stack_bottom)
311311
{
312-
if (unlikely(!stack_top || stack_top > stack_bottom))
312+
if (unlikely(!stack_top || stack_top > (void *)stack_bottom))
313313
return;
314314

315-
kasan_unpoison(stack_top, stack_bottom - stack_top, false);
315+
kasan_unpoison(stack_top, (void *)stack_bottom - stack_top, false);
316316
}
317317
EXPORT_SYMBOL(__asan_allocas_unpoison);
318318

319319
/* Emitted by the compiler to [un]poison local variables. */
320320
#define DEFINE_ASAN_SET_SHADOW(byte) \
321-
void __asan_set_shadow_##byte(const void *addr, size_t size) \
321+
void __asan_set_shadow_##byte(const void *addr, ssize_t size) \
322322
{ \
323323
__memset((void *)addr, 0x##byte, size); \
324324
} \

0 commit comments

Comments
 (0)