@@ -140,6 +140,13 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value)
 {
 	void *shadow_start, *shadow_end;
 
+	/*
+	 * Perform shadow offset calculation based on untagged address, as
+	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
+	 * addresses to this function.
+	 */
+	address = reset_tag(address);
+
 	shadow_start = kasan_mem_to_shadow(address);
 	shadow_end = kasan_mem_to_shadow(address + size);
 
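Note: the tag-based mode keeps the tag in the top byte of the pointer, so shadow
address calculations must see the untagged address; reset_tag() strips the tag
before kasan_mem_to_shadow() runs. A minimal sketch of such top-byte helpers,
assuming an arm64-style top-byte-ignore layout (illustrative only, not the
kernel's exact definitions):

#include <linux/types.h>

#define EXAMPLE_TAG_SHIFT	56	/* top byte of a 64-bit pointer (assumption) */
#define EXAMPLE_TAG_KERNEL	0xFF	/* "native" match-all kernel tag (assumption) */

static inline u8 example_get_tag(const void *addr)
{
	/* The tag is whatever sits in the pointer's top byte. */
	return (u8)((u64)addr >> EXAMPLE_TAG_SHIFT);
}

static inline void *example_set_tag(const void *addr, u8 tag)
{
	/* Replace the top byte with the given tag, keep the rest of the address. */
	return (void *)(((u64)addr & ~(0xFFULL << EXAMPLE_TAG_SHIFT)) |
			((u64)tag << EXAMPLE_TAG_SHIFT));
}

static inline void *example_reset_tag(const void *addr)
{
	/* An untagged kernel pointer carries the match-all tag. */
	return example_set_tag(addr, EXAMPLE_TAG_KERNEL);
}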
@@ -148,11 +155,24 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value)
 
 void kasan_unpoison_shadow(const void *address, size_t size)
 {
-	kasan_poison_shadow(address, size, 0);
+	u8 tag = get_tag(address);
+
+	/*
+	 * Perform shadow offset calculation based on untagged address, as
+	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
+	 * addresses to this function.
+	 */
+	address = reset_tag(address);
+
+	kasan_poison_shadow(address, size, tag);
 
 	if (size & KASAN_SHADOW_MASK) {
 		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
-		*shadow = size & KASAN_SHADOW_MASK;
+
+		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+			*shadow = tag;
+		else
+			*shadow = size & KASAN_SHADOW_MASK;
 	}
 }
 
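Note: with CONFIG_KASAN_SW_TAGS, unpoisoning writes the pointer's tag into every
shadow byte covering the range (including the trailing partial granule), while
the generic mode keeps storing the accessible size there. The rule this encoding
implies, sketched with the helpers from the note above (illustrative only):

static bool example_access_ok(const void *tagged_addr, u8 shadow_byte)
{
	u8 ptr_tag = example_get_tag(tagged_addr);

	/* The match-all kernel tag is never reported; otherwise tags must agree. */
	return ptr_tag == EXAMPLE_TAG_KERNEL || ptr_tag == shadow_byte;
}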
@@ -200,8 +220,9 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark)
 
 void kasan_alloc_pages(struct page *page, unsigned int order)
 {
-	if (likely(!PageHighMem(page)))
-		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
+	if (unlikely(PageHighMem(page)))
+		return;
+	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
 }
 
 void kasan_free_pages(struct page *page, unsigned int order)
@@ -218,6 +239,9 @@ void kasan_free_pages(struct page *page, unsigned int order)
  */
 static inline unsigned int optimal_redzone(unsigned int object_size)
 {
+	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+		return 0;
+
 	return
 		object_size <= 64  - 16 ? 16 :
 		object_size <= 128 - 32 ? 32 :
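Note: with tag-based KASAN, an out-of-bounds access into a neighbouring object is
caught by the tag mismatch itself, so no redzone is reserved and only the generic
mode keeps the size-class table. A worked example using just the two rows visible
in this hunk:

/*
 * object_size = 40: 40 <= 64 - 16,  so the generic redzone is 16 bytes.
 * object_size = 90: 90 <= 128 - 32, so the generic redzone is 32 bytes.
 * With CONFIG_KASAN_SW_TAGS the function returns 0 for any object size.
 */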
@@ -232,27 +256,28 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			slab_flags_t *flags)
 {
 	unsigned int orig_size = *size;
+	unsigned int redzone_size;
 	int redzone_adjust;
 
 	/* Add alloc meta. */
 	cache->kasan_info.alloc_meta_offset = *size;
 	*size += sizeof(struct kasan_alloc_meta);
 
 	/* Add free meta. */
-	if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
-	    cache->object_size < sizeof(struct kasan_free_meta)) {
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
+	     cache->object_size < sizeof(struct kasan_free_meta))) {
 		cache->kasan_info.free_meta_offset = *size;
 		*size += sizeof(struct kasan_free_meta);
 	}
-	redzone_adjust = optimal_redzone(cache->object_size) -
-		(*size - cache->object_size);
 
+	redzone_size = optimal_redzone(cache->object_size);
+	redzone_adjust = redzone_size - (*size - cache->object_size);
 	if (redzone_adjust > 0)
 		*size += redzone_adjust;
 
 	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
-			max(*size, cache->object_size +
-					optimal_redzone(cache->object_size)));
+			max(*size, cache->object_size + redzone_size));
 
 	/*
 	 * If the metadata doesn't fit, don't enable KASAN at all.
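Note: a worked example of the sizing above, assuming generic KASAN, a cache with
object_size = 64, no constructor, no SLAB_TYPESAFE_BY_RCU, and (assumption)
sizeof(struct kasan_alloc_meta) == 16:

/*
 * alloc_meta_offset = 64, *size = 64 + 16 = 80 (free meta is skipped);
 * redzone_size   = optimal_redzone(64) = 32   (64 <= 128 - 32);
 * redzone_adjust = 32 - (80 - 64) = 16 > 0,   so *size = 96;
 * final *size    = min(KMALLOC_MAX_SIZE, max(96, 64 + 32)) = 96.
 */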
@@ -265,6 +290,8 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 		return;
 	}
 
+	cache->align = round_up(cache->align, KASAN_SHADOW_SCALE_SIZE);
+
 	*flags |= SLAB_KASAN;
 }
 
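Note: one shadow byte covers a KASAN_SHADOW_SCALE_SIZE (8-byte) granule, and in
tag mode that byte holds a single tag, so two differently tagged objects must not
share a granule. A hypothetical illustration:

/*
 * A cache created with align = 4 could place two objects in one 8-byte
 * granule; round_up(4, KASAN_SHADOW_SCALE_SIZE) = 8 guarantees every
 * object starts on its own granule and therefore owns its shadow byte.
 */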
@@ -309,6 +336,32 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 			KASAN_KMALLOC_REDZONE);
 }
 
+/*
+ * Since it's desirable to only call object constructors once during slab
+ * allocation, we preassign tags to all such objects. Also preassign tags for
+ * SLAB_TYPESAFE_BY_RCU slabs to avoid use-after-free reports.
+ * For the SLAB allocator we can't preassign tags randomly since the freelist
+ * is stored as an array of indexes instead of a linked list. Assign tags
+ * based on object indexes, so that objects that are next to each other get
+ * different tags.
+ * After a tag is assigned, the object always gets allocated with the same tag.
+ * The reason is that we can't change tags for objects with constructors on
+ * reallocation (even for non-SLAB_TYPESAFE_BY_RCU), because the constructor
+ * code can save a pointer to the object somewhere (e.g. in the object
+ * itself). Then if we retag it, the old saved pointer will become invalid.
+ */
+static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new)
+{
+	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
+		return new ? KASAN_TAG_KERNEL : random_tag();
+
+#ifdef CONFIG_SLAB
+	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
+#else
+	return new ? random_tag() : get_tag(object);
+#endif
+}
+
 void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
 {
 	struct kasan_alloc_meta *alloc_info;
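Note: an illustration of the CONFIG_SLAB branch of assign_tag(), with a
hypothetical cache: the tag is the low byte of the object's index within its
slab, so neighbours always differ and tags only repeat when indexes differ by a
multiple of 256.

/*
 * object #0 -> tag 0x00, object #1 -> tag 0x01, object #2 -> tag 0x02, ...
 * object #256 (if a slab were that large) would wrap back to tag 0x00.
 */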
@@ -319,6 +372,9 @@ void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
 	alloc_info = get_alloc_info(cache, object);
 	__memset(alloc_info, 0, sizeof(*alloc_info));
 
+	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+		object = set_tag(object, assign_tag(cache, object, true));
+
 	return (void *)object;
 }
 
@@ -327,15 +383,30 @@ void *kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 	return kasan_kmalloc(cache, object, cache->object_size, flags);
 }
 
+static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
+{
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+		return shadow_byte < 0 ||
+			shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
+	else
+		return tag != (u8)shadow_byte;
+}
+
 static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 			      unsigned long ip, bool quarantine)
 {
 	s8 shadow_byte;
+	u8 tag;
+	void *tagged_object;
 	unsigned long rounded_up_size;
 
+	tag = get_tag(object);
+	tagged_object = object;
+	object = reset_tag(object);
+
 	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
 	    object)) {
-		kasan_report_invalid_free(object, ip);
+		kasan_report_invalid_free(tagged_object, ip);
 		return true;
 	}
 
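Note: the free path now keeps two views of the pointer, the untagged one for
shadow and metadata lookups and the tagged one for reporting, and
shadow_invalid() folds both modes' double-free checks into one predicate. Sketch
with made-up values:

/*
 * Generic mode: shadow_byte 0x00..0x07 marks an accessible granule;
 * KASAN_KMALLOC_FREE (0xFB, negative as s8) therefore trips the
 * shadow_byte < 0 check and is reported as an invalid (double) free.
 * Tag mode: freeing a pointer tagged 0x3A while the shadow still holds
 * 0x3A is accepted; any other shadow value is reported as invalid.
 */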
@@ -344,20 +415,22 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 		return false;
 
 	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
-	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
-		kasan_report_invalid_free(object, ip);
+	if (shadow_invalid(tag, shadow_byte)) {
+		kasan_report_invalid_free(tagged_object, ip);
 		return true;
 	}
 
 	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
 	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
 
-	if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN)))
+	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
+			unlikely(!(cache->flags & SLAB_KASAN)))
 		return false;
 
 	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
 	quarantine_put(get_free_info(cache, object), cache);
-	return true;
+
+	return IS_ENABLED(CONFIG_KASAN_GENERIC);
 }
 
 bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
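Note: returning IS_ENABLED(CONFIG_KASAN_GENERIC) means "object taken into the
quarantine" only for the generic mode; the tag-based mode never quarantines, so
the allocator frees the object immediately. A simplified, hypothetical caller
(the real logic sits in the slab free hooks):

static void example_free_hook(struct kmem_cache *s, void *obj)
{
	if (kasan_slab_free(s, obj, _RET_IP_))
		return;			/* generic KASAN queued it in the quarantine */

	/* Tag-based KASAN (and !KASAN) reach this point: really free the object. */
	__example_do_free(s, obj);	/* hypothetical helper */
}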
@@ -370,6 +443,7 @@ void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
+	u8 tag;
 
 	if (gfpflags_allow_blocking(flags))
 		quarantine_reduce();
@@ -382,14 +456,18 @@ void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 	redzone_end = round_up((unsigned long)object + cache->object_size,
 				KASAN_SHADOW_SCALE_SIZE);
 
-	kasan_unpoison_shadow(object, size);
+	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+		tag = assign_tag(cache, object, false);
+
+	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
+	kasan_unpoison_shadow(set_tag(object, tag), size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
 		KASAN_KMALLOC_REDZONE);
 
 	if (cache->flags & SLAB_KASAN)
 		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
 
-	return (void *)object;
+	return set_tag(object, tag);
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
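Note: kasan_kmalloc() now hands the tagged pointer back to the caller, so every
allocation from kmalloc()/kmem_cache_alloc() carries its tag in the top byte and
instrumented accesses are checked against the shadow bytes set up above. A
hedged usage sketch (example_get_tag() is the illustrative helper sketched
earlier):

	void *p = kmalloc(128, GFP_KERNEL);	/* returns a tagged pointer */
	u8 tag = example_get_tag(p);		/* the tag chosen by assign_tag() */

	((char *)p)[0] = 1;	/* pointer tag matches the shadow for p, no report */
	kfree(p);		/* the shadow is repoisoned on free */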
@@ -439,7 +517,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
 	page = virt_to_head_page(ptr);
 
 	if (unlikely(!PageSlab(page))) {
-		if (ptr != page_address(page)) {
+		if (reset_tag(ptr) != page_address(page)) {
 			kasan_report_invalid_free(ptr, ip);
 			return;
 		}
@@ -452,7 +530,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
 
 void kasan_kfree_large(void *ptr, unsigned long ip)
 {
-	if (ptr != page_address(virt_to_head_page(ptr)))
+	if (reset_tag(ptr) != page_address(virt_to_head_page(ptr)))
 		kasan_report_invalid_free(ptr, ip);
 	/* The object will be poisoned by page_alloc. */
 }
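Note: page_address() always returns an untagged linear-map address, while the
pointer passed to kfree() may carry a tag, so both checks above strip the tag
before comparing. With made-up values:

/*
 * ptr                = 0xd4ff800012345000   (tag 0xd4 in the top byte)
 * page_address(page) = 0xffff800012345000   (untagged)
 * Same page, but the comparison only succeeds after reset_tag(ptr).
 */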