
Commit da1b956

minchank authored and gregkh committed
android: binder: fix the race mmap and alloc_new_buf_locked
There is a RaceFuzzer report like below because we have no lock to close
the race below between binder_mmap and binder_alloc_new_buf_locked.
To close the race, let's use a memory barrier so that if someone sees
alloc->vma is not NULL, alloc->vma_vm_mm should never be NULL.

(I didn't add a stable mark intentionally because standard android
userspace libraries that interact with binder (libbinder & libhwbinder)
prevent the mmap/ioctl race. - from Todd)

"
Thread interleaving:
CPU0 (binder_alloc_mmap_handler)          CPU1 (binder_alloc_new_buf_locked)
=====                                     =====
// drivers/android/binder_alloc.c
// #L718 (v4.18-rc3)
alloc->vma = vma;
                                          // drivers/android/binder_alloc.c
                                          // #L346 (v4.18-rc3)
                                          if (alloc->vma == NULL) {
                                              ...
                                              // alloc->vma is not NULL at this point
                                              return ERR_PTR(-ESRCH);
                                          }
                                          ...
                                          // #L438
                                          binder_update_page_range(alloc, 0,
                                              (void *)PAGE_ALIGN((uintptr_t)buffer->data),
                                              end_page_addr);
                                          // In binder_update_page_range() #L218
                                          // But still alloc->vma_vm_mm is NULL here
                                          if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
alloc->vma_vm_mm = vma->vm_mm;

Crash Log:
==================================================================
BUG: KASAN: null-ptr-deref in __atomic_add_unless include/asm-generic/atomic-instrumented.h:89 [inline]
BUG: KASAN: null-ptr-deref in atomic_add_unless include/linux/atomic.h:533 [inline]
BUG: KASAN: null-ptr-deref in mmget_not_zero include/linux/sched/mm.h:75 [inline]
BUG: KASAN: null-ptr-deref in binder_update_page_range+0xece/0x18e0 drivers/android/binder_alloc.c:218
Write of size 4 at addr 0000000000000058 by task syz-executor0/11184

CPU: 1 PID: 11184 Comm: syz-executor0 Not tainted 4.18.0-rc3 #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.8.2-0-g33fbe13 by qemu-project.org 04/01/2014
Call Trace:
 __dump_stack lib/dump_stack.c:77 [inline]
 dump_stack+0x16e/0x22c lib/dump_stack.c:113
 kasan_report_error mm/kasan/report.c:352 [inline]
 kasan_report+0x163/0x380 mm/kasan/report.c:412
 check_memory_region_inline mm/kasan/kasan.c:260 [inline]
 check_memory_region+0x140/0x1a0 mm/kasan/kasan.c:267
 kasan_check_write+0x14/0x20 mm/kasan/kasan.c:278
 __atomic_add_unless include/asm-generic/atomic-instrumented.h:89 [inline]
 atomic_add_unless include/linux/atomic.h:533 [inline]
 mmget_not_zero include/linux/sched/mm.h:75 [inline]
 binder_update_page_range+0xece/0x18e0 drivers/android/binder_alloc.c:218
 binder_alloc_new_buf_locked drivers/android/binder_alloc.c:443 [inline]
 binder_alloc_new_buf+0x467/0xc30 drivers/android/binder_alloc.c:513
 binder_transaction+0x125b/0x4fb0 drivers/android/binder.c:2957
 binder_thread_write+0xc08/0x2770 drivers/android/binder.c:3528
 binder_ioctl_write_read.isra.39+0x24f/0x8e0 drivers/android/binder.c:4456
 binder_ioctl+0xa86/0xf34 drivers/android/binder.c:4596
 vfs_ioctl fs/ioctl.c:46 [inline]
 do_vfs_ioctl+0x154/0xd40 fs/ioctl.c:686
 ksys_ioctl+0x94/0xb0 fs/ioctl.c:701
 __do_sys_ioctl fs/ioctl.c:708 [inline]
 __se_sys_ioctl fs/ioctl.c:706 [inline]
 __x64_sys_ioctl+0x43/0x50 fs/ioctl.c:706
 do_syscall_64+0x167/0x4b0 arch/x86/entry/common.c:290
 entry_SYSCALL_64_after_hwframe+0x49/0xbe
"

Signed-off-by: Todd Kjos <[email protected]>
Signed-off-by: Minchan Kim <[email protected]>
Reviewed-by: Martijn Coenen <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 34f1166 commit da1b956
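As background on the smp_wmb()/smp_rmb() pairing the patch introduces, here is a minimal, hypothetical userspace sketch of the same publish/consume ordering. It is not part of the patch: C11 atomic_thread_fence() stands in for the kernel's smp_wmb()/smp_rmb(), and the fake_alloc, fake_set_vma and fake_get_vma names are invented purely for illustration.

#include <stdatomic.h>
#include <stddef.h>

/* Illustrative stand-ins only; not the real binder structures. */
struct fake_alloc {
        void *vma_vm_mm;          /* must be visible before vma is */
        _Atomic(void *) vma;      /* readers key off this being non-NULL */
};

/* Writer side, mirroring the idea behind binder_alloc_set_vma(). */
void fake_set_vma(struct fake_alloc *a, void *vma, void *mm)
{
        if (vma)
                a->vma_vm_mm = mm;
        /* Order the vma_vm_mm store before the vma store (~ smp_wmb()). */
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&a->vma, vma, memory_order_relaxed);
}

/* Reader side, mirroring the idea behind binder_alloc_get_vma(). */
void *fake_get_vma(struct fake_alloc *a)
{
        void *vma = NULL;

        if (atomic_load_explicit(&a->vma, memory_order_relaxed)) {
                /* Pairs with the release fence in fake_set_vma() (~ smp_rmb()). */
                atomic_thread_fence(memory_order_acquire);
                vma = atomic_load_explicit(&a->vma, memory_order_relaxed);
                /* If vma is non-NULL here, a->vma_vm_mm is visible too. */
        }
        return vma;
}

This also shows why the plain barrier() that the patch removes was not enough: barrier() is only a compiler barrier, so it neither orders the two stores for other CPUs nor pairs with anything on the reader side, while the smp_wmb()/smp_rmb() pair guarantees that a reader that observes a non-NULL alloc->vma also observes the earlier alloc->vma_vm_mm store.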


drivers/android/binder_alloc.c

Lines changed: 35 additions & 8 deletions
@@ -332,6 +332,35 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 	return vma ? -ENOMEM : -ESRCH;
 }
 
+
+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+		struct vm_area_struct *vma)
+{
+	if (vma)
+		alloc->vma_vm_mm = vma->vm_mm;
+	/*
+	 * If we see alloc->vma is not NULL, buffer data structures set up
+	 * completely. Look at smp_rmb side binder_alloc_get_vma.
+	 * We also want to guarantee new alloc->vma_vm_mm is always visible
+	 * if alloc->vma is set.
+	 */
+	smp_wmb();
+	alloc->vma = vma;
+}
+
+static inline struct vm_area_struct *binder_alloc_get_vma(
+		struct binder_alloc *alloc)
+{
+	struct vm_area_struct *vma = NULL;
+
+	if (alloc->vma) {
+		/* Look at description in binder_alloc_set_vma */
+		smp_rmb();
+		vma = alloc->vma;
+	}
+	return vma;
+}
+
 static struct binder_buffer *binder_alloc_new_buf_locked(
 				struct binder_alloc *alloc,
 				size_t data_size,
@@ -348,7 +377,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	size_t size, data_offsets_size;
 	int ret;
 
-	if (alloc->vma == NULL) {
+	if (!binder_alloc_get_vma(alloc)) {
 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 				   "%d: binder_alloc_buf, no vma\n",
 				   alloc->pid);
@@ -723,9 +752,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	buffer->free = 1;
 	binder_insert_free_buffer(alloc, buffer);
 	alloc->free_async_space = alloc->buffer_size / 2;
-	barrier();
-	alloc->vma = vma;
-	alloc->vma_vm_mm = vma->vm_mm;
+	binder_alloc_set_vma(alloc, vma);
 	mmgrab(alloc->vma_vm_mm);
 
 	return 0;
@@ -754,10 +781,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 	int buffers, page_count;
 	struct binder_buffer *buffer;
 
-	BUG_ON(alloc->vma);
-
 	buffers = 0;
 	mutex_lock(&alloc->mutex);
+	BUG_ON(alloc->vma);
+
 	while ((n = rb_first(&alloc->allocated_buffers))) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 
@@ -900,7 +927,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-	WRITE_ONCE(alloc->vma, NULL);
+	binder_alloc_set_vma(alloc, NULL);
 }
 
 /**
@@ -935,7 +962,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	index = page - alloc->pages;
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
-	vma = alloc->vma;
+	vma = binder_alloc_get_vma(alloc);
 	if (vma) {
 		if (!mmget_not_zero(alloc->vma_vm_mm))
 			goto err_mmget;
