@@ -332,6 +332,35 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 	return vma ? -ENOMEM : -ESRCH;
 }
 
+
+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+		struct vm_area_struct *vma)
+{
+	if (vma)
+		alloc->vma_vm_mm = vma->vm_mm;
+	/*
+	 * If we see alloc->vma is not NULL, the buffer data structures are
+	 * set up completely; see the smp_rmb() in binder_alloc_get_vma().
+	 * We also want to guarantee the new alloc->vma_vm_mm is always
+	 * visible if alloc->vma is set.
+	 */
+	smp_wmb();
+	alloc->vma = vma;
+}
+
+static inline struct vm_area_struct *binder_alloc_get_vma(
+		struct binder_alloc *alloc)
+{
+	struct vm_area_struct *vma = NULL;
+
+	if (alloc->vma) {
+		/* Look at description in binder_alloc_set_vma() */
+		smp_rmb();
+		vma = alloc->vma;
+	}
+	return vma;
+}
+
 static struct binder_buffer *binder_alloc_new_buf_locked(
 				struct binder_alloc *alloc,
 				size_t data_size,
@@ -348,7 +377,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	size_t size, data_offsets_size;
 	int ret;
 
-	if (alloc->vma == NULL) {
+	if (!binder_alloc_get_vma(alloc)) {
 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 				   "%d: binder_alloc_buf, no vma\n",
 				   alloc->pid);
@@ -723,9 +752,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	buffer->free = 1;
 	binder_insert_free_buffer(alloc, buffer);
 	alloc->free_async_space = alloc->buffer_size / 2;
-	barrier();
-	alloc->vma = vma;
-	alloc->vma_vm_mm = vma->vm_mm;
+	binder_alloc_set_vma(alloc, vma);
 	mmgrab(alloc->vma_vm_mm);
 
 	return 0;
@@ -754,10 +781,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 	int buffers, page_count;
 	struct binder_buffer *buffer;
 
-	BUG_ON(alloc->vma);
-
 	buffers = 0;
 	mutex_lock(&alloc->mutex);
+	BUG_ON(alloc->vma);
+
 	while ((n = rb_first(&alloc->allocated_buffers))) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 
@@ -900,7 +927,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-	WRITE_ONCE(alloc->vma, NULL);
+	binder_alloc_set_vma(alloc, NULL);
 }
 
 /**
@@ -935,7 +962,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	index = page - alloc->pages;
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
-	vma = alloc->vma;
+	vma = binder_alloc_get_vma(alloc);
 	if (vma) {
 		if (!mmget_not_zero(alloc->vma_vm_mm))
 			goto err_mmget;
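
The core of the change is the ordering contract between the two helpers: a writer must make alloc->vma_vm_mm (and the rest of the buffer setup) visible before it publishes a non-NULL alloc->vma, and a reader that observes a non-NULL alloc->vma may then safely rely on that state. Below is a minimal userspace sketch of the same publish/observe pattern, assuming C11 release/acquire atomics in place of the kernel's smp_wmb()/smp_rmb(); struct mapping, mapping_set() and mapping_get() are illustrative names, not part of binder.

/*
 * Userspace analogue of the binder_alloc_set_vma()/binder_alloc_get_vma()
 * pattern: publish a pointer only after its associated payload is visible,
 * and trust the payload only after observing a non-NULL pointer.
 * C11 release/acquire ordering stands in for smp_wmb()/smp_rmb().
 */
#include <stdatomic.h>

struct mapping {			/* illustrative, not a binder type */
	void *mm;			/* payload set up before publication */
	void *_Atomic vma;		/* non-NULL once setup is complete */
};

static void mapping_set(struct mapping *m, void *vma, void *mm)
{
	if (vma)
		m->mm = mm;
	/* release: order the payload store before the pointer store */
	atomic_store_explicit(&m->vma, vma, memory_order_release);
}

static void *mapping_get(struct mapping *m)
{
	/* acquire: a non-NULL result guarantees the payload store is visible */
	void *vma = atomic_load_explicit(&m->vma, memory_order_acquire);

	return vma;			/* callers read m->mm only when non-NULL */
}

The design choice in the patch is the same: instead of an open-coded barrier() plus two stores in binder_alloc_mmap_handler() and a bare WRITE_ONCE() in binder_alloc_vma_close(), every writer goes through binder_alloc_set_vma() and every lockless reader through binder_alloc_get_vma(), so the barrier pairing cannot be forgotten at a call site.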