@@ -62,9 +62,9 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                                        struct binder_buffer *buffer)
 {
         if (list_is_last(&buffer->entry, &alloc->buffers))
-                return alloc->buffer +
-                        alloc->buffer_size - (void *)buffer->data;
-        return (size_t)binder_buffer_next(buffer) - (size_t)buffer->data;
+                return (u8 *)alloc->buffer +
+                        alloc->buffer_size - (u8 *)buffer->data;
+        return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
 }
 
 static void binder_insert_free_buffer(struct binder_alloc *alloc,
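For orientation (not part of the patch): after this change the binder_buffer bookkeeping struct lives on the kernel heap and only its data field points into the mmap'd buffer area, so size arithmetic has to be done on ->data rather than on the struct address, as the hunk above shows. A minimal sketch of the relevant fields, abbreviated from binder_alloc.h:

    /* Sketch only; field list abbreviated from binder_alloc.h. */
    struct binder_buffer {
            struct list_head entry;  /* alloc->buffers, ordered by ->data  */
            struct rb_node rb_node;  /* free_buffers or allocated_buffers  */
            unsigned free:1;
            /* ... other flags and the size fields elided ...              */
            void *data;              /* start of this region in the area   */
    };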
@@ -114,9 +114,9 @@ static void binder_insert_allocated_buffer_locked(
                 buffer = rb_entry(parent, struct binder_buffer, rb_node);
                 BUG_ON(buffer->free);
 
-                if (new_buffer < buffer)
+                if (new_buffer->data < buffer->data)
                         p = &parent->rb_left;
-                else if (new_buffer > buffer)
+                else if (new_buffer->data > buffer->data)
                         p = &parent->rb_right;
                 else
                         BUG();
@@ -131,18 +131,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 {
         struct rb_node *n = alloc->allocated_buffers.rb_node;
         struct binder_buffer *buffer;
-        struct binder_buffer *kern_ptr;
+        void *kern_ptr;
 
-        kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
-                - offsetof(struct binder_buffer, data));
+        kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
 
         while (n) {
                 buffer = rb_entry(n, struct binder_buffer, rb_node);
                 BUG_ON(buffer->free);
 
-                if (kern_ptr < buffer)
+                if (kern_ptr < buffer->data)
                         n = n->rb_left;
-                else if (kern_ptr > buffer)
+                else if (kern_ptr > buffer->data)
                         n = n->rb_right;
                 else {
                         /*
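A gloss on the hunk above (my reading, not from the patch): user_ptr is the address userspace saw and user_buffer_offset is the fixed delta between the kernel and user mappings of the same area. With the metadata no longer laid out in front of the payload, translating back lands directly on buffer->data, so the rb-tree lookup now compares against ->data rather than against an embedded struct binder_buffer:

    /* Address translation sketch; names as used in binder_alloc.c. */
    void *kern_data = (void *)(user_ptr - alloc->user_buffer_offset);
    /*
     * Before this patch the in-area struct had to be recovered first:
     *   kern_struct = kern_data - offsetof(struct binder_buffer, data);
     */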
@@ -330,6 +329,9 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
                 return ERR_PTR(-ENOSPC);
         }
 
+        /* Pad 0-size buffers so they get assigned unique addresses */
+        size = max(size, sizeof(void *));
+
         while (n) {
                 buffer = rb_entry(n, struct binder_buffer, rb_node);
                 BUG_ON(!buffer->free);
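On the padding added above (my reading of the comment, not stated elsewhere in the patch): the allocated-buffers rb-tree and the buffers list are now keyed by buffer->data, so two zero-sized allocations starting at the same offset would compare equal and trip the BUG() in binder_insert_allocated_buffer_locked(). Rounding every request up to sizeof(void *) keeps the ->data keys distinct:

    /* Illustration with a hypothetical pair of 0-byte requests. */
    size = max(size, sizeof(void *));      /* 0 becomes 8 on a 64-bit kernel */
    /* The split below places the next free buffer at ->data + size, so a   */
    /* second 0-byte allocation gets a strictly larger ->data key.          */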
@@ -389,32 +391,35 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 
         has_page_addr =
                 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
-        if (n == NULL) {
-                if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
-                        buffer_size = size; /* no room for other buffers */
-                else
-                        buffer_size = size + sizeof(struct binder_buffer);
-        }
+        WARN_ON(n && buffer_size != size);
         end_page_addr =
-                (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+                (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
         if (end_page_addr > has_page_addr)
                 end_page_addr = has_page_addr;
         ret = binder_update_page_range(alloc, 1,
             (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
         if (ret)
                 return ERR_PTR(ret);
 
-        rb_erase(best_fit, &alloc->free_buffers);
-        buffer->free = 0;
-        buffer->free_in_progress = 0;
-        binder_insert_allocated_buffer_locked(alloc, buffer);
         if (buffer_size != size) {
-                struct binder_buffer *new_buffer = (void *)buffer->data + size;
+                struct binder_buffer *new_buffer;
 
+                new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+                if (!new_buffer) {
+                        pr_err("%s: %d failed to alloc new buffer struct\n",
+                               __func__, alloc->pid);
+                        goto err_alloc_buf_struct_failed;
+                }
+                new_buffer->data = (u8 *)buffer->data + size;
                 list_add(&new_buffer->entry, &buffer->entry);
                 new_buffer->free = 1;
                 binder_insert_free_buffer(alloc, new_buffer);
         }
+
+        rb_erase(best_fit, &alloc->free_buffers);
+        buffer->free = 0;
+        buffer->free_in_progress = 0;
+        binder_insert_allocated_buffer_locked(alloc, buffer);
         binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                      "%d: binder_alloc_buf size %zd got %pK\n",
                       alloc->pid, size, buffer);
@@ -429,6 +434,12 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
                               alloc->pid, size, alloc->free_async_space);
         }
         return buffer;
+
+err_alloc_buf_struct_failed:
+        binder_update_page_range(alloc, 0,
+                                 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+                                 end_page_addr, NULL);
+        return ERR_PTR(-ENOMEM);
 }
 
 /**
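Putting the two preceding hunks together (a condensed sketch, not a replacement for the patch): when the best-fit free buffer is larger than the request, the remainder is now described by a freshly kzalloc'd binder_buffer rather than by a struct carved out of the mapped area, and the claim of the buffer (rb_erase plus re-insert as allocated) is deferred until after the only step that can fail, so the error path merely releases the pages it just mapped:

    /* Condensed flow of the split-and-claim sequence above (sketch). */
    if (buffer_size != size) {                        /* best fit was larger */
            struct binder_buffer *new_buffer;

            new_buffer = kzalloc(sizeof(*new_buffer), GFP_KERNEL);
            if (!new_buffer)
                    goto err_alloc_buf_struct_failed; /* unmap fresh pages   */
            new_buffer->data = (u8 *)buffer->data + size;
            new_buffer->free = 1;
            list_add(&new_buffer->entry, &buffer->entry);
            binder_insert_free_buffer(alloc, new_buffer);
    }
    rb_erase(best_fit, &alloc->free_buffers);        /* nothing can fail now */
    buffer->free = 0;
    binder_insert_allocated_buffer_locked(alloc, buffer);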
@@ -463,56 +474,59 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 
 static void *buffer_start_page(struct binder_buffer *buffer)
 {
-        return (void *)((uintptr_t)buffer & PAGE_MASK);
+        return (void *)((uintptr_t)buffer->data & PAGE_MASK);
 }
 
-static void *buffer_end_page(struct binder_buffer *buffer)
+static void *prev_buffer_end_page(struct binder_buffer *buffer)
 {
-        return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+        return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
 }
 
 static void binder_delete_free_buffer(struct binder_alloc *alloc,
                                       struct binder_buffer *buffer)
 {
         struct binder_buffer *prev, *next = NULL;
-        int free_page_end = 1;
-        int free_page_start = 1;
-
+        bool to_free = true;
         BUG_ON(alloc->buffers.next == &buffer->entry);
         prev = binder_buffer_prev(buffer);
         BUG_ON(!prev->free);
-        if (buffer_end_page(prev) == buffer_start_page(buffer)) {
-                free_page_start = 0;
-                if (buffer_end_page(prev) == buffer_end_page(buffer))
-                        free_page_end = 0;
+        if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
+                to_free = false;
                 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                             "%d: merge free, buffer %pK share page with %pK\n",
-                              alloc->pid, buffer, prev);
+                                   "%d: merge free, buffer %pK share page with %pK\n",
+                                   alloc->pid, buffer->data, prev->data);
         }
 
         if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                 next = binder_buffer_next(buffer);
-                if (buffer_start_page(next) == buffer_end_page(buffer)) {
-                        free_page_end = 0;
-                        if (buffer_start_page(next) ==
-                            buffer_start_page(buffer))
-                                free_page_start = 0;
+                if (buffer_start_page(next) == buffer_start_page(buffer)) {
+                        to_free = false;
                         binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                                     "%d: merge free, buffer %pK share page with %pK\n",
-                                      alloc->pid, buffer, prev);
+                                           "%d: merge free, buffer %pK share page with %pK\n",
+                                           alloc->pid,
+                                           buffer->data,
+                                           next->data);
                 }
         }
-        list_del(&buffer->entry);
-        if (free_page_start || free_page_end) {
+
+        if (PAGE_ALIGNED(buffer->data)) {
+                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                                   "%d: merge free, buffer start %pK is page aligned\n",
+                                   alloc->pid, buffer->data);
+                to_free = false;
+        }
+
+        if (to_free) {
                 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                             "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
-                             alloc->pid, buffer, free_page_start ? "" : " end",
-                             free_page_end ? "" : " start", prev, next);
-                binder_update_page_range(alloc, 0, free_page_start ?
-                        buffer_start_page(buffer) : buffer_end_page(buffer),
-                        (free_page_end ? buffer_end_page(buffer) :
-                        buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+                                   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
+                                   alloc->pid, buffer->data,
+                                   prev->data, next->data);
+                binder_update_page_range(alloc, 0, buffer_start_page(buffer),
+                                         buffer_start_page(buffer) + PAGE_SIZE,
+                                         NULL);
         }
+        list_del(&buffer->entry);
+        kfree(buffer);
 }
 
 static void binder_free_buf_locked(struct binder_alloc *alloc,
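As I read the rewritten binder_delete_free_buffer() above, the page holding buffer->data is released only when all three checks leave to_free set: the previous buffer's last byte sits on an earlier page, the next buffer does not start on the same page, and buffer->data itself is not page-aligned (an aligned start means that page fell inside the buffer's own data range and was already released when the buffer was freed). A compact restatement, reusing the helper names from the patch:

    /* Sketch: when may the page containing buffer->data be unmapped? */
    static bool can_free_start_page(struct binder_buffer *prev,
                                    struct binder_buffer *buffer,
                                    struct binder_buffer *next)
    {
            if (prev_buffer_end_page(prev) == buffer_start_page(buffer))
                    return false;  /* prev still ends on this page       */
            if (next && buffer_start_page(next) == buffer_start_page(buffer))
                    return false;  /* next begins on this page           */
            if (PAGE_ALIGNED(buffer->data))
                    return false;  /* page already freed with the buffer */
            return true;
    }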
@@ -533,8 +547,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
         BUG_ON(buffer->free);
         BUG_ON(size > buffer_size);
         BUG_ON(buffer->transaction != NULL);
-        BUG_ON((void *)buffer < alloc->buffer);
-        BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
+        BUG_ON(buffer->data < alloc->buffer);
+        BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
 
         if (buffer->async_transaction) {
                 alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -646,13 +660,14 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
         }
         alloc->buffer_size = vma->vm_end - vma->vm_start;
 
-        if (binder_update_page_range(alloc, 1, alloc->buffer,
-                                     alloc->buffer + PAGE_SIZE, vma)) {
+        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+        if (!buffer) {
                 ret = -ENOMEM;
-                failure_string = "alloc small buf";
-                goto err_alloc_small_buf_failed;
+                failure_string = "alloc buffer struct";
+                goto err_alloc_buf_struct_failed;
         }
-        buffer = alloc->buffer;
+
+        buffer->data = alloc->buffer;
         INIT_LIST_HEAD(&alloc->buffers);
         list_add(&buffer->entry, &alloc->buffers);
         buffer->free = 1;
@@ -664,7 +679,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 
         return 0;
 
-err_alloc_small_buf_failed:
+err_alloc_buf_struct_failed:
         kfree(alloc->pages);
         alloc->pages = NULL;
 err_alloc_pages_failed:
@@ -684,14 +699,13 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 {
         struct rb_node *n;
         int buffers, page_count;
+        struct binder_buffer *buffer;
 
         BUG_ON(alloc->vma);
 
         buffers = 0;
         mutex_lock(&alloc->mutex);
         while ((n = rb_first(&alloc->allocated_buffers))) {
-                struct binder_buffer *buffer;
-
                 buffer = rb_entry(n, struct binder_buffer, rb_node);
 
                 /* Transaction should already have been freed */
@@ -701,6 +715,16 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
                 buffers++;
         }
 
+        while (!list_empty(&alloc->buffers)) {
+                buffer = list_first_entry(&alloc->buffers,
+                                          struct binder_buffer, entry);
+                WARN_ON(!buffer->free);
+
+                list_del(&buffer->entry);
+                WARN_ON_ONCE(!list_empty(&alloc->buffers));
+                kfree(buffer);
+        }
+
         page_count = 0;
         if (alloc->pages) {
                 int i;