
Commit 74310e0

Sherry Yang authored and gregkh committed
android: binder: Move buffer out of area shared with user space
The binder driver allocates buffer metadata in a region that is mapped into user space, and this metadata contains kernel pointers. This patch allocates the buffer metadata on the kernel heap, outside the user-mapped region, and uses a pointer field to refer to the mapped data.

Signed-off-by: Sherry Yang <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
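To make the commit message concrete: struct binder_buffer previously ended in a flexible array member, so its kernel pointers (list and rb-tree links) lived in the same pages that binder mmap()s into the target process; the patch turns that member into a plain pointer. A minimal userspace sketch of the two layouts (ordinary C, hypothetical struct names, not the driver code):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Before: header and payload share one allocation, so mapping the
 * payload pages also exposes the header's kernel pointers. */
struct buffer_inline {
	struct buffer_inline *next;	/* kernel pointer, visible if mapped */
	uint8_t data[];			/* payload starts right after header */
};

/* After: header lives in its own kernel-heap allocation; only the
 * region that ->data points into is shared with user space. */
struct buffer_split {
	struct buffer_split *next;	/* stays in unmapped kernel memory */
	void *data;			/* points into the shared area */
};

int main(void)
{
	struct buffer_inline *in = malloc(sizeof(*in) + 64);
	struct buffer_split sp = { .next = NULL, .data = malloc(64) };

	if (!in || !sp.data)
		return 1;
	printf("inline: header %p, payload %p (adjacent)\n",
	       (void *)in, (void *)in->data);
	printf("split:  header %p, payload %p (independent)\n",
	       (void *)&sp, sp.data);
	free(in);
	free(sp.data);
	return 0;
}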
1 parent 4175e2b commit 74310e0

File tree: 3 files changed (+90, -67 lines)


drivers/android/binder_alloc.c

Lines changed: 84 additions & 60 deletions
@@ -62,9 +62,9 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
 				       struct binder_buffer *buffer)
 {
 	if (list_is_last(&buffer->entry, &alloc->buffers))
-		return alloc->buffer +
-			alloc->buffer_size - (void *)buffer->data;
-	return (size_t)binder_buffer_next(buffer) - (size_t)buffer->data;
+		return (u8 *)alloc->buffer +
+			alloc->buffer_size - (u8 *)buffer->data;
+	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
 }
 
 static void binder_insert_free_buffer(struct binder_alloc *alloc,

@@ -114,9 +114,9 @@ static void binder_insert_allocated_buffer_locked(
 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (new_buffer < buffer)
+		if (new_buffer->data < buffer->data)
 			p = &parent->rb_left;
-		else if (new_buffer > buffer)
+		else if (new_buffer->data > buffer->data)
 			p = &parent->rb_right;
 		else
 			BUG();

@@ -131,18 +131,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 {
 	struct rb_node *n = alloc->allocated_buffers.rb_node;
 	struct binder_buffer *buffer;
-	struct binder_buffer *kern_ptr;
+	void *kern_ptr;
 
-	kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
-		- offsetof(struct binder_buffer, data));
+	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
 
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (kern_ptr < buffer)
+		if (kern_ptr < buffer->data)
 			n = n->rb_left;
-		else if (kern_ptr > buffer)
+		else if (kern_ptr > buffer->data)
 			n = n->rb_right;
 		else {
 			/*

@@ -330,6 +329,9 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 		return ERR_PTR(-ENOSPC);
 	}
 
+	/* Pad 0-size buffers so they get assigned unique addresses */
+	size = max(size, sizeof(void *));
+
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(!buffer->free);

@@ -389,32 +391,35 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 
 	has_page_addr =
 		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
-	if (n == NULL) {
-		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
-			buffer_size = size; /* no room for other buffers */
-		else
-			buffer_size = size + sizeof(struct binder_buffer);
-	}
+	WARN_ON(n && buffer_size != size);
 	end_page_addr =
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
 	if (end_page_addr > has_page_addr)
 		end_page_addr = has_page_addr;
 	ret = binder_update_page_range(alloc, 1,
 	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
 	if (ret)
 		return ERR_PTR(ret);
 
-	rb_erase(best_fit, &alloc->free_buffers);
-	buffer->free = 0;
-	buffer->free_in_progress = 0;
-	binder_insert_allocated_buffer_locked(alloc, buffer);
 	if (buffer_size != size) {
-		struct binder_buffer *new_buffer = (void *)buffer->data + size;
+		struct binder_buffer *new_buffer;
 
+		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+		if (!new_buffer) {
+			pr_err("%s: %d failed to alloc new buffer struct\n",
+			       __func__, alloc->pid);
+			goto err_alloc_buf_struct_failed;
+		}
+		new_buffer->data = (u8 *)buffer->data + size;
 		list_add(&new_buffer->entry, &buffer->entry);
 		new_buffer->free = 1;
 		binder_insert_free_buffer(alloc, new_buffer);
 	}
+
+	rb_erase(best_fit, &alloc->free_buffers);
+	buffer->free = 0;
+	buffer->free_in_progress = 0;
+	binder_insert_allocated_buffer_locked(alloc, buffer);
 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 		     "%d: binder_alloc_buf size %zd got %pK\n",
 		      alloc->pid, size, buffer);

@@ -429,6 +434,12 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 			      alloc->pid, size, alloc->free_async_space);
 	}
 	return buffer;
+
+err_alloc_buf_struct_failed:
+	binder_update_page_range(alloc, 0,
+				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+				 end_page_addr, NULL);
+	return ERR_PTR(-ENOMEM);
 }
 
 /**

@@ -463,56 +474,59 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 
 static void *buffer_start_page(struct binder_buffer *buffer)
 {
-	return (void *)((uintptr_t)buffer & PAGE_MASK);
+	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
 }
 
-static void *buffer_end_page(struct binder_buffer *buffer)
+static void *prev_buffer_end_page(struct binder_buffer *buffer)
 {
-	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
 }
 
 static void binder_delete_free_buffer(struct binder_alloc *alloc,
 				      struct binder_buffer *buffer)
 {
 	struct binder_buffer *prev, *next = NULL;
-	int free_page_end = 1;
-	int free_page_start = 1;
-
+	bool to_free = true;
 	BUG_ON(alloc->buffers.next == &buffer->entry);
 	prev = binder_buffer_prev(buffer);
 	BUG_ON(!prev->free);
-	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
-		free_page_start = 0;
-		if (buffer_end_page(prev) == buffer_end_page(buffer))
-			free_page_end = 0;
+	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
+		to_free = false;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %pK share page with %pK\n",
-			      alloc->pid, buffer, prev);
+				   "%d: merge free, buffer %pK share page with %pK\n",
+				   alloc->pid, buffer->data, prev->data);
 	}
 
 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
 		next = binder_buffer_next(buffer);
-		if (buffer_start_page(next) == buffer_end_page(buffer)) {
-			free_page_end = 0;
-			if (buffer_start_page(next) ==
-			    buffer_start_page(buffer))
-				free_page_start = 0;
+		if (buffer_start_page(next) == buffer_start_page(buffer)) {
+			to_free = false;
 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				     "%d: merge free, buffer %pK share page with %pK\n",
-				      alloc->pid, buffer, prev);
+					   "%d: merge free, buffer %pK share page with %pK\n",
+					   alloc->pid,
+					   buffer->data,
+					   next->data);
		}
 	}
-	list_del(&buffer->entry);
-	if (free_page_start || free_page_end) {
+
+	if (PAGE_ALIGNED(buffer->data)) {
+		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+				   "%d: merge free, buffer start %pK is page aligned\n",
+				   alloc->pid, buffer->data);
+		to_free = false;
+	}
+
+	if (to_free) {
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
-			     alloc->pid, buffer, free_page_start ? "" : " end",
-			     free_page_end ? "" : " start", prev, next);
-		binder_update_page_range(alloc, 0, free_page_start ?
-			buffer_start_page(buffer) : buffer_end_page(buffer),
-			(free_page_end ? buffer_end_page(buffer) :
-			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
+				   alloc->pid, buffer->data,
+				   prev->data, next->data);
+		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
+					 buffer_start_page(buffer) + PAGE_SIZE,
+					 NULL);
 	}
+	list_del(&buffer->entry);
+	kfree(buffer);
 }
 
 static void binder_free_buf_locked(struct binder_alloc *alloc,

@@ -533,8 +547,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 	BUG_ON(buffer->free);
 	BUG_ON(size > buffer_size);
 	BUG_ON(buffer->transaction != NULL);
-	BUG_ON((void *)buffer < alloc->buffer);
-	BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
+	BUG_ON(buffer->data < alloc->buffer);
+	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
 
 	if (buffer->async_transaction) {
 		alloc->free_async_space += size + sizeof(struct binder_buffer);

@@ -646,13 +660,14 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	}
 	alloc->buffer_size = vma->vm_end - vma->vm_start;
 
-	if (binder_update_page_range(alloc, 1, alloc->buffer,
-				     alloc->buffer + PAGE_SIZE, vma)) {
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer) {
 		ret = -ENOMEM;
-		failure_string = "alloc small buf";
-		goto err_alloc_small_buf_failed;
+		failure_string = "alloc buffer struct";
+		goto err_alloc_buf_struct_failed;
 	}
-	buffer = alloc->buffer;
+
+	buffer->data = alloc->buffer;
 	INIT_LIST_HEAD(&alloc->buffers);
 	list_add(&buffer->entry, &alloc->buffers);
 	buffer->free = 1;

@@ -664,7 +679,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 
 	return 0;
 
-err_alloc_small_buf_failed:
+err_alloc_buf_struct_failed:
 	kfree(alloc->pages);
 	alloc->pages = NULL;
 err_alloc_pages_failed:

@@ -684,14 +699,13 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 {
 	struct rb_node *n;
 	int buffers, page_count;
+	struct binder_buffer *buffer;
 
 	BUG_ON(alloc->vma);
 
 	buffers = 0;
 	mutex_lock(&alloc->mutex);
 	while ((n = rb_first(&alloc->allocated_buffers))) {
-		struct binder_buffer *buffer;
-
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 
 		/* Transaction should already have been freed */

@@ -701,6 +715,16 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 		buffers++;
 	}
 
+	while (!list_empty(&alloc->buffers)) {
+		buffer = list_first_entry(&alloc->buffers,
+					  struct binder_buffer, entry);
+		WARN_ON(!buffer->free);
+
+		list_del(&buffer->entry);
+		WARN_ON_ONCE(!list_empty(&alloc->buffers));
+		kfree(buffer);
+	}
+
 	page_count = 0;
 	if (alloc->pages) {
 		int i;
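A note on the subtlest hunk above, binder_alloc_prepare_to_free_locked(): buffers are now looked up by their data pointer rather than by the struct address, so the offsetof(struct binder_buffer, data) term drops out of the user-to-kernel translation. A compilable sketch of that search, assuming a plain binary tree in place of the kernel's rbtree (all names here are hypothetical):

#include <stddef.h>
#include <stdint.h>

struct buf {
	void *data;			/* kernel address of the payload */
	struct buf *left, *right;	/* stand-in for the rb_node links */
};

/* Translate the user address to the kernel view of the same payload,
 * then search the allocated tree keyed on buf->data. */
static struct buf *find_buf(struct buf *root, uintptr_t user_ptr,
			    ptrdiff_t user_buffer_offset)
{
	void *kern_ptr = (void *)(user_ptr - user_buffer_offset);

	while (root) {
		if (kern_ptr < root->data)
			root = root->left;
		else if (kern_ptr > root->data)
			root = root->right;
		else
			return root;
	}
	return NULL;
}

int main(void)
{
	char payload[32];
	struct buf b = { payload, NULL, NULL };
	ptrdiff_t off = 0x1000;	/* pretend the user mapping sits 4 KiB higher */

	return find_buf(&b, (uintptr_t)payload + off, off) == &b ? 0 : 1;
}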

drivers/android/binder_alloc.h

Lines changed: 1 addition & 1 deletion
@@ -57,7 +57,7 @@ struct binder_buffer {
 	size_t data_size;
 	size_t offsets_size;
 	size_t extra_buffers_size;
-	uint8_t data[0];
+	void *data;
 };
 
 /**
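One ripple effect worth noting: with the metadata off in the kernel heap, an allocation no longer consumes sizeof(struct binder_buffer) bytes of the mmap'ed region, which is why the selftest changes below drop exactly that term from their size accounting.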

drivers/android/binder_alloc_selftest.c

Lines changed: 5 additions & 6 deletions
@@ -105,8 +105,9 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
 	void *page_addr, *end;
 	int page_index;
 
-	end = (void *)PAGE_ALIGN((uintptr_t)buffer + size);
-	for (page_addr = buffer; page_addr < end; page_addr += PAGE_SIZE) {
+	end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+	page_addr = buffer->data;
+	for (; page_addr < end; page_addr += PAGE_SIZE) {
 		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
 		if (!alloc->pages[page_index]) {
 			pr_err("incorrect alloc state at page index %d\n",

@@ -209,8 +210,7 @@ static void binder_selftest_alloc_size(struct binder_alloc *alloc,
 	 * Only BUFFER_NUM - 1 buffer sizes are adjustable since
 	 * we need one giant buffer before getting to the last page.
 	 */
-	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1]
-		- sizeof(struct binder_buffer) * BUFFER_NUM;
+	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
 	binder_selftest_free_seq(alloc, front_sizes, seq, 0);
 	binder_selftest_free_seq(alloc, back_sizes, seq, 0);
 }

@@ -228,8 +228,7 @@ static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
 	prev = index == 0 ? 0 : end_offset[index - 1];
 	end = prev;
 
-	BUILD_BUG_ON((BUFFER_MIN_SIZE + sizeof(struct binder_buffer))
-		* BUFFER_NUM >= PAGE_SIZE);
+	BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
 
 	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
 		if (align % 2)
