
Commit f2517eb

Sherry Yang authored and gregkh committed
android: binder: Add global lru shrinker to binder
Hold on to the pages allocated and mapped for transaction buffers until the
system is under memory pressure. When that happens, use the Linux shrinker to
free pages. Without the shrinker, the patch "android: binder: Move buffer out
of area shared with user space" would cause a significant slowdown for small
transactions that fit into the first page, because the free-list buffer header
used to be inlined with the buffer data.

In addition to preventing that performance regression for small transactions,
this patch improves performance for transactions that take up more than one
page.

Modify the alloc selftest to work with the shrinker change.

Test: Run memory-intensive applications (Chrome and Camera) to trigger
shrinker callbacks. Binder frees memory as expected.
Test: Run binderThroughputTest with the high-memory-pressure option enabled.

Signed-off-by: Sherry Yang <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 74310e0 commit f2517eb

4 files changed: +225 −40 lines changed
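For context on the mechanism the commit message describes: the patch pairs a
global list_lru with a registered shrinker. Pages binder is done with are
parked on the lru list instead of being released, and when the VM applies
memory pressure the shrinker's scan callback walks the list and frees entries.
Below is a minimal, self-contained sketch of that pattern against the
list_lru/shrinker API of this kernel generation (the same call signatures the
diffs below use). The module and the example_* / cached_item names are
hypothetical illustration, not part of this patch.

/*
 * Hypothetical module sketching the list_lru + shrinker pattern.
 * Not binder code; only the shape of the mechanism is shown.
 */
#include <linux/module.h>
#include <linux/list_lru.h>
#include <linux/shrinker.h>
#include <linux/slab.h>

struct cached_item {
        struct list_head lru;           /* entry in example_lru */
        void *payload;                  /* whatever is being cached */
};

static struct list_lru example_lru;

/* Park an item on the global lru instead of freeing it immediately,
 * analogous to what the patch does for binder pages on buffer free. */
static void __maybe_unused example_cache_park(struct cached_item *ci)
{
        WARN_ON(!list_lru_add(&example_lru, &ci->lru));
}

/* Per-item callback for list_lru_walk(); it runs with the lru spinlock
 * held, so only trylocks and non-sleeping frees are safe here. */
static enum lru_status example_free_item(struct list_head *item,
                                         struct list_lru_one *lru,
                                         spinlock_t *lock, void *cb_arg)
{
        struct cached_item *ci = container_of(item, struct cached_item, lru);

        list_lru_isolate(lru, item);    /* unlink from the lru list */
        kfree(ci);
        return LRU_REMOVED;
}

/* Shrinker "count" hook: report how many items could be reclaimed. */
static unsigned long example_count(struct shrinker *shrink,
                                   struct shrink_control *sc)
{
        return list_lru_count(&example_lru);
}

/* Shrinker "scan" hook: reclaim up to sc->nr_to_scan parked items. */
static unsigned long example_scan(struct shrinker *shrink,
                                  struct shrink_control *sc)
{
        return list_lru_walk(&example_lru, example_free_item,
                             NULL, sc->nr_to_scan);
}

static struct shrinker example_shrinker = {
        .count_objects  = example_count,
        .scan_objects   = example_scan,
        .seeks          = DEFAULT_SEEKS,
};

static int __init example_init(void)
{
        int ret = list_lru_init(&example_lru);

        if (ret)
                return ret;
        return register_shrinker(&example_shrinker);
}

static void __exit example_exit(void)
{
        unregister_shrinker(&example_shrinker);
        list_lru_destroy(&example_lru);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");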

drivers/android/binder.c

Lines changed: 2 additions & 0 deletions
@@ -5243,6 +5243,8 @@ static int __init binder_init(void)
         struct binder_device *device;
         struct hlist_node *tmp;
 
+        binder_alloc_shrinker_init();
+
         atomic_set(&binder_transaction_log.cur, ~0U);
         atomic_set(&binder_transaction_log_failed.cur, ~0U);
 
drivers/android/binder_alloc.c

Lines changed: 149 additions & 23 deletions
@@ -27,9 +27,12 @@
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/list_lru.h>
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
+struct list_lru binder_alloc_lru;
+
 static DEFINE_MUTEX(binder_alloc_mmap_lock);
 
 enum {
@@ -188,8 +191,9 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 {
         void *page_addr;
         unsigned long user_page_addr;
-        struct page **page;
-        struct mm_struct *mm;
+        struct binder_lru_page *page;
+        struct mm_struct *mm = NULL;
+        bool need_mm = false;
 
         binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                      "%d: %s pages %pK-%pK\n", alloc->pid,
@@ -200,9 +204,18 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 
         trace_binder_update_page_range(alloc, allocate, start, end);
 
-        if (vma)
-                mm = NULL;
-        else
+        if (allocate == 0)
+                goto free_range;
+
+        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+                page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+                if (!page->page_ptr) {
+                        need_mm = true;
+                        break;
+                }
+        }
+
+        if (!vma && need_mm)
                 mm = get_task_mm(alloc->tsk);
 
         if (mm) {
@@ -215,29 +228,41 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                 }
         }
 
-        if (allocate == 0)
-                goto free_range;
-
-        if (vma == NULL) {
+        if (!vma && need_mm) {
                 pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
                        alloc->pid);
                 goto err_no_vma;
         }
 
         for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                 int ret;
+                bool on_lru;
 
                 page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
 
-                BUG_ON(*page);
-                *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
-                if (*page == NULL) {
+                if (page->page_ptr) {
+                        on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
+                        WARN_ON(!on_lru);
+                        continue;
+                }
+
+                if (WARN_ON(!vma))
+                        goto err_page_ptr_cleared;
+
+                page->page_ptr = alloc_page(GFP_KERNEL |
+                                            __GFP_HIGHMEM |
+                                            __GFP_ZERO);
+                if (!page->page_ptr) {
                         pr_err("%d: binder_alloc_buf failed for page at %pK\n",
                                alloc->pid, page_addr);
                         goto err_alloc_page_failed;
                 }
+                page->alloc = alloc;
+                INIT_LIST_HEAD(&page->lru);
+
                 ret = map_kernel_range_noflush((unsigned long)page_addr,
-                                PAGE_SIZE, PAGE_KERNEL, page);
+                                PAGE_SIZE, PAGE_KERNEL,
+                                &page->page_ptr);
                 flush_cache_vmap((unsigned long)page_addr,
                                 (unsigned long)page_addr + PAGE_SIZE);
                 if (ret != 1) {
@@ -247,7 +272,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                 }
                 user_page_addr =
                         (uintptr_t)page_addr + alloc->user_buffer_offset;
-                ret = vm_insert_page(vma, user_page_addr, page[0]);
+                ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
                 if (ret) {
                         pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
                                alloc->pid, user_page_addr);
@@ -264,16 +289,21 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 free_range:
         for (page_addr = end - PAGE_SIZE; page_addr >= start;
              page_addr -= PAGE_SIZE) {
+                bool ret;
+
                 page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
-                if (vma)
-                        zap_page_range(vma, (uintptr_t)page_addr +
-                                alloc->user_buffer_offset, PAGE_SIZE);
+
+                ret = list_lru_add(&binder_alloc_lru, &page->lru);
+                WARN_ON(!ret);
+                continue;
+
 err_vm_insert_page_failed:
                 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
 err_map_kernel_failed:
-                __free_page(*page);
-                *page = NULL;
+                __free_page(page->page_ptr);
+                page->page_ptr = NULL;
 err_alloc_page_failed:
+err_page_ptr_cleared:
                 ;
         }
 err_no_vma:
@@ -731,16 +761,20 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 
         for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                 void *page_addr;
+                bool on_lru;
 
-                if (!alloc->pages[i])
+                if (!alloc->pages[i].page_ptr)
                         continue;
 
+                on_lru = list_lru_del(&binder_alloc_lru,
+                                      &alloc->pages[i].lru);
                 page_addr = alloc->buffer + i * PAGE_SIZE;
                 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                             "%s: %d: page %d at %pK not freed\n",
-                             __func__, alloc->pid, i, page_addr);
+                             "%s: %d: page %d at %pK %s\n",
+                             __func__, alloc->pid, i, page_addr,
+                             on_lru ? "on lru" : "active");
                 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-                __free_page(alloc->pages[i]);
+                __free_page(alloc->pages[i].page_ptr);
                 page_count++;
         }
         kfree(alloc->pages);
@@ -816,6 +850,93 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
         WRITE_ONCE(alloc->vma_vm_mm, NULL);
 }
 
+/**
+ * binder_alloc_free_page() - shrinker callback to free pages
+ * @item:   item to free
+ * @lock:   lock protecting the item
+ * @cb_arg: callback argument
+ *
+ * Called from list_lru_walk() in binder_shrink_scan() to free
+ * up pages when the system is under memory pressure.
+ */
+enum lru_status binder_alloc_free_page(struct list_head *item,
+                                       struct list_lru_one *lru,
+                                       spinlock_t *lock,
+                                       void *cb_arg)
+{
+        struct mm_struct *mm = NULL;
+        struct binder_lru_page *page = container_of(item,
+                                                    struct binder_lru_page,
+                                                    lru);
+        struct binder_alloc *alloc;
+        uintptr_t page_addr;
+        size_t index;
+
+        alloc = page->alloc;
+        if (!mutex_trylock(&alloc->mutex))
+                goto err_get_alloc_mutex_failed;
+
+        if (!page->page_ptr)
+                goto err_page_already_freed;
+
+        index = page - alloc->pages;
+        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+        if (alloc->vma) {
+                mm = get_task_mm(alloc->tsk);
+                if (!mm)
+                        goto err_get_task_mm_failed;
+                if (!down_write_trylock(&mm->mmap_sem))
+                        goto err_down_write_mmap_sem_failed;
+
+                zap_page_range(alloc->vma,
+                               page_addr + alloc->user_buffer_offset,
+                               PAGE_SIZE);
+
+                up_write(&mm->mmap_sem);
+                mmput(mm);
+        }
+
+        unmap_kernel_range(page_addr, PAGE_SIZE);
+        __free_page(page->page_ptr);
+        page->page_ptr = NULL;
+
+        list_lru_isolate(lru, item);
+
+        mutex_unlock(&alloc->mutex);
+        return LRU_REMOVED;
+
+err_down_write_mmap_sem_failed:
+        mmput(mm);
+err_get_task_mm_failed:
+err_page_already_freed:
+        mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+        return LRU_SKIP;
+}
+
+static unsigned long
+binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+        unsigned long ret = list_lru_count(&binder_alloc_lru);
+        return ret;
+}
+
+static unsigned long
+binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+        unsigned long ret;
+
+        ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+                            NULL, sc->nr_to_scan);
+        return ret;
+}
+
+struct shrinker binder_shrinker = {
+        .count_objects = binder_shrink_count,
+        .scan_objects = binder_shrink_scan,
+        .seeks = DEFAULT_SEEKS,
+};
+
 /**
  * binder_alloc_init() - called by binder_open() for per-proc initialization
  * @alloc: binder_alloc for this proc
@@ -830,3 +951,8 @@ void binder_alloc_init(struct binder_alloc *alloc)
         mutex_init(&alloc->mutex);
 }
 
+void binder_alloc_shrinker_init(void)
+{
+        list_lru_init(&binder_alloc_lru);
+        register_shrinker(&binder_shrinker);
+}

drivers/android/binder_alloc.h

Lines changed: 20 additions & 3 deletions
@@ -21,7 +21,9 @@
 #include <linux/rtmutex.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/list_lru.h>
 
+extern struct list_lru binder_alloc_lru;
 struct binder_transaction;
 
 /**
@@ -60,6 +62,18 @@ struct binder_buffer {
         void *data;
 };
 
+/**
+ * struct binder_lru_page - page object used for binder shrinker
+ * @page_ptr: pointer to physical page in mmap'd space
+ * @lru:      entry in binder_alloc_lru
+ * @alloc:    binder_alloc for a proc
+ */
+struct binder_lru_page {
+        struct list_head lru;
+        struct page *page_ptr;
+        struct binder_alloc *alloc;
+};
+
 /**
  * struct binder_alloc - per-binder proc state for binder allocator
  * @vma:                vm_area_struct passed to mmap_handler
@@ -75,8 +89,7 @@ struct binder_buffer {
  * @allocated_buffers: rb tree of allocated buffers sorted by address
  * @free_async_space:  VA space available for async buffers. This is
  *                     initialized at mmap time to 1/2 the full VA space
- * @pages:             array of physical page addresses for each
- *                     page of mmap'd space
+ * @pages:             array of binder_lru_page
  * @buffer_size:       size of address space specified via mmap
  * @pid:               pid for associated binder_proc (invariant after init)
  *
@@ -96,7 +109,7 @@ struct binder_alloc {
         struct rb_root free_buffers;
         struct rb_root allocated_buffers;
         size_t free_async_space;
-        struct page **pages;
+        struct binder_lru_page *pages;
         size_t buffer_size;
         uint32_t buffer_free;
         int pid;
@@ -107,12 +120,16 @@ void binder_selftest_alloc(struct binder_alloc *alloc);
 #else
 static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
 #endif
+enum lru_status binder_alloc_free_page(struct list_head *item,
+                                       struct list_lru_one *lru,
+                                       spinlock_t *lock, void *cb_arg);
 extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                                   size_t data_size,
                                                   size_t offsets_size,
                                                   size_t extra_buffers_size,
                                                   int is_async);
 extern void binder_alloc_init(struct binder_alloc *alloc);
+void binder_alloc_shrinker_init(void);
 extern void binder_alloc_vma_close(struct binder_alloc *alloc);
 extern struct binder_buffer *
 binder_alloc_prepare_to_free(struct binder_alloc *alloc,
