 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/list_lru.h>
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
+struct list_lru binder_alloc_lru;
+
 static DEFINE_MUTEX(binder_alloc_mmap_lock);
 
 enum {
@@ -188,8 +191,9 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 {
 	void *page_addr;
 	unsigned long user_page_addr;
-	struct page **page;
-	struct mm_struct *mm;
+	struct binder_lru_page *page;
+	struct mm_struct *mm = NULL;
+	bool need_mm = false;
 
 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 		     "%d: %s pages %pK-%pK\n", alloc->pid,
@@ -200,9 +204,18 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 
 	trace_binder_update_page_range(alloc, allocate, start, end);
 
-	if (vma)
-		mm = NULL;
-	else
+	if (allocate == 0)
+		goto free_range;
+
+	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+		if (!page->page_ptr) {
+			need_mm = true;
+			break;
+		}
+	}
+
+	if (!vma && need_mm)
 		mm = get_task_mm(alloc->tsk);
 
 	if (mm) {
@@ -215,29 +228,41 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 		}
 	}
 
-	if (allocate == 0)
-		goto free_range;
-
-	if (vma == NULL) {
+	if (!vma && need_mm) {
 		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
 			alloc->pid);
 		goto err_no_vma;
 	}
 
 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
 		int ret;
+		bool on_lru;
 
 		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
 
-		BUG_ON(*page);
-		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
-		if (*page == NULL) {
+		if (page->page_ptr) {
+			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
+			WARN_ON(!on_lru);
+			continue;
+		}
+
+		if (WARN_ON(!vma))
+			goto err_page_ptr_cleared;
+
+		page->page_ptr = alloc_page(GFP_KERNEL |
+					    __GFP_HIGHMEM |
+					    __GFP_ZERO);
+		if (!page->page_ptr) {
 			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
 				alloc->pid, page_addr);
 			goto err_alloc_page_failed;
 		}
+		page->alloc = alloc;
+		INIT_LIST_HEAD(&page->lru);
+
 		ret = map_kernel_range_noflush((unsigned long)page_addr,
-				PAGE_SIZE, PAGE_KERNEL, page);
+					       PAGE_SIZE, PAGE_KERNEL,
+					       &page->page_ptr);
 		flush_cache_vmap((unsigned long)page_addr,
 				(unsigned long)page_addr + PAGE_SIZE);
 		if (ret != 1) {
@@ -247,7 +272,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 		}
 		user_page_addr =
 			(uintptr_t)page_addr + alloc->user_buffer_offset;
-		ret = vm_insert_page(vma, user_page_addr, page[0]);
+		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
 		if (ret) {
 			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
 				alloc->pid, user_page_addr);
@@ -264,16 +289,21 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 free_range:
 	for (page_addr = end - PAGE_SIZE; page_addr >= start;
 	     page_addr -= PAGE_SIZE) {
+		bool ret;
+
 		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
-		if (vma)
-			zap_page_range(vma, (uintptr_t)page_addr +
-				alloc->user_buffer_offset, PAGE_SIZE);
+
+		ret = list_lru_add(&binder_alloc_lru, &page->lru);
+		WARN_ON(!ret);
+		continue;
+
 err_vm_insert_page_failed:
 		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
 err_map_kernel_failed:
-		__free_page(*page);
-		*page = NULL;
+		__free_page(page->page_ptr);
+		page->page_ptr = NULL;
 err_alloc_page_failed:
+err_page_ptr_cleared:
 		;
 	}
 err_no_vma:
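
This hunk assumes that alloc->pages no longer holds bare struct page pointers but per-page bookkeeping entries carrying the LRU linkage used by the shrinker added below. The companion header change (presumably in binder_alloc.h, not shown in this diff) would need a structure along these lines, inferred from the fields referenced here (page_ptr, lru, alloc):

struct binder_lru_page {
	struct list_head lru;		/* entry on binder_alloc_lru while the page is reclaimable */
	struct page *page_ptr;		/* backing page; NULL after the shrinker frees it */
	struct binder_alloc *alloc;	/* owning per-process allocator, needed by the shrinker callback */
};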
@@ -731,16 +761,20 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 
 	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
 		void *page_addr;
+		bool on_lru;
 
-		if (!alloc->pages[i])
+		if (!alloc->pages[i].page_ptr)
 			continue;
 
+		on_lru = list_lru_del(&binder_alloc_lru,
+				      &alloc->pages[i].lru);
 		page_addr = alloc->buffer + i * PAGE_SIZE;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%s: %d: page %d at %pK not freed\n",
-			     __func__, alloc->pid, i, page_addr);
+			     "%s: %d: page %d at %pK %s\n",
+			     __func__, alloc->pid, i, page_addr,
+			     on_lru ? "on lru" : "active");
 		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-		__free_page(alloc->pages[i]);
+		__free_page(alloc->pages[i].page_ptr);
 		page_count++;
 	}
 	kfree(alloc->pages);
@@ -816,6 +850,93 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
 	WRITE_ONCE(alloc->vma_vm_mm, NULL);
 }
 
+/**
+ * binder_alloc_free_page() - shrinker callback to free pages
+ * @item:   item to free
+ * @lock:   lock protecting the item
+ * @cb_arg: callback argument
+ *
+ * Called from list_lru_walk() in binder_shrink_scan() to free
+ * up pages when the system is under memory pressure.
+ */
+enum lru_status binder_alloc_free_page(struct list_head *item,
+				       struct list_lru_one *lru,
+				       spinlock_t *lock,
+				       void *cb_arg)
+{
+	struct mm_struct *mm = NULL;
+	struct binder_lru_page *page = container_of(item,
+						    struct binder_lru_page,
+						    lru);
+	struct binder_alloc *alloc;
+	uintptr_t page_addr;
+	size_t index;
+
+	alloc = page->alloc;
+	if (!mutex_trylock(&alloc->mutex))
+		goto err_get_alloc_mutex_failed;
+
+	if (!page->page_ptr)
+		goto err_page_already_freed;
+
+	index = page - alloc->pages;
+	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+	if (alloc->vma) {
+		mm = get_task_mm(alloc->tsk);
+		if (!mm)
+			goto err_get_task_mm_failed;
+		if (!down_write_trylock(&mm->mmap_sem))
+			goto err_down_write_mmap_sem_failed;
+
+		zap_page_range(alloc->vma,
+			       page_addr + alloc->user_buffer_offset,
+			       PAGE_SIZE);
+
+		up_write(&mm->mmap_sem);
+		mmput(mm);
+	}
+
+	unmap_kernel_range(page_addr, PAGE_SIZE);
+	__free_page(page->page_ptr);
+	page->page_ptr = NULL;
+
+	list_lru_isolate(lru, item);
+
+	mutex_unlock(&alloc->mutex);
+	return LRU_REMOVED;
+
+err_down_write_mmap_sem_failed:
+	mmput(mm);
+err_get_task_mm_failed:
+err_page_already_freed:
+	mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+	return LRU_SKIP;
+}
+
+static unsigned long
+binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned long ret = list_lru_count(&binder_alloc_lru);
+	return ret;
+}
+
+static unsigned long
+binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned long ret;
+
+	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+			    NULL, sc->nr_to_scan);
+	return ret;
+}
+
+struct shrinker binder_shrinker = {
+	.count_objects = binder_shrink_count,
+	.scan_objects = binder_shrink_scan,
+	.seeks = DEFAULT_SEEKS,
+};
+
 /**
  * binder_alloc_init() - called by binder_open() for per-proc initialization
  * @alloc:	binder_alloc for this proc
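
A note on the locking in binder_alloc_free_page() above: the callback can run from direct reclaim in nearly any allocation context, so it uses mutex_trylock() on alloc->mutex and down_write_trylock() on mmap_sem rather than blocking, and bails out with LRU_SKIP when either lock is contended, when the owning task's mm is gone, or when the page was already freed. Only a page that is actually zapped from userspace, unmapped from kernel space, and freed is isolated from the LRU and reported as LRU_REMOVED, which is what list_lru_walk() counts toward the scan result.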
@@ -830,3 +951,8 @@ void binder_alloc_init(struct binder_alloc *alloc)
 	mutex_init(&alloc->mutex);
 }
 
+void binder_alloc_shrinker_init(void)
+{
+	list_lru_init(&binder_alloc_lru);
+	register_shrinker(&binder_shrinker);
+}
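
binder_alloc_shrinker_init() still needs to be called once from the driver's init path (presumably binder_init(); that call site lives outside this file and is not part of this diff). Both list_lru_init() and register_shrinker() can fail, and the version above ignores their return values; a hedged sketch of an error-propagating variant, assuming the caller is willing to fail driver initialization on error, could look like:

int binder_alloc_shrinker_init(void)
{
	int ret = list_lru_init(&binder_alloc_lru);

	if (ret)
		return ret;

	ret = register_shrinker(&binder_shrinker);
	if (ret)
		list_lru_destroy(&binder_alloc_lru);	/* undo on failure */

	return ret;
}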