27
27
#include <linux/slab.h>
28
28
#include <linux/sched.h>
29
29
#include <linux/list_lru.h>
30
+ #include <linux/ratelimit.h>
30
31
#include <asm/cacheflush.h>
31
32
#include "binder_alloc.h"
32
33
#include "binder_trace.h"
@@ -36,19 +37,20 @@ struct list_lru binder_alloc_lru;
36
37
static DEFINE_MUTEX (binder_alloc_mmap_lock );
37
38
38
39
/*
 * Debug-logging categories for the binder allocator.  Each bit can be
 * enabled at runtime via the debug_mask module parameter; a message is
 * emitted by binder_alloc_debug() only when its category bit is set.
 */
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
43
- static uint32_t binder_alloc_debug_mask ;
45
+ static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR ;
44
46
45
47
module_param_named (debug_mask , binder_alloc_debug_mask ,
46
48
uint , 0644 );
47
49
48
50
/*
 * binder_alloc_debug() - conditionally emit a rate-limited debug message.
 * @mask: one BINDER_DEBUG_* category bit to test against the module's
 *        debug mask
 * @x...: printk-style format string and arguments
 *
 * Uses pr_info_ratelimited() rather than pr_info() so that a misbehaving
 * userspace process cannot flood the kernel log (the messages in the
 * BINDER_DEBUG_USER_ERROR category are user-triggerable).
 */
#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)
53
55
54
56
static struct binder_buffer * binder_buffer_next (struct binder_buffer * buffer )
@@ -152,8 +154,10 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
152
154
* free the buffer twice
153
155
*/
154
156
if (buffer -> free_in_progress ) {
155
- pr_err ("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n" ,
156
- alloc -> pid , current -> pid , (u64 )user_ptr );
157
+ binder_alloc_debug (BINDER_DEBUG_USER_ERROR ,
158
+ "%d:%d FREE_BUFFER u%016llx user freed buffer twice\n" ,
159
+ alloc -> pid , current -> pid ,
160
+ (u64 )user_ptr );
157
161
return NULL ;
158
162
}
159
163
buffer -> free_in_progress = 1 ;
@@ -224,8 +228,9 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
224
228
}
225
229
226
230
if (!vma && need_mm ) {
227
- pr_err ("%d: binder_alloc_buf failed to map pages in userspace, no vma\n" ,
228
- alloc -> pid );
231
+ binder_alloc_debug (BINDER_DEBUG_USER_ERROR ,
232
+ "%d: binder_alloc_buf failed to map pages in userspace, no vma\n" ,
233
+ alloc -> pid );
229
234
goto err_no_vma ;
230
235
}
231
236
@@ -344,8 +349,9 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
344
349
int ret ;
345
350
346
351
if (alloc -> vma == NULL ) {
347
- pr_err ("%d: binder_alloc_buf, no vma\n" ,
348
- alloc -> pid );
352
+ binder_alloc_debug (BINDER_DEBUG_USER_ERROR ,
353
+ "%d: binder_alloc_buf, no vma\n" ,
354
+ alloc -> pid );
349
355
return ERR_PTR (- ESRCH );
350
356
}
351
357
@@ -417,11 +423,14 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
417
423
if (buffer_size > largest_free_size )
418
424
largest_free_size = buffer_size ;
419
425
}
420
- pr_err ("%d: binder_alloc_buf size %zd failed, no address space\n" ,
421
- alloc -> pid , size );
422
- pr_err ("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n" ,
423
- total_alloc_size , allocated_buffers , largest_alloc_size ,
424
- total_free_size , free_buffers , largest_free_size );
426
+ binder_alloc_debug (BINDER_DEBUG_USER_ERROR ,
427
+ "%d: binder_alloc_buf size %zd failed, no address space\n" ,
428
+ alloc -> pid , size );
429
+ binder_alloc_debug (BINDER_DEBUG_USER_ERROR ,
430
+ "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n" ,
431
+ total_alloc_size , allocated_buffers ,
432
+ largest_alloc_size , total_free_size ,
433
+ free_buffers , largest_free_size );
425
434
return ERR_PTR (- ENOSPC );
426
435
}
427
436
if (n == NULL ) {
@@ -731,8 +740,10 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
731
740
err_get_vm_area_failed :
732
741
err_already_mapped :
733
742
mutex_unlock (& binder_alloc_mmap_lock );
734
- pr_err ("%s: %d %lx-%lx %s failed %d\n" , __func__ ,
735
- alloc -> pid , vma -> vm_start , vma -> vm_end , failure_string , ret );
743
+ binder_alloc_debug (BINDER_DEBUG_USER_ERROR ,
744
+ "%s: %d %lx-%lx %s failed %d\n" , __func__ ,
745
+ alloc -> pid , vma -> vm_start , vma -> vm_end ,
746
+ failure_string , ret );
736
747
return ret ;
737
748
}
738
749
0 commit comments