@@ -318,9 +318,10 @@ static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *con
318
318
*/
319
319
/*
 * How a buffer's data was allocated, recorded per buffer so
 * free_buffer_data() can release it with the matching free routine
 * and adjust_total_allocated() can charge the right counter.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,		/* per-client kmem_cache (slab_cache) */
	DATA_MODE_KMALLOC = 1,		/* kmalloc(); used for blocks smaller than a page */
	DATA_MODE_GET_FREE_PAGES = 2,	/* get_free_pages / free_pages */
	DATA_MODE_VMALLOC = 3,		/* vmalloc fallback */
	DATA_MODE_LIMIT = 4		/* number of modes; sizes the per-mode counter table */
};
325
326
326
327
struct dm_buffer {
@@ -1062,6 +1063,7 @@ static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
1062
1063
1063
1064
/*
 * Global accounting of buffer-data memory, in bytes, split by
 * allocation mode.  Exposed read-only (0444) through module
 * parameters further down in this file.
 */
static unsigned long dm_bufio_peak_allocated;	/* maximum ever seen in dm_bufio_current_allocated */
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_kmalloc;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;
@@ -1104,6 +1106,7 @@ static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
1104
1106
1105
1107
static unsigned long * const class_ptr [DATA_MODE_LIMIT ] = {
1106
1108
& dm_bufio_allocated_kmem_cache ,
1109
+ & dm_bufio_allocated_kmalloc ,
1107
1110
& dm_bufio_allocated_get_free_pages ,
1108
1111
& dm_bufio_allocated_vmalloc ,
1109
1112
};
@@ -1181,6 +1184,11 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
1181
1184
return kmem_cache_alloc (c -> slab_cache , gfp_mask );
1182
1185
}
1183
1186
1187
+ if (unlikely (c -> block_size < PAGE_SIZE )) {
1188
+ * data_mode = DATA_MODE_KMALLOC ;
1189
+ return kmalloc (c -> block_size , gfp_mask | __GFP_RECLAIMABLE );
1190
+ }
1191
+
1184
1192
if (c -> block_size <= KMALLOC_MAX_SIZE &&
1185
1193
gfp_mask & __GFP_NORETRY ) {
1186
1194
* data_mode = DATA_MODE_GET_FREE_PAGES ;
@@ -1204,6 +1212,10 @@ static void free_buffer_data(struct dm_bufio_client *c,
1204
1212
kmem_cache_free (c -> slab_cache , data );
1205
1213
break ;
1206
1214
1215
+ case DATA_MODE_KMALLOC :
1216
+ kfree (data );
1217
+ break ;
1218
+
1207
1219
case DATA_MODE_GET_FREE_PAGES :
1208
1220
free_pages ((unsigned long )data ,
1209
1221
c -> sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT ));
@@ -2519,8 +2531,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
2519
2531
goto bad_dm_io ;
2520
2532
}
2521
2533
2522
- if (block_size <= KMALLOC_MAX_SIZE &&
2523
- (block_size < PAGE_SIZE || !is_power_of_2 (block_size ))) {
2534
+ if (block_size <= KMALLOC_MAX_SIZE && !is_power_of_2 (block_size )) {
2524
2535
unsigned int align = min (1U << __ffs (block_size ), (unsigned int )PAGE_SIZE );
2525
2536
2526
2537
snprintf (slab_name , sizeof (slab_name ), "dm_bufio_cache-%u-%u" ,
@@ -2902,6 +2913,7 @@ static int __init dm_bufio_init(void)
2902
2913
__u64 mem ;
2903
2914
2904
2915
dm_bufio_allocated_kmem_cache = 0 ;
2916
+ dm_bufio_allocated_kmalloc = 0 ;
2905
2917
dm_bufio_allocated_get_free_pages = 0 ;
2906
2918
dm_bufio_allocated_vmalloc = 0 ;
2907
2919
dm_bufio_current_allocated = 0 ;
@@ -2990,6 +3002,9 @@ MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
2990
3002
module_param_named (allocated_kmem_cache_bytes , dm_bufio_allocated_kmem_cache , ulong , 0444 );
2991
3003
MODULE_PARM_DESC (allocated_kmem_cache_bytes , "Memory allocated with kmem_cache_alloc" );
2992
3004
3005
+ module_param_named (allocated_kmalloc_bytes , dm_bufio_allocated_kmalloc , ulong , 0444 );
3006
+ MODULE_PARM_DESC (allocated_kmalloc_bytes , "Memory allocated with kmalloc_alloc" );
3007
+
2993
3008
module_param_named (allocated_get_free_pages_bytes , dm_bufio_allocated_get_free_pages , ulong , 0444 );
2994
3009
MODULE_PARM_DESC (allocated_get_free_pages_bytes , "Memory allocated with get_free_pages" );
2995
3010
0 commit comments