@@ -49,27 +49,35 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
 }
 
 /* Called from syscall */
-static struct bpf_map *array_map_alloc(union bpf_attr *attr)
+static int array_map_alloc_check(union bpf_attr *attr)
 {
 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 	int numa_node = bpf_map_attr_numa_node(attr);
-	u32 elem_size, index_mask, max_entries;
-	bool unpriv = !capable(CAP_SYS_ADMIN);
-	struct bpf_array *array;
-	u64 array_size, mask64;
 
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
 	    attr->value_size == 0 ||
 	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
 	    (percpu && numa_node != NUMA_NO_NODE))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (attr->value_size > KMALLOC_MAX_SIZE)
 		/* if value_size is bigger, the user space won't be able to
 		 * access the elements.
 		 */
-		return ERR_PTR(-E2BIG);
+		return -E2BIG;
+
+	return 0;
+}
+
+static struct bpf_map *array_map_alloc(union bpf_attr *attr)
+{
+	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
+	int numa_node = bpf_map_attr_numa_node(attr);
+	u32 elem_size, index_mask, max_entries;
+	bool unpriv = !capable(CAP_SYS_ADMIN);
+	struct bpf_array *array;
+	u64 array_size, mask64;
 
 	elem_size = round_up(attr->value_size, 8);
 
@@ -327,6 +335,7 @@ static void array_map_free(struct bpf_map *map)
 }
 
 const struct bpf_map_ops array_map_ops = {
+	.map_alloc_check = array_map_alloc_check,
 	.map_alloc = array_map_alloc,
 	.map_free = array_map_free,
 	.map_get_next_key = array_map_get_next_key,
@@ -337,6 +346,7 @@ const struct bpf_map_ops array_map_ops = {
 };
 
 const struct bpf_map_ops percpu_array_map_ops = {
+	.map_alloc_check = array_map_alloc_check,
 	.map_alloc = array_map_alloc,
 	.map_free = array_map_free,
 	.map_get_next_key = array_map_get_next_key,
@@ -345,12 +355,12 @@ const struct bpf_map_ops percpu_array_map_ops = {
 	.map_delete_elem = array_map_delete_elem,
 };
 
-static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
+static int fd_array_map_alloc_check(union bpf_attr *attr)
 {
 	/* only file descriptors can be stored in this type of map */
 	if (attr->value_size != sizeof(u32))
-		return ERR_PTR(-EINVAL);
-	return array_map_alloc(attr);
+		return -EINVAL;
+	return array_map_alloc_check(attr);
 }
 
 static void fd_array_map_free(struct bpf_map *map)
@@ -474,7 +484,8 @@ void bpf_fd_array_map_clear(struct bpf_map *map)
 }
 
 const struct bpf_map_ops prog_array_map_ops = {
-	.map_alloc = fd_array_map_alloc,
+	.map_alloc_check = fd_array_map_alloc_check,
+	.map_alloc = array_map_alloc,
 	.map_free = fd_array_map_free,
 	.map_get_next_key = array_map_get_next_key,
 	.map_lookup_elem = fd_array_map_lookup_elem,
@@ -561,7 +572,8 @@ static void perf_event_fd_array_release(struct bpf_map *map,
 }
 
 const struct bpf_map_ops perf_event_array_map_ops = {
-	.map_alloc = fd_array_map_alloc,
+	.map_alloc_check = fd_array_map_alloc_check,
+	.map_alloc = array_map_alloc,
 	.map_free = fd_array_map_free,
 	.map_get_next_key = array_map_get_next_key,
 	.map_lookup_elem = fd_array_map_lookup_elem,
@@ -592,7 +604,8 @@ static void cgroup_fd_array_free(struct bpf_map *map)
 }
 
 const struct bpf_map_ops cgroup_array_map_ops = {
-	.map_alloc = fd_array_map_alloc,
+	.map_alloc_check = fd_array_map_alloc_check,
+	.map_alloc = array_map_alloc,
 	.map_free = cgroup_fd_array_free,
 	.map_get_next_key = array_map_get_next_key,
 	.map_lookup_elem = fd_array_map_lookup_elem,
@@ -610,7 +623,7 @@ static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
 	if (IS_ERR(inner_map_meta))
 		return inner_map_meta;
 
-	map = fd_array_map_alloc(attr);
+	map = array_map_alloc(attr);
 	if (IS_ERR(map)) {
 		bpf_map_meta_free(inner_map_meta);
 		return map;
@@ -673,6 +686,7 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map,
 }
 
 const struct bpf_map_ops array_of_maps_map_ops = {
+	.map_alloc_check = fd_array_map_alloc_check,
 	.map_alloc = array_of_map_alloc,
 	.map_free = array_of_map_free,
 	.map_get_next_key = array_map_get_next_key,
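
For context, the patch splits each array map type's allocation into a pure attribute check that returns 0 or a negative errno (array_map_alloc_check / fd_array_map_alloc_check) and the allocation proper, wiring the check in through the new .map_alloc_check callback on bpf_map_ops so that bad attributes can presumably be rejected by the core map-creation path before any allocation is committed. Below is a minimal user-space C sketch of that check-then-alloc pattern; the fake_* types, demo_* functions and create_map() driver are simplified stand-ins invented for illustration, not the kernel's actual structures or syscall path.

/* Sketch of the ->map_alloc_check / ->map_alloc split (user-space, hypothetical types). */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_attr {                 /* stand-in for union bpf_attr */
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
};

struct fake_map {                  /* stand-in for struct bpf_map */
	struct fake_attr attr;
	void *data;
};

struct fake_map_ops {              /* stand-in for struct bpf_map_ops */
	int (*map_alloc_check)(const struct fake_attr *attr);
	struct fake_map *(*map_alloc)(const struct fake_attr *attr);
};

/* Validation only: touches no memory, so it can run before any allocation
 * or accounting the core code might want to do. */
static int demo_alloc_check(const struct fake_attr *attr)
{
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0)
		return -EINVAL;
	return 0;
}

/* Allocation proper: only reached once the attributes passed the check. */
static struct fake_map *demo_alloc(const struct fake_attr *attr)
{
	struct fake_map *map = calloc(1, sizeof(*map));

	if (!map)
		return NULL;
	map->attr = *attr;
	map->data = calloc(attr->max_entries, attr->value_size);
	if (!map->data) {
		free(map);
		return NULL;
	}
	return map;
}

static const struct fake_map_ops demo_ops = {
	.map_alloc_check = demo_alloc_check,
	.map_alloc = demo_alloc,
};

/* Stand-in for the core map-creation path: run the cheap check first,
 * then the allocation, mirroring the split introduced by the patch. */
static struct fake_map *create_map(const struct fake_map_ops *ops,
				   const struct fake_attr *attr)
{
	if (ops->map_alloc_check) {
		int err = ops->map_alloc_check(attr);

		if (err) {
			errno = -err;
			return NULL;
		}
	}
	return ops->map_alloc(attr);
}

int main(void)
{
	struct fake_attr good = { .key_size = 4, .value_size = 8, .max_entries = 16 };
	struct fake_attr bad  = { .key_size = 8, .value_size = 8, .max_entries = 16 };

	printf("good attrs: %s\n", create_map(&demo_ops, &good) ? "created" : "rejected");
	printf("bad attrs:  %s\n", create_map(&demo_ops, &bad) ? "created" : "rejected");
	return 0;
}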