Skip to content

Commit daffc5a

Browse files
Jakub Kicinski authored and Daniel Borkmann committed
bpf: hashtab: move attribute validation before allocation
A number of attribute checks are currently performed after the hashtab is already allocated. Move them earlier so they can be split out into a check function later on. The checks now have to be performed on the attr union directly instead of on the members of bpf_map, since bpf_map will be allocated later. No functional changes. Signed-off-by: Jakub Kicinski <[email protected]> Reviewed-by: Quentin Monnet <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]>
1 parent 1110f3a commit daffc5a

File tree

1 file changed

+23
-24
lines changed

1 file changed

+23
-24
lines changed

kernel/bpf/hashtab.c

Lines changed: 23 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -269,6 +269,28 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
269269
if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
270270
return ERR_PTR(-EINVAL);
271271

272+
/* check sanity of attributes.
273+
* value_size == 0 may be allowed in the future to use map as a set
274+
*/
275+
if (attr->max_entries == 0 || attr->key_size == 0 ||
276+
attr->value_size == 0)
277+
return ERR_PTR(-EINVAL);
278+
279+
if (attr->key_size > MAX_BPF_STACK)
280+
/* eBPF programs initialize keys on stack, so they cannot be
281+
* larger than max stack size
282+
*/
283+
return ERR_PTR(-E2BIG);
284+
285+
if (attr->value_size >= KMALLOC_MAX_SIZE -
286+
MAX_BPF_STACK - sizeof(struct htab_elem))
287+
/* if value_size is bigger, the user space won't be able to
288+
* access the elements via bpf syscall. This check also makes
289+
* sure that the elem_size doesn't overflow and it's
290+
* kmalloc-able later in htab_map_update_elem()
291+
*/
292+
return ERR_PTR(-E2BIG);
293+
272294
htab = kzalloc(sizeof(*htab), GFP_USER);
273295
if (!htab)
274296
return ERR_PTR(-ENOMEM);
@@ -281,14 +303,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
281303
htab->map.map_flags = attr->map_flags;
282304
htab->map.numa_node = numa_node;
283305

284-
/* check sanity of attributes.
285-
* value_size == 0 may be allowed in the future to use map as a set
286-
*/
287-
err = -EINVAL;
288-
if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
289-
htab->map.value_size == 0)
290-
goto free_htab;
291-
292306
if (percpu_lru) {
293307
/* ensure each CPU's lru list has >=1 elements.
294308
* since we are at it, make each lru list has the same
@@ -304,29 +318,14 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
304318
/* hash table size must be power of 2 */
305319
htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
306320

307-
err = -E2BIG;
308-
if (htab->map.key_size > MAX_BPF_STACK)
309-
/* eBPF programs initialize keys on stack, so they cannot be
310-
* larger than max stack size
311-
*/
312-
goto free_htab;
313-
314-
if (htab->map.value_size >= KMALLOC_MAX_SIZE -
315-
MAX_BPF_STACK - sizeof(struct htab_elem))
316-
/* if value_size is bigger, the user space won't be able to
317-
* access the elements via bpf syscall. This check also makes
318-
* sure that the elem_size doesn't overflow and it's
319-
* kmalloc-able later in htab_map_update_elem()
320-
*/
321-
goto free_htab;
322-
323321
htab->elem_size = sizeof(struct htab_elem) +
324322
round_up(htab->map.key_size, 8);
325323
if (percpu)
326324
htab->elem_size += sizeof(void *);
327325
else
328326
htab->elem_size += round_up(htab->map.value_size, 8);
329327

328+
err = -E2BIG;
330329
/* prevent zero size kmalloc and check for u32 overflow */
331330
if (htab->n_buckets == 0 ||
332331
htab->n_buckets > U32_MAX / sizeof(struct bucket))

0 commit comments

Comments
 (0)