Skip to content

Commit 50bbfed

Browse files
4ast authored and davem330 committed
bpf: track stack depth of classic bpf programs
To track stack depth of classic bpf programs we only need to analyze ST|STX instructions, since check_load_and_stores() verifies that programs can load from stack only after write. We also need to change the way cBPF stack slots map to eBPF stack, since typical classic programs are using slots 0 and 1, so they need to map to stack offsets -4 and -8 respectively in order to take advantage of small stack interpreter and JITs. Signed-off-by: Alexei Starovoitov <[email protected]> Acked-by: Daniel Borkmann <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 80a58d0 commit 50bbfed

File tree

1 file changed

+22
-14
lines changed

1 file changed

+22
-14
lines changed

net/core/filter.c

Lines changed: 22 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -352,7 +352,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
352352
* bpf_convert_filter - convert filter program
353353
* @prog: the user passed filter program
354354
* @len: the length of the user passed filter program
355-
* @new_prog: buffer where converted program will be stored
355+
* @new_prog: allocated 'struct bpf_prog' or NULL
356356
* @new_len: pointer to store length of converted program
357357
*
358358
* Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
@@ -364,14 +364,13 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
364364
*
365365
* 2) 2nd pass to remap in two passes: 1st pass finds new
366366
* jump offsets, 2nd pass remapping:
367-
* new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
368367
* bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
369368
*/
370369
static int bpf_convert_filter(struct sock_filter *prog, int len,
371-
struct bpf_insn *new_prog, int *new_len)
370+
struct bpf_prog *new_prog, int *new_len)
372371
{
373-
int new_flen = 0, pass = 0, target, i;
374-
struct bpf_insn *new_insn;
372+
int new_flen = 0, pass = 0, target, i, stack_off;
373+
struct bpf_insn *new_insn, *first_insn = NULL;
375374
struct sock_filter *fp;
376375
int *addrs = NULL;
377376
u8 bpf_src;
@@ -383,18 +382,19 @@ static int bpf_convert_filter(struct sock_filter *prog, int len,
383382
return -EINVAL;
384383

385384
if (new_prog) {
385+
first_insn = new_prog->insnsi;
386386
addrs = kcalloc(len, sizeof(*addrs),
387387
GFP_KERNEL | __GFP_NOWARN);
388388
if (!addrs)
389389
return -ENOMEM;
390390
}
391391

392392
do_pass:
393-
new_insn = new_prog;
393+
new_insn = first_insn;
394394
fp = prog;
395395

396396
/* Classic BPF related prologue emission. */
397-
if (new_insn) {
397+
if (new_prog) {
398398
/* Classic BPF expects A and X to be reset first. These need
399399
* to be guaranteed to be the first two instructions.
400400
*/
@@ -415,7 +415,7 @@ static int bpf_convert_filter(struct sock_filter *prog, int len,
415415
struct bpf_insn *insn = tmp_insns;
416416

417417
if (addrs)
418-
addrs[i] = new_insn - new_prog;
418+
addrs[i] = new_insn - first_insn;
419419

420420
switch (fp->code) {
421421
/* All arithmetic insns and skb loads map as-is. */
@@ -561,17 +561,25 @@ static int bpf_convert_filter(struct sock_filter *prog, int len,
561561
/* Store to stack. */
562562
case BPF_ST:
563563
case BPF_STX:
564+
stack_off = fp->k * 4 + 4;
564565
*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
565566
BPF_ST ? BPF_REG_A : BPF_REG_X,
566-
-(BPF_MEMWORDS - fp->k) * 4);
567+
-stack_off);
568+
/* check_load_and_stores() verifies that classic BPF can
569+
* load from stack only after write, so tracking
570+
* stack_depth for ST|STX insns is enough
571+
*/
572+
if (new_prog && new_prog->aux->stack_depth < stack_off)
573+
new_prog->aux->stack_depth = stack_off;
567574
break;
568575

569576
/* Load from stack. */
570577
case BPF_LD | BPF_MEM:
571578
case BPF_LDX | BPF_MEM:
579+
stack_off = fp->k * 4 + 4;
572580
*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
573581
BPF_REG_A : BPF_REG_X, BPF_REG_FP,
574-
-(BPF_MEMWORDS - fp->k) * 4);
582+
-stack_off);
575583
break;
576584

577585
/* A = K or X = K */
@@ -619,13 +627,13 @@ static int bpf_convert_filter(struct sock_filter *prog, int len,
619627

620628
if (!new_prog) {
621629
/* Only calculating new length. */
622-
*new_len = new_insn - new_prog;
630+
*new_len = new_insn - first_insn;
623631
return 0;
624632
}
625633

626634
pass++;
627-
if (new_flen != new_insn - new_prog) {
628-
new_flen = new_insn - new_prog;
635+
if (new_flen != new_insn - first_insn) {
636+
new_flen = new_insn - first_insn;
629637
if (pass > 2)
630638
goto err;
631639
goto do_pass;
@@ -1017,7 +1025,7 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
10171025
fp->len = new_len;
10181026

10191027
/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
1020-
err = bpf_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
1028+
err = bpf_convert_filter(old_prog, old_len, fp, &new_len);
10211029
if (err)
10221030
/* 2nd bpf_convert_filter() can fail only if it fails
10231031
* to allocate memory, remapping must succeed. Note,

0 commit comments

Comments
 (0)