
Commit 65869a4

borkmann authored and davem330 committed
bpf: improve read-only handling
Improve bpf_{prog,jit_binary}_{un,}lock_ro() by throwing a one-time warning in case of an error when the image couldn't be set read-only, and also mark struct bpf_prog as locked when bpf_prog_lock_ro() was called.

Reason for the latter is that bpf_prog_unlock_ro() is called from various places including error paths, and we shouldn't mess with page attributes when really not needed.

For bpf_jit_binary_unlock_ro() this is not needed as jited flag implicitly indicates this, thus for archs with ARCH_HAS_SET_MEMORY we're guaranteed to have a previously locked image. Overall, this should also help us to identify any further potential issues with set_memory_*() helpers.

Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 1da8ac7 commit 65869a4
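
To make the rationale concrete, here is a minimal user-space C sketch of the same idiom. It is illustrative only: struct fake_prog, prog_lock_ro(), prog_unlock_ro() and the mocked set_memory_ro()/set_memory_rw()/WARN_ON_ONCE() are stand-ins, not the kernel code. It shows why tracking a locked bit keeps error paths (which call the unlock helper unconditionally) from flipping page attributes on an image that was never made read-only.

#include <stdio.h>

/* Mocked stand-ins for the kernel helpers; in the kernel these come from
 * the set_memory_*() API and WARN_ON_ONCE() under CONFIG_ARCH_HAS_SET_MEMORY.
 */
static int set_memory_ro(unsigned long addr, int pages) { (void)addr; (void)pages; return 0; }
static int set_memory_rw(unsigned long addr, int pages) { (void)addr; (void)pages; return 0; }
#define WARN_ON_ONCE(cond) \
	do { if (cond) fprintf(stderr, "warning: %s\n", #cond); } while (0)

struct fake_prog {
	unsigned int pages;	/* number of pages backing the image */
	unsigned int locked:1;	/* set only once the image is really read-only */
};

static void prog_lock_ro(struct fake_prog *fp)
{
	fp->locked = 1;
	WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
}

static void prog_unlock_ro(struct fake_prog *fp)
{
	/* Error paths call this unconditionally; only touch page
	 * attributes if the image was actually locked before.
	 */
	if (fp->locked) {
		WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
		fp->locked = 0;
	}
}

int main(void)
{
	struct fake_prog fp = { .pages = 1 };

	prog_unlock_ro(&fp);	/* early failure path: no-op, locked == 0 */
	prog_lock_ro(&fp);
	prog_unlock_ro(&fp);	/* normal teardown: flips attributes back */
	return 0;
}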

File tree

1 file changed (+12 lines, -4 lines)


include/linux/filter.h

Lines changed: 12 additions & 4 deletions
@@ -409,6 +409,7 @@ struct bpf_prog {
 	u16			pages;		/* Number of allocated pages */
 	kmemcheck_bitfield_begin(meta);
 	u16			jited:1,	/* Is our filter JIT'ed? */
+				locked:1,	/* Program image locked? */
 				gpl_compatible:1, /* Is filter GPL compatible? */
 				cb_access:1,	/* Is control block accessed? */
 				dst_needed:1,	/* Do we need dst entry? */
@@ -554,22 +555,29 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
 #ifdef CONFIG_ARCH_HAS_SET_MEMORY
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 {
-	set_memory_ro((unsigned long)fp, fp->pages);
+	fp->locked = 1;
+	WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
 }
 
 static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 {
-	set_memory_rw((unsigned long)fp, fp->pages);
+	if (fp->locked) {
+		WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
+		/* In case set_memory_rw() fails, we want to be the first
+		 * to crash here instead of some random place later on.
+		 */
+		fp->locked = 0;
+	}
 }
 
 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
 {
-	set_memory_ro((unsigned long)hdr, hdr->pages);
+	WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
 }
 
 static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
 {
-	set_memory_rw((unsigned long)hdr, hdr->pages);
+	WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
 }
 #else
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)

0 commit comments
