Commit 440ffcd

Daniel Borkmann says:

====================
pull-request: bpf 2021-10-26

We've added 12 non-merge commits during the last 7 day(s) which contain
a total of 23 files changed, 118 insertions(+), 98 deletions(-).

The main changes are:

1) Fix potential race window in BPF tail call compatibility check,
   from Toke Høiland-Jørgensen.

2) Fix memory leak in cgroup fs due to missing cgroup_bpf_offline(),
   from Quanyang Wang.

3) Fix file descriptor reference counting in generic_map_update_batch(),
   from Xu Kuohai.

4) Fix bpf_jit_limit knob to the max supported limit by the arch's JIT,
   from Lorenz Bauer.

5) Fix BPF sockmap ->poll callbacks for UDP and AF_UNIX sockets,
   from Cong Wang and Yucong Sun.

6) Fix BPF sockmap concurrency issue in TCP on non-blocking sendmsg calls,
   from Liu Jian.

7) Fix build failure of INODE_STORAGE and TASK_STORAGE maps on
   !CONFIG_NET, from Tejun Heo.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf: Fix potential race in tail call compatibility check
  bpf: Move BPF_MAP_TYPE for INODE_STORAGE and TASK_STORAGE outside of CONFIG_NET
  selftests/bpf: Use recv_timeout() instead of retries
  net: Implement ->sock_is_readable() for UDP and AF_UNIX
  skmsg: Extract and reuse sk_msg_is_readable()
  net: Rename ->stream_memory_read to ->sock_is_readable
  tcp_bpf: Fix one concurrency problem in the tcp_bpf_send_verdict function
  cgroup: Fix memory leak caused by missing cgroup_bpf_offline
  bpf: Fix error usage of map_fd and fdget() in generic_map_update_batch()
  bpf: Prevent increasing bpf_jit_limit above max
  bpf: Define bpf_jit_alloc_exec_limit for arm64 JIT
  bpf: Define bpf_jit_alloc_exec_limit for riscv JIT
====================

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
2 parents 19fa088 + 54713c8 commit 440ffcd
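For context on fix 5) above: the series replaces the TCP-only ->stream_memory_read hook with a generic ->sock_is_readable callback plus a sk_is_readable() wrapper that poll paths can call for any protocol (see the include/net/sock.h, net/ipv4/udp.c and net/unix/af_unix.c hunks below). The following is a minimal userspace sketch of that dispatch pattern; the names and the ingress counter are illustrative stand-ins, not the kernel API:

/* Userspace analogue of the ->sock_is_readable dispatch added by this series.
 * Mirrors the kernel's shape but is only an illustrative sketch. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct sock;                                       /* forward declaration */

struct proto {
        bool (*sock_is_readable)(struct sock *sk); /* optional per-protocol hook */
};

struct sock {
        const struct proto *sk_prot;
        int ingress_queued;                        /* stand-in for psock ingress_msg */
};

/* Mirrors the new sk_is_readable() helper: protocols without the hook
 * simply report "not readable" here. */
static bool sk_is_readable(struct sock *sk)
{
        if (sk->sk_prot->sock_is_readable)
                return sk->sk_prot->sock_is_readable(sk);
        return false;
}

/* Stand-in for sk_msg_is_readable(): readable when the (simulated)
 * psock ingress queue is non-empty. */
static bool msg_is_readable(struct sock *sk)
{
        return sk->ingress_queued > 0;
}

int main(void)
{
        const struct proto plain   = { .sock_is_readable = NULL };
        const struct proto sockmap = { .sock_is_readable = msg_is_readable };
        struct sock a = { .sk_prot = &plain,   .ingress_queued = 1 };
        struct sock b = { .sk_prot = &sockmap, .ingress_queued = 1 };

        /* A poll implementation ORs EPOLLIN | EPOLLRDNORM into its mask when this is true. */
        printf("plain: %d, sockmap: %d\n", sk_is_readable(&a), sk_is_readable(&b));
        return 0;
}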

File tree

23 files changed, 118 insertions (+), 98 deletions (-)


arch/arm64/net/bpf_jit_comp.c

Lines changed: 5 additions & 0 deletions
@@ -1136,6 +1136,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
         return prog;
 }
 
+u64 bpf_jit_alloc_exec_limit(void)
+{
+        return BPF_JIT_REGION_SIZE;
+}
+
 void *bpf_jit_alloc_exec(unsigned long size)
 {
         return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,

arch/riscv/net/bpf_jit_core.c

Lines changed: 5 additions & 0 deletions
@@ -166,6 +166,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
         return prog;
 }
 
+u64 bpf_jit_alloc_exec_limit(void)
+{
+        return BPF_JIT_REGION_SIZE;
+}
+
 void *bpf_jit_alloc_exec(unsigned long size)
 {
         return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,

include/linux/bpf.h

Lines changed: 5 additions & 2 deletions
@@ -929,8 +929,11 @@ struct bpf_array_aux {
          * stored in the map to make sure that all callers and callees have
          * the same prog type and JITed flag.
          */
-        enum bpf_prog_type type;
-        bool jited;
+        struct {
+                spinlock_t lock;
+                enum bpf_prog_type type;
+                bool jited;
+        } owner;
         /* Programs with direct jumps into programs part of this array. */
         struct list_head poke_progs;
         struct bpf_map *map;

include/linux/bpf_types.h

Lines changed: 4 additions & 4 deletions
@@ -101,14 +101,14 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops)
 #endif
 BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
 BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
-#ifdef CONFIG_NET
-BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
-BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)
-BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
 #ifdef CONFIG_BPF_LSM
 BPF_MAP_TYPE(BPF_MAP_TYPE_INODE_STORAGE, inode_storage_map_ops)
 #endif
 BPF_MAP_TYPE(BPF_MAP_TYPE_TASK_STORAGE, task_storage_map_ops)
+#ifdef CONFIG_NET
+BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
 BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
 #if defined(CONFIG_XDP_SOCKETS)
 BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)

include/linux/filter.h

Lines changed: 1 addition & 0 deletions
@@ -1051,6 +1051,7 @@ extern int bpf_jit_enable;
 extern int bpf_jit_harden;
 extern int bpf_jit_kallsyms;
 extern long bpf_jit_limit;
+extern long bpf_jit_limit_max;
 
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
 
include/linux/skmsg.h

Lines changed: 1 addition & 0 deletions
@@ -128,6 +128,7 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
                              struct sk_msg *msg, u32 bytes);
 int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
                    int len, int flags);
+bool sk_msg_is_readable(struct sock *sk);
 
 static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
 {

include/net/sock.h

Lines changed: 7 additions & 1 deletion
@@ -1208,7 +1208,7 @@ struct proto {
 #endif
 
         bool            (*stream_memory_free)(const struct sock *sk, int wake);
-        bool            (*stream_memory_read)(const struct sock *sk);
+        bool            (*sock_is_readable)(struct sock *sk);
         /* Memory pressure */
         void            (*enter_memory_pressure)(struct sock *sk);
         void            (*leave_memory_pressure)(struct sock *sk);
@@ -2820,4 +2820,10 @@ void sock_set_sndtimeo(struct sock *sk, s64 secs);
 
 int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
 
+static inline bool sk_is_readable(struct sock *sk)
+{
+        if (sk->sk_prot->sock_is_readable)
+                return sk->sk_prot->sock_is_readable(sk);
+        return false;
+}
 #endif  /* _SOCK_H */

include/net/tls.h

Lines changed: 1 addition & 1 deletion
@@ -375,7 +375,7 @@ void tls_sw_release_resources_rx(struct sock *sk);
 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
 int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                    int nonblock, int flags, int *addr_len);
-bool tls_sw_stream_read(const struct sock *sk);
+bool tls_sw_sock_is_readable(struct sock *sk);
 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
                            struct pipe_inode_info *pipe,
                            size_t len, unsigned int flags);

kernel/bpf/arraymap.c

Lines changed: 1 addition & 0 deletions
@@ -1072,6 +1072,7 @@ static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
         INIT_WORK(&aux->work, prog_array_map_clear_deferred);
         INIT_LIST_HEAD(&aux->poke_progs);
         mutex_init(&aux->poke_mutex);
+        spin_lock_init(&aux->owner.lock);
 
         map = array_map_alloc(attr);
         if (IS_ERR(map)) {

kernel/bpf/core.c

Lines changed: 16 additions & 8 deletions
@@ -524,6 +524,7 @@ int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
 int bpf_jit_harden   __read_mostly;
 long bpf_jit_limit   __read_mostly;
+long bpf_jit_limit_max __read_mostly;
 
 static void
 bpf_prog_ksym_set_addr(struct bpf_prog *prog)
@@ -817,7 +818,8 @@ u64 __weak bpf_jit_alloc_exec_limit(void)
 static int __init bpf_jit_charge_init(void)
 {
         /* Only used as heuristic here to derive limit. */
-        bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
+        bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
+        bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
                                             PAGE_SIZE), LONG_MAX);
         return 0;
 }
@@ -1821,20 +1823,26 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
 bool bpf_prog_array_compatible(struct bpf_array *array,
                                const struct bpf_prog *fp)
 {
+        bool ret;
+
         if (fp->kprobe_override)
                 return false;
 
-        if (!array->aux->type) {
+        spin_lock(&array->aux->owner.lock);
+
+        if (!array->aux->owner.type) {
                 /* There's no owner yet where we could check for
                  * compatibility.
                  */
-                array->aux->type  = fp->type;
-                array->aux->jited = fp->jited;
-                return true;
+                array->aux->owner.type  = fp->type;
+                array->aux->owner.jited = fp->jited;
+                ret = true;
+        } else {
+                ret = array->aux->owner.type == fp->type &&
+                      array->aux->owner.jited == fp->jited;
         }
-
-        return array->aux->type  == fp->type &&
-               array->aux->jited == fp->jited;
+        spin_unlock(&array->aux->owner.lock);
+        return ret;
 }
 
 static int bpf_check_tail_call(const struct bpf_prog *fp)
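The bpf_prog_array_compatible() hunk above closes the window in which two programs could concurrently pass the unlocked check and record conflicting owner properties. A rough userspace analogue of the fixed check-then-set (illustrative names, with a pthread spinlock standing in for the kernel's spinlock; not kernel code) is:

/* Rough userspace analogue of the locked owner check above. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct owner {
        pthread_spinlock_t lock;
        int type;          /* 0 means "no owner yet", mirrors !owner.type */
        bool jited;
};

static bool compatible(struct owner *o, int type, bool jited)
{
        bool ret;

        pthread_spin_lock(&o->lock);
        if (!o->type) {
                /* First program in: record its properties atomically with the
                 * check, so a concurrent caller cannot also "win" with
                 * different settings. */
                o->type = type;
                o->jited = jited;
                ret = true;
        } else {
                ret = o->type == type && o->jited == jited;
        }
        pthread_spin_unlock(&o->lock);
        return ret;
}

int main(void)
{
        struct owner o = { .type = 0 };
        bool first, second, third;

        pthread_spin_init(&o.lock, PTHREAD_PROCESS_PRIVATE);
        first  = compatible(&o, 1, true);    /* claims ownership           */
        second = compatible(&o, 1, true);    /* matches the recorded owner */
        third  = compatible(&o, 2, false);   /* mismatching, rejected      */
        printf("%d %d %d\n", first, second, third);   /* prints: 1 1 0 */
        pthread_spin_destroy(&o.lock);
        return 0;
}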

kernel/bpf/syscall.c

Lines changed: 7 additions & 4 deletions
@@ -543,8 +543,10 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
 
         if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
                 array = container_of(map, struct bpf_array, map);
-                type  = array->aux->type;
-                jited = array->aux->jited;
+                spin_lock(&array->aux->owner.lock);
+                type  = array->aux->owner.type;
+                jited = array->aux->owner.jited;
+                spin_unlock(&array->aux->owner.lock);
         }
 
         seq_printf(m,
@@ -1337,12 +1339,11 @@ int generic_map_update_batch(struct bpf_map *map,
         void __user *values = u64_to_user_ptr(attr->batch.values);
         void __user *keys = u64_to_user_ptr(attr->batch.keys);
         u32 value_size, cp, max_count;
-        int ufd = attr->map_fd;
+        int ufd = attr->batch.map_fd;
         void *key, *value;
         struct fd f;
         int err = 0;
 
-        f = fdget(ufd);
         if (attr->batch.elem_flags & ~BPF_F_LOCK)
                 return -EINVAL;
 
@@ -1367,6 +1368,7 @@ int generic_map_update_batch(struct bpf_map *map,
                 return -ENOMEM;
         }
 
+        f = fdget(ufd); /* bpf_map_do_batch() guarantees ufd is valid */
         for (cp = 0; cp < max_count; cp++) {
                 err = -EFAULT;
                 if (copy_from_user(key, keys + cp * map->key_size,
@@ -1386,6 +1388,7 @@ int generic_map_update_batch(struct bpf_map *map,
 
         kvfree(value);
         kvfree(key);
+        fdput(f);
         return err;
 }
 
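The generic_map_update_batch() hunks above fix two related problems: the file descriptor is now read from attr->batch.map_fd rather than attr->map_fd, and fdget() is deferred until after the early-return checks so that every reference taken is paired with the new fdput() on exit. A small, self-contained sketch of that acquire-late/release-on-exit pattern (plain C with a stand-in reference counter, not the kernel's fdget()/fdput()) is:

/* Illustrative sketch of the pairing rule restored above: validate first,
 * acquire the reference only on the path that will also release it. */
#include <stdbool.h>
#include <stdio.h>

struct ref { int users; };

static void get_ref(struct ref *r) { r->users++; }
static void put_ref(struct ref *r) { r->users--; }

static int do_batch(struct ref *r, unsigned int flags, unsigned int count)
{
        if (flags != 0)          /* early validation: nothing acquired yet,  */
                return -1;       /* so an early return cannot leak the ref   */

        get_ref(r);              /* acquire only once we are committed       */
        for (unsigned int i = 0; i < count; i++)
                ;                /* per-element work would go here           */
        put_ref(r);              /* the single exit path pairs the acquire   */
        return 0;
}

int main(void)
{
        struct ref r = { .users = 0 };

        do_batch(&r, 1, 4);      /* rejected before acquiring */
        do_batch(&r, 0, 4);      /* acquired and released     */
        printf("leaked references: %d\n", r.users);   /* prints 0 */
        return 0;
}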

kernel/cgroup/cgroup.c

Lines changed: 3 additions & 1 deletion
@@ -2187,8 +2187,10 @@ static void cgroup_kill_sb(struct super_block *sb)
          * And don't kill the default root.
          */
         if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root &&
-            !percpu_ref_is_dying(&root->cgrp.self.refcnt))
+            !percpu_ref_is_dying(&root->cgrp.self.refcnt)) {
+                cgroup_bpf_offline(&root->cgrp);
                 percpu_ref_kill(&root->cgrp.self.refcnt);
+        }
         cgroup_put(&root->cgrp);
         kernfs_kill_sb(sb);
 }

net/core/skmsg.c

Lines changed: 14 additions & 0 deletions
@@ -474,6 +474,20 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
 }
 EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
 
+bool sk_msg_is_readable(struct sock *sk)
+{
+        struct sk_psock *psock;
+        bool empty = true;
+
+        rcu_read_lock();
+        psock = sk_psock(sk);
+        if (likely(psock))
+                empty = list_empty(&psock->ingress_msg);
+        rcu_read_unlock();
+        return !empty;
+}
+EXPORT_SYMBOL_GPL(sk_msg_is_readable);
+
 static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
                                                   struct sk_buff *skb)
 {

net/core/sysctl_net_core.c

Lines changed: 1 addition & 1 deletion
@@ -419,7 +419,7 @@ static struct ctl_table net_core_table[] = {
                 .mode           = 0600,
                 .proc_handler   = proc_dolongvec_minmax_bpf_restricted,
                 .extra1         = &long_one,
-                .extra2         = &long_max,
+                .extra2         = &bpf_jit_limit_max,
         },
 #endif
         {

net/ipv4/tcp.c

Lines changed: 1 addition & 4 deletions
@@ -486,10 +486,7 @@ static bool tcp_stream_is_readable(struct sock *sk, int target)
 {
         if (tcp_epollin_ready(sk, target))
                 return true;
-
-        if (sk->sk_prot->stream_memory_read)
-                return sk->sk_prot->stream_memory_read(sk);
-        return false;
+        return sk_is_readable(sk);
 }
 
 /*

net/ipv4/tcp_bpf.c

Lines changed: 13 additions & 14 deletions
@@ -150,19 +150,6 @@ int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg,
 EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);
 
 #ifdef CONFIG_BPF_SYSCALL
-static bool tcp_bpf_stream_read(const struct sock *sk)
-{
-        struct sk_psock *psock;
-        bool empty = true;
-
-        rcu_read_lock();
-        psock = sk_psock(sk);
-        if (likely(psock))
-                empty = list_empty(&psock->ingress_msg);
-        rcu_read_unlock();
-        return !empty;
-}
-
 static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
                              long timeo)
 {
@@ -232,6 +219,7 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
         bool cork = false, enospc = sk_msg_full(msg);
         struct sock *sk_redir;
         u32 tosend, delta = 0;
+        u32 eval = __SK_NONE;
         int ret;
 
 more_data:
@@ -275,13 +263,24 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
         case __SK_REDIRECT:
                 sk_redir = psock->sk_redir;
                 sk_msg_apply_bytes(psock, tosend);
+                if (!psock->apply_bytes) {
+                        /* Clean up before releasing the sock lock. */
+                        eval = psock->eval;
+                        psock->eval = __SK_NONE;
+                        psock->sk_redir = NULL;
+                }
                 if (psock->cork) {
                         cork = true;
                         psock->cork = NULL;
                 }
                 sk_msg_return(sk, msg, tosend);
                 release_sock(sk);
+
                 ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);
+
+                if (eval == __SK_REDIRECT)
+                        sock_put(sk_redir);
+
                 lock_sock(sk);
                 if (unlikely(ret < 0)) {
                         int free = sk_msg_free_nocharge(sk, msg);
@@ -479,7 +478,7 @@ static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
         prot[TCP_BPF_BASE].unhash  = sock_map_unhash;
         prot[TCP_BPF_BASE].close   = sock_map_close;
         prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg;
-        prot[TCP_BPF_BASE].stream_memory_read = tcp_bpf_stream_read;
+        prot[TCP_BPF_BASE].sock_is_readable = sk_msg_is_readable;
 
         prot[TCP_BPF_TX] = prot[TCP_BPF_BASE];
         prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg;

net/ipv4/udp.c

Lines changed: 3 additions & 0 deletions
@@ -2867,6 +2867,9 @@ __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
             !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
                 mask &= ~(EPOLLIN | EPOLLRDNORM);
 
+        /* psock ingress_msg queue should not contain any bad checksum frames */
+        if (sk_is_readable(sk))
+                mask |= EPOLLIN | EPOLLRDNORM;
         return mask;
 
 }

net/ipv4/udp_bpf.c

Lines changed: 1 addition & 0 deletions
@@ -114,6 +114,7 @@ static void udp_bpf_rebuild_protos(struct proto *prot, const struct proto *base)
         *prot        = *base;
         prot->close  = sock_map_close;
         prot->recvmsg = udp_bpf_recvmsg;
+        prot->sock_is_readable = sk_msg_is_readable;
 }
 
 static void udp_bpf_check_v6_needs_rebuild(struct proto *ops)

net/tls/tls_main.c

Lines changed: 2 additions & 2 deletions
@@ -681,12 +681,12 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
 
         prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
         prot[TLS_BASE][TLS_SW].recvmsg            = tls_sw_recvmsg;
-        prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
+        prot[TLS_BASE][TLS_SW].sock_is_readable   = tls_sw_sock_is_readable;
         prot[TLS_BASE][TLS_SW].close              = tls_sk_proto_close;
 
         prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
         prot[TLS_SW][TLS_SW].recvmsg            = tls_sw_recvmsg;
-        prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
+        prot[TLS_SW][TLS_SW].sock_is_readable   = tls_sw_sock_is_readable;
         prot[TLS_SW][TLS_SW].close              = tls_sk_proto_close;
 
 #ifdef CONFIG_TLS_DEVICE

net/tls/tls_sw.c

Lines changed: 1 addition & 1 deletion
@@ -2026,7 +2026,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
         return copied ? : err;
 }
 
-bool tls_sw_stream_read(const struct sock *sk)
+bool tls_sw_sock_is_readable(struct sock *sk)
 {
         struct tls_context *tls_ctx = tls_get_ctx(sk);
         struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

net/unix/af_unix.c

Lines changed: 4 additions & 0 deletions
@@ -3052,6 +3052,8 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
         /* readable? */
         if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                 mask |= EPOLLIN | EPOLLRDNORM;
+        if (sk_is_readable(sk))
+                mask |= EPOLLIN | EPOLLRDNORM;
 
         /* Connection-based need to check for termination and startup */
         if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
@@ -3091,6 +3093,8 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
         /* readable? */
         if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                 mask |= EPOLLIN | EPOLLRDNORM;
+        if (sk_is_readable(sk))
+                mask |= EPOLLIN | EPOLLRDNORM;
 
         /* Connection-based need to check for termination and startup */
         if (sk->sk_type == SOCK_SEQPACKET) {

net/unix/unix_bpf.c

Lines changed: 2 additions & 0 deletions
@@ -102,6 +102,7 @@ static void unix_dgram_bpf_rebuild_protos(struct proto *prot, const struct proto *base)
         *prot        = *base;
         prot->close  = sock_map_close;
         prot->recvmsg = unix_bpf_recvmsg;
+        prot->sock_is_readable = sk_msg_is_readable;
 }
 
 static void unix_stream_bpf_rebuild_protos(struct proto *prot,
@@ -110,6 +111,7 @@ static void unix_stream_bpf_rebuild_protos(struct proto *prot,
         *prot        = *base;
         prot->close  = sock_map_close;
         prot->recvmsg = unix_bpf_recvmsg;
+        prot->sock_is_readable = sk_msg_is_readable;
         prot->unhash = sock_map_unhash;
 }
 
