Skip to content

Commit 738ac1e

Browse files
herbertx authored and davem330 committed
net: Clone skb before setting peeked flag
Shared skbs must not be modified and this is crucial for broadcast and/or multicast paths where we use it as an optimisation to avoid unnecessary cloning.

The function skb_recv_datagram breaks this rule by setting peeked without cloning the skb first. This causes funky races which lead to double-free.

This patch fixes this by cloning the skb and replacing the skb in the list when setting skb->peeked.

Fixes: a59322b ("[UDP]: Only increment counter on first peek/recv")
Reported-by: Konstantin Khlebnikov <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 035d210 commit 738ac1e

File tree

1 file changed

+38
-3
lines changed

1 file changed

+38
-3
lines changed

net/core/datagram.c

Lines changed: 38 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -131,6 +131,35 @@ static int wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
131131
goto out;
132132
}
133133

134+
static int skb_set_peeked(struct sk_buff *skb)
135+
{
136+
struct sk_buff *nskb;
137+
138+
if (skb->peeked)
139+
return 0;
140+
141+
/* We have to unshare an skb before modifying it. */
142+
if (!skb_shared(skb))
143+
goto done;
144+
145+
nskb = skb_clone(skb, GFP_ATOMIC);
146+
if (!nskb)
147+
return -ENOMEM;
148+
149+
skb->prev->next = nskb;
150+
skb->next->prev = nskb;
151+
nskb->prev = skb->prev;
152+
nskb->next = skb->next;
153+
154+
consume_skb(skb);
155+
skb = nskb;
156+
157+
done:
158+
skb->peeked = 1;
159+
160+
return 0;
161+
}
162+
134163
/**
135164
* __skb_recv_datagram - Receive a datagram skbuff
136165
* @sk: socket
@@ -165,7 +194,9 @@ static int wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
165194
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
166195
int *peeked, int *off, int *err)
167196
{
197+
struct sk_buff_head *queue = &sk->sk_receive_queue;
168198
struct sk_buff *skb, *last;
199+
unsigned long cpu_flags;
169200
long timeo;
170201
/*
171202
* Caller is allowed not to check sk->sk_err before skb_recv_datagram()
@@ -184,8 +215,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
184215
* Look at current nfs client by the way...
185216
* However, this function was correct in any case. 8)
186217
*/
187-
unsigned long cpu_flags;
188-
struct sk_buff_head *queue = &sk->sk_receive_queue;
189218
int _off = *off;
190219

191220
last = (struct sk_buff *)queue;
@@ -199,7 +228,11 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
199228
_off -= skb->len;
200229
continue;
201230
}
202-
skb->peeked = 1;
231+
232+
error = skb_set_peeked(skb);
233+
if (error)
234+
goto unlock_err;
235+
203236
atomic_inc(&skb->users);
204237
} else
205238
__skb_unlink(skb, queue);
@@ -223,6 +256,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
223256

224257
return NULL;
225258

259+
unlock_err:
260+
spin_unlock_irqrestore(&queue->lock, cpu_flags);
226261
no_packet:
227262
*err = error;
228263
return NULL;

0 commit comments

Comments
 (0)