
Commit d7d16a8

edumazet authored and davem330 committed
net: add skb_queue_empty_lockless()
Some paths call skb_queue_empty() without holding the queue lock. We must use a barrier to keep the compiler from doing strange things and to avoid KCSAN splats.

Adding a barrier in skb_queue_empty() might be overkill, so I prefer adding a new helper that clearly identifies the points where callers might be lockless. This might help us find real bugs.

The corresponding WRITE_ONCE() should add zero cost for current compilers.

Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
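To make the "lockless callers" above concrete, here is a hedged, hypothetical sketch of the kind of reader the commit has in mind: a poll-style path that only needs a best-effort emptiness check and deliberately skips the queue lock. The function name and the choice of sk->sk_receive_queue are illustrative assumptions, not taken from this commit.

#include <linux/poll.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Hypothetical caller (not from this commit): peeks at queue emptiness
 * without taking sk_receive_queue.lock. With a plain load the compiler
 * could tear, refetch or cache list->next; the READ_ONCE() inside
 * skb_queue_empty_lockless() rules that out and marks the racy read as
 * intentional for KCSAN.
 */
static __poll_t example_poll_mask(struct sock *sk)
{
	__poll_t mask = 0;

	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}

Such a caller only gets a snapshot; anything that needs a stable answer must still take the lock, which is why skb_queue_empty() is left unchanged and the lockless case gets a separately named helper.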
Parent: fc11078

File tree: 1 file changed (+24, -9 lines)

include/linux/skbuff.h

Lines changed: 24 additions & 9 deletions
@@ -1495,6 +1495,19 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
 	return list->next == (const struct sk_buff *) list;
 }
 
+/**
+ *	skb_queue_empty_lockless - check if a queue is empty
+ *	@list: queue head
+ *
+ *	Returns true if the queue is empty, false otherwise.
+ *	This variant can be used in lockless contexts.
+ */
+static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
+{
+	return READ_ONCE(list->next) == (const struct sk_buff *) list;
+}
+
+
 /**
  *	skb_queue_is_last - check if skb is the last entry in the queue
  *	@list: queue head
@@ -1848,9 +1861,11 @@ static inline void __skb_insert(struct sk_buff *newsk,
 				struct sk_buff *prev, struct sk_buff *next,
 				struct sk_buff_head *list)
 {
-	newsk->next = next;
-	newsk->prev = prev;
-	next->prev = prev->next = newsk;
+	/* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
+	WRITE_ONCE(newsk->next, next);
+	WRITE_ONCE(newsk->prev, prev);
+	WRITE_ONCE(next->prev, newsk);
+	WRITE_ONCE(prev->next, newsk);
 	list->qlen++;
 }
 
@@ -1861,11 +1876,11 @@ static inline void __skb_queue_splice(const struct sk_buff_head *list,
 	struct sk_buff *first = list->next;
 	struct sk_buff *last = list->prev;
 
-	first->prev = prev;
-	prev->next = first;
+	WRITE_ONCE(first->prev, prev);
+	WRITE_ONCE(prev->next, first);
 
-	last->next = next;
-	next->prev = last;
+	WRITE_ONCE(last->next, next);
+	WRITE_ONCE(next->prev, last);
 }
 
 /**
@@ -2006,8 +2021,8 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 	next = skb->next;
 	prev = skb->prev;
 	skb->next = skb->prev = NULL;
-	next->prev = prev;
-	prev->next = next;
+	WRITE_ONCE(next->prev, prev);
+	WRITE_ONCE(prev->next, next);
 }
 
 /**
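Taken together, the diff sets up a pairing: the list-linking stores in __skb_insert(), __skb_queue_splice() and __skb_unlink() now go through WRITE_ONCE(), and the lockless reader goes through READ_ONCE(). The following userspace sketch models that pairing with C11 relaxed atomics; it is an analogy under stated assumptions, not the kernel implementation, and the names (node, queue_empty_lockless, queue_insert) are made up for illustration.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	_Atomic(struct node *) next;
	_Atomic(struct node *) prev;
};

/* Reader side: one well-defined load, analogous to READ_ONCE(list->next). */
static bool queue_empty_lockless(struct node *head)
{
	return atomic_load_explicit(&head->next, memory_order_relaxed) == head;
}

/* Writer side: publish each pointer with a single store, analogous to the
 * WRITE_ONCE() calls added to __skb_insert().
 */
static void queue_insert(struct node *newsk, struct node *prev, struct node *next)
{
	atomic_store_explicit(&newsk->next, next, memory_order_relaxed);
	atomic_store_explicit(&newsk->prev, prev, memory_order_relaxed);
	atomic_store_explicit(&next->prev, newsk, memory_order_relaxed);
	atomic_store_explicit(&prev->next, newsk, memory_order_relaxed);
}

int main(void)
{
	struct node head, n;

	/* Empty circular list: the head points at itself. */
	atomic_init(&head.next, &head);
	atomic_init(&head.prev, &head);

	printf("empty before insert: %d\n", queue_empty_lockless(&head));
	queue_insert(&n, &head, &head);
	printf("empty after insert:  %d\n", queue_empty_lockless(&head));
	return 0;
}

Like READ_ONCE()/WRITE_ONCE(), relaxed accesses only constrain the compiler (no tearing, fusing, or re-reading of the pointer) and add no ordering or locking, which matches the commit message's expectation of zero cost on current compilers.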

Comments (0)