Commit 545cd5e

Alexander Duyck authored and davem330 committed
net: Busy polling should ignore sender CPUs
This patch is a cleanup/fix for NAPI IDs following the changes that made it so that sender_cpu and napi_id were doing a better job of sharing the same location in the sk_buff.

One issue I found is that we weren't validating the napi_id as being valid before we started trying to set up busy polling. This change corrects that by using the MIN_NAPI_ID value that is now used both in allocating the NAPI IDs and in validating them.

Signed-off-by: Alexander Duyck <[email protected]>
Acked-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
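The key idea is that the shared 32-bit field is split into three regions, and a value only names a NAPI instance if it is at or above MIN_NAPI_ID. Below is a minimal, self-contained sketch of that partition; NR_CPUS is pinned to an arbitrary value for the example and classify_id() is a hypothetical helper used only for illustration, not code from the patch.

#include <stdio.h>

/* Demo stand-ins: NR_CPUS is fixed here only so the example compiles
 * outside the kernel; MIN_NAPI_ID mirrors the definition added by the patch.
 */
#define NR_CPUS     64
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))

static const char *classify_id(unsigned int id)
{
        if (id == 0)
                return "unset";       /* 0: value not set */
        if (id < MIN_NAPI_ID)
                return "sender_cpu";  /* 1..NR_CPUS */
        return "napi_id";             /* NR_CPUS+1..~0 */
}

int main(void)
{
        unsigned int samples[] = { 0, 12, 64, 65, 4096 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("%u -> %s\n", samples[i], classify_id(samples[i]));

        return 0;
}

Running this prints "unset" for 0, "sender_cpu" for 12 and 64, and "napi_id" for 65 and 4096, which is exactly the check sk_busy_loop() now performs before polling.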
1 parent dcb421f commit 545cd5e

2 files changed, 16 insertions(+), 6 deletions(-)

include/net/busy_poll.h

Lines changed: 7 additions & 2 deletions

@@ -35,6 +35,12 @@ struct napi_struct;
 extern unsigned int sysctl_net_busy_read __read_mostly;
 extern unsigned int sysctl_net_busy_poll __read_mostly;
 
+/* 0 - Reserved to indicate value not set
+ * 1..NR_CPUS - Reserved for sender_cpu
+ * NR_CPUS+1..~0 - Region available for NAPI IDs
+ */
+#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))
+
 static inline bool net_busy_loop_on(void)
 {
         return sysctl_net_busy_poll;
@@ -58,10 +64,9 @@ static inline unsigned long busy_loop_end_time(void)
 
 static inline bool sk_can_busy_loop(const struct sock *sk)
 {
-        return sk->sk_ll_usec && sk->sk_napi_id && !signal_pending(current);
+        return sk->sk_ll_usec && !signal_pending(current);
 }
 
-
 static inline bool busy_loop_timeout(unsigned long end_time)
 {
         unsigned long now = busy_loop_us_clock();
net/core/dev.c

Lines changed: 9 additions & 4 deletions

@@ -5066,15 +5066,20 @@ bool sk_busy_loop(struct sock *sk, int nonblock)
         int (*napi_poll)(struct napi_struct *napi, int budget);
         void *have_poll_lock = NULL;
         struct napi_struct *napi;
+        unsigned int napi_id;
         int rc;
 
 restart:
+        napi_id = READ_ONCE(sk->sk_napi_id);
+        if (napi_id < MIN_NAPI_ID)
+                return 0;
+
         rc = false;
         napi_poll = NULL;
 
         rcu_read_lock();
 
-        napi = napi_by_id(sk->sk_napi_id);
+        napi = napi_by_id(napi_id);
         if (!napi)
                 goto out;
 
@@ -5143,10 +5148,10 @@ static void napi_hash_add(struct napi_struct *napi)
 
         spin_lock(&napi_hash_lock);
 
-        /* 0..NR_CPUS+1 range is reserved for sender_cpu use */
+        /* 0..NR_CPUS range is reserved for sender_cpu use */
         do {
-                if (unlikely(++napi_gen_id < NR_CPUS + 1))
-                        napi_gen_id = NR_CPUS + 1;
+                if (unlikely(++napi_gen_id < MIN_NAPI_ID))
+                        napi_gen_id = MIN_NAPI_ID;
         } while (napi_by_id(napi_gen_id));
         napi->napi_id = napi_gen_id;
 