
Commit 5d4cc87

edumazet authored and Paolo Abeni committed
net: reorganize "struct sock" fields
Last major reorg happened in commit 9115e8c ("net: reorganize struct
sock for better data locality").

Since then, many changes have been done.

Before SO_PEEK_OFF support is added to TCP, we need to move
sk_peek_off to a better location.

It is time to make another pass, and add six groups, without
explicit alignment.

- sock_write_rx (following sk_refcnt) read-write fields in rx path.
- sock_read_rx read-mostly fields in rx path.
- sock_read_rxtx read-mostly fields in both rx and tx paths.
- sock_write_rxtx read-write fields in both rx and tx paths.
- sock_write_tx read-write fields in tx paths.
- sock_read_tx read-mostly fields in tx paths.

Results on TCP_RR benchmarks seem to show a gain (4 to 5 %).

It is possible UDP needs a change, because sk_peek_off shares a cache
line with sk_receive_queue. If this is the case, we can exchange roles
of sk->sk_receive and up->reader_queue queues.

After this change, we have the following layout:

struct sock {
	struct sock_common __sk_common;                  /*      0  0x88 */
	/* --- cacheline 2 boundary (128 bytes) was 8 bytes ago --- */
	__u8 __cacheline_group_begin__sock_write_rx[0];  /*   0x88     0 */
	atomic_t sk_drops;                               /*   0x88   0x4 */
	__s32 sk_peek_off;                               /*   0x8c   0x4 */
	struct sk_buff_head sk_error_queue;              /*   0x90  0x18 */
	struct sk_buff_head sk_receive_queue;            /*   0xa8  0x18 */
	/* --- cacheline 3 boundary (192 bytes) --- */
	struct {
		atomic_t rmem_alloc;                     /*   0xc0   0x4 */
		int len;                                 /*   0xc4   0x4 */
		struct sk_buff *head;                    /*   0xc8   0x8 */
		struct sk_buff *tail;                    /*   0xd0   0x8 */
	} sk_backlog;                                    /*   0xc0  0x18 */
	struct {
		atomic_t rmem_alloc;                     /*      0   0x4 */
		int len;                                 /*    0x4   0x4 */
		struct sk_buff *head;                    /*    0x8   0x8 */
		struct sk_buff *tail;                    /*   0x10   0x8 */

		/* size: 24, cachelines: 1, members: 4 */
		/* last cacheline: 24 bytes */
	};
	__u8 __cacheline_group_end__sock_write_rx[0];    /*   0xd8     0 */
	__u8 __cacheline_group_begin__sock_read_rx[0];   /*   0xd8     0 */
	rcu *sk_rx_dst;                                  /*   0xd8   0x8 */
	int sk_rx_dst_ifindex;                           /*   0xe0   0x4 */
	u32 sk_rx_dst_cookie;                            /*   0xe4   0x4 */
	unsigned int sk_ll_usec;                         /*   0xe8   0x4 */
	unsigned int sk_napi_id;                         /*   0xec   0x4 */
	u16 sk_busy_poll_budget;                         /*   0xf0   0x2 */
	u8 sk_prefer_busy_poll;                          /*   0xf2   0x1 */
	u8 sk_userlocks;                                 /*   0xf3   0x1 */
	int sk_rcvbuf;                                   /*   0xf4   0x4 */
	rcu *sk_filter;                                  /*   0xf8   0x8 */
	/* --- cacheline 4 boundary (256 bytes) --- */
	union {
		rcu *sk_wq;                              /*  0x100   0x8 */
		struct socket_wq *sk_wq_raw;             /*  0x100   0x8 */
	};                                               /*  0x100   0x8 */
	union {
		rcu *sk_wq;                              /*      0   0x8 */
		struct socket_wq *sk_wq_raw;             /*      0   0x8 */
	};
	void (*sk_data_ready)(struct sock *);            /*  0x108   0x8 */
	long sk_rcvtimeo;                                /*  0x110   0x8 */
	int sk_rcvlowat;                                 /*  0x118   0x4 */
	__u8 __cacheline_group_end__sock_read_rx[0];     /*  0x11c     0 */
	__u8 __cacheline_group_begin__sock_read_rxtx[0]; /*  0x11c     0 */
	int sk_err;                                      /*  0x11c   0x4 */
	struct socket *sk_socket;                        /*  0x120   0x8 */
	struct mem_cgroup *sk_memcg;                     /*  0x128   0x8 */
	rcu *sk_policy[2];                               /*  0x130  0x10 */
	/* --- cacheline 5 boundary (320 bytes) --- */
	__u8 __cacheline_group_end__sock_read_rxtx[0];   /*  0x140     0 */
	__u8 __cacheline_group_begin__sock_write_rxtx[0]; /* 0x140     0 */
	socket_lock_t sk_lock;                           /*  0x140  0x20 */
	u32 sk_reserved_mem;                             /*  0x160   0x4 */
	int sk_forward_alloc;                            /*  0x164   0x4 */
	u32 sk_tsflags;                                  /*  0x168   0x4 */
	__u8 __cacheline_group_end__sock_write_rxtx[0];  /*  0x16c     0 */
	__u8 __cacheline_group_begin__sock_write_tx[0];  /*  0x16c     0 */
	int sk_write_pending;                            /*  0x16c   0x4 */
	atomic_t sk_omem_alloc;                          /*  0x170   0x4 */
	int sk_sndbuf;                                   /*  0x174   0x4 */
	int sk_wmem_queued;                              /*  0x178   0x4 */
	refcount_t sk_wmem_alloc;                        /*  0x17c   0x4 */
	/* --- cacheline 6 boundary (384 bytes) --- */
	unsigned long sk_tsq_flags;                      /*  0x180   0x8 */
	union {
		struct sk_buff *sk_send_head;            /*  0x188   0x8 */
		struct rb_root tcp_rtx_queue;            /*  0x188   0x8 */
	};                                               /*  0x188   0x8 */
	union {
		struct sk_buff *sk_send_head;            /*      0   0x8 */
		struct rb_root tcp_rtx_queue;            /*      0   0x8 */
	};
	struct sk_buff_head sk_write_queue;              /*  0x190  0x18 */
	u32 sk_dst_pending_confirm;                      /*  0x1a8   0x4 */
	u32 sk_pacing_status;                            /*  0x1ac   0x4 */
	struct page_frag sk_frag;                        /*  0x1b0  0x10 */
	/* --- cacheline 7 boundary (448 bytes) --- */
	struct timer_list sk_timer;                      /*  0x1c0  0x28 */

	/* XXX last struct has 4 bytes of padding */

	unsigned long sk_pacing_rate;                    /*  0x1e8   0x8 */
	atomic_t sk_zckey;                               /*  0x1f0   0x4 */
	atomic_t sk_tskey;                               /*  0x1f4   0x4 */
	__u8 __cacheline_group_end__sock_write_tx[0];    /*  0x1f8     0 */
	__u8 __cacheline_group_begin__sock_read_tx[0];   /*  0x1f8     0 */
	unsigned long sk_max_pacing_rate;                /*  0x1f8   0x8 */
	/* --- cacheline 8 boundary (512 bytes) --- */
	long sk_sndtimeo;                                /*  0x200   0x8 */
	u32 sk_priority;                                 /*  0x208   0x4 */
	u32 sk_mark;                                     /*  0x20c   0x4 */
	rcu *sk_dst_cache;                               /*  0x210   0x8 */
	netdev_features_t sk_route_caps;                 /*  0x218   0x8 */
	u16 sk_gso_type;                                 /*  0x220   0x2 */
	u16 sk_gso_max_segs;                             /*  0x222   0x2 */
	unsigned int sk_gso_max_size;                    /*  0x224   0x4 */
	gfp_t sk_allocation;                             /*  0x228   0x4 */
	u32 sk_txhash;                                   /*  0x22c   0x4 */
	u8 sk_pacing_shift;                              /*  0x230   0x1 */
	bool sk_use_task_frag;                           /*  0x231   0x1 */
	__u8 __cacheline_group_end__sock_read_tx[0];     /*  0x232     0 */
	u8 sk_gso_disabled:1;                            /*  0x232:  0 0x1 */
	u8 sk_kern_sock:1;                               /*  0x232:0x1 0x1 */
	u8 sk_no_check_tx:1;                             /*  0x232:0x2 0x1 */
	u8 sk_no_check_rx:1;                             /*  0x232:0x3 0x1 */

	/* XXX 4 bits hole, try to pack */

	u8 sk_shutdown;                                  /*  0x233   0x1 */
	u16 sk_type;                                     /*  0x234   0x2 */
	u16 sk_protocol;                                 /*  0x236   0x2 */
	unsigned long sk_lingertime;                     /*  0x238   0x8 */
	/* --- cacheline 9 boundary (576 bytes) --- */
	struct proto *sk_prot_creator;                   /*  0x240   0x8 */
	rwlock_t sk_callback_lock;                       /*  0x248   0x8 */
	int sk_err_soft;                                 /*  0x250   0x4 */
	u32 sk_ack_backlog;                              /*  0x254   0x4 */
	u32 sk_max_ack_backlog;                          /*  0x258   0x4 */
	kuid_t sk_uid;                                   /*  0x25c   0x4 */
	spinlock_t sk_peer_lock;                         /*  0x260   0x4 */
	int sk_bind_phc;                                 /*  0x264   0x4 */
	struct pid *sk_peer_pid;                         /*  0x268   0x8 */
	const struct cred *sk_peer_cred;                 /*  0x270   0x8 */
	ktime_t sk_stamp;                                /*  0x278   0x8 */
	/* --- cacheline 10 boundary (640 bytes) --- */
	int sk_disconnects;                              /*  0x280   0x4 */
	u8 sk_txrehash;                                  /*  0x284   0x1 */
	u8 sk_clockid;                                   /*  0x285   0x1 */
	u8 sk_txtime_deadline_mode:1;                    /*  0x286:  0 0x1 */
	u8 sk_txtime_report_errors:1;                    /*  0x286:0x1 0x1 */
	u8 sk_txtime_unused:6;                           /*  0x286:0x2 0x1 */

	/* XXX 1 byte hole, try to pack */

	void *sk_user_data;                              /*  0x288   0x8 */
	void *sk_security;                               /*  0x290   0x8 */
	struct sock_cgroup_data sk_cgrp_data;            /*  0x298   0x8 */
	void (*sk_state_change)(struct sock *);          /*  0x2a0   0x8 */
	void (*sk_write_space)(struct sock *);           /*  0x2a8   0x8 */
	void (*sk_error_report)(struct sock *);          /*  0x2b0   0x8 */
	int (*sk_backlog_rcv)(struct sock *, struct sk_buff *); /* 0x2b8 0x8 */
	/* --- cacheline 11 boundary (704 bytes) --- */
	void (*sk_destruct)(struct sock *);              /*  0x2c0   0x8 */
	rcu *sk_reuseport_cb;                            /*  0x2c8   0x8 */
	rcu *sk_bpf_storage;                             /*  0x2d0   0x8 */
	struct callback_head sk_rcu __attribute__((__aligned__(8))); /* 0x2d8 0x10 */
	netns_tracker ns_tracker;                        /*  0x2e8   0x8 */

	/* size: 752, cachelines: 12, members: 105 */
	/* sum members: 749, holes: 1, sum holes: 1 */
	/* sum bitfield members: 12 bits, bit holes: 1, sum bit holes: 4 bits */
	/* paddings: 1, sum paddings: 4 */
	/* forced alignments: 1 */
	/* last cacheline: 48 bytes */
};

Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Paolo Abeni <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Paolo Abeni <[email protected]>
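A note for readers unfamiliar with the group markers used above: __cacheline_group_begin()/__cacheline_group_end() (from include/linux/cache.h) expand to zero-size array members that delimit a named span of fields, and CACHELINE_ASSERT_GROUP_MEMBER() turns group membership into a build-time check. The following stand-alone sketch mimics that mechanism; demo_sock and ASSERT_GROUP_MEMBER are illustrative stand-ins, not the kernel's exact definitions, and the zero-length arrays require gcc or clang.

/* Minimal user-space sketch of the cacheline-group technique (gcc/clang). */
#include <stddef.h>

#define cacheline_group_begin(GROUP) \
	unsigned char __cacheline_group_begin__##GROUP[0]
#define cacheline_group_end(GROUP) \
	unsigned char __cacheline_group_end__##GROUP[0]

struct demo_sock {
	cacheline_group_begin(write_rx);
	int drops;		/* written by the rx path */
	int peek_off;
	cacheline_group_end(write_rx);

	cacheline_group_begin(read_rx);
	int rcvbuf;		/* read-mostly in the rx path */
	cacheline_group_end(read_rx);
};

/* Build-time membership check in the spirit of
 * CACHELINE_ASSERT_GROUP_MEMBER(): the member must sit between the
 * zero-size begin/end markers of its group.
 */
#define ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER)			    \
	_Static_assert(offsetof(TYPE, MEMBER) >=			    \
		       offsetof(TYPE, __cacheline_group_begin__##GROUP) &&  \
		       offsetof(TYPE, MEMBER) <				    \
		       offsetof(TYPE, __cacheline_group_end__##GROUP),	    \
		       #MEMBER " is outside group " #GROUP)

ASSERT_GROUP_MEMBER(struct demo_sock, write_rx, drops);
ASSERT_GROUP_MEMBER(struct demo_sock, read_rx, rcvbuf);

int main(void) { return 0; }

Moving a member outside its begin/end pair makes the _Static_assert fail at compile time, which is exactly how the checks added to net/core/sock.c below catch accidental field reshuffles.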
1 parent: 465c1ab

2 files changed: +123 -47 lines changed
include/net/sock.h

Lines changed: 61 additions & 47 deletions
@@ -378,14 +378,10 @@ struct sock {
 #define sk_flags		__sk_common.skc_flags
 #define sk_rxhash		__sk_common.skc_rxhash
 
-	/* early demux fields */
-	struct dst_entry __rcu	*sk_rx_dst;
-	int			sk_rx_dst_ifindex;
-	u32			sk_rx_dst_cookie;
+	__cacheline_group_begin(sock_write_rx);
 
-	socket_lock_t		sk_lock;
 	atomic_t		sk_drops;
-	int			sk_rcvlowat;
+	__s32			sk_peek_off;
 	struct sk_buff_head	sk_error_queue;
 	struct sk_buff_head	sk_receive_queue;
 	/*
@@ -402,18 +398,24 @@ struct sock {
 		struct sk_buff	*head;
 		struct sk_buff	*tail;
 	} sk_backlog;
-
 #define sk_rmem_alloc sk_backlog.rmem_alloc
 
-	int			sk_forward_alloc;
-	u32			sk_reserved_mem;
+	__cacheline_group_end(sock_write_rx);
+
+	__cacheline_group_begin(sock_read_rx);
+	/* early demux fields */
+	struct dst_entry __rcu	*sk_rx_dst;
+	int			sk_rx_dst_ifindex;
+	u32			sk_rx_dst_cookie;
+
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	unsigned int		sk_ll_usec;
-	/* ===== mostly read cache line ===== */
 	unsigned int		sk_napi_id;
+	u16			sk_busy_poll_budget;
+	u8			sk_prefer_busy_poll;
 #endif
+	u8			sk_userlocks;
 	int			sk_rcvbuf;
-	int			sk_disconnects;
 
 	struct sk_filter __rcu	*sk_filter;
 	union {
@@ -422,15 +424,33 @@ struct sock {
 		struct socket_wq	*sk_wq_raw;
 	/* public: */
 	};
+
+	void			(*sk_data_ready)(struct sock *sk);
+	long			sk_rcvtimeo;
+	int			sk_rcvlowat;
+	__cacheline_group_end(sock_read_rx);
+
+	__cacheline_group_begin(sock_read_rxtx);
+	int			sk_err;
+	struct socket		*sk_socket;
+	struct mem_cgroup	*sk_memcg;
 #ifdef CONFIG_XFRM
 	struct xfrm_policy __rcu *sk_policy[2];
 #endif
+	__cacheline_group_end(sock_read_rxtx);
 
-	struct dst_entry __rcu	*sk_dst_cache;
+	__cacheline_group_begin(sock_write_rxtx);
+	socket_lock_t		sk_lock;
+	u32			sk_reserved_mem;
+	int			sk_forward_alloc;
+	u32			sk_tsflags;
+	__cacheline_group_end(sock_write_rxtx);
+
+	__cacheline_group_begin(sock_write_tx);
+	int			sk_write_pending;
 	atomic_t		sk_omem_alloc;
 	int			sk_sndbuf;
 
-	/* ===== cache line for TX ===== */
 	int			sk_wmem_queued;
 	refcount_t		sk_wmem_alloc;
 	unsigned long		sk_tsq_flags;
@@ -439,22 +459,36 @@ struct sock {
 		struct rb_root	tcp_rtx_queue;
 	};
 	struct sk_buff_head	sk_write_queue;
-	__s32			sk_peek_off;
-	int			sk_write_pending;
-	__u32			sk_dst_pending_confirm;
+	u32			sk_dst_pending_confirm;
 	u32			sk_pacing_status; /* see enum sk_pacing */
-	long			sk_sndtimeo;
+	struct page_frag	sk_frag;
 	struct timer_list	sk_timer;
-	__u32			sk_priority;
-	__u32			sk_mark;
+
 	unsigned long		sk_pacing_rate; /* bytes per second */
+	atomic_t		sk_zckey;
+	atomic_t		sk_tskey;
+	__cacheline_group_end(sock_write_tx);
+
+	__cacheline_group_begin(sock_read_tx);
 	unsigned long		sk_max_pacing_rate;
-	struct page_frag	sk_frag;
+	long			sk_sndtimeo;
+	u32			sk_priority;
+	u32			sk_mark;
+	struct dst_entry __rcu	*sk_dst_cache;
 	netdev_features_t	sk_route_caps;
-	int			sk_gso_type;
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+	struct sk_buff*		(*sk_validate_xmit_skb)(struct sock *sk,
+							struct net_device *dev,
+							struct sk_buff *skb);
+#endif
+	u16			sk_gso_type;
+	u16			sk_gso_max_segs;
 	unsigned int		sk_gso_max_size;
 	gfp_t			sk_allocation;
-	__u32			sk_txhash;
+	u32			sk_txhash;
+	u8			sk_pacing_shift;
+	bool			sk_use_task_frag;
+	__cacheline_group_end(sock_read_tx);
 
 	/*
 	 * Because of non atomicity rules, all
@@ -463,64 +497,44 @@ struct sock {
 	u8			sk_gso_disabled : 1,
 				sk_kern_sock : 1,
 				sk_no_check_tx : 1,
-				sk_no_check_rx : 1,
-				sk_userlocks : 4;
-	u8			sk_pacing_shift;
+				sk_no_check_rx : 1;
+	u8			sk_shutdown;
 	u16			sk_type;
 	u16			sk_protocol;
-	u16			sk_gso_max_segs;
 	unsigned long		sk_lingertime;
 	struct proto		*sk_prot_creator;
 	rwlock_t		sk_callback_lock;
-	int			sk_err,
-				sk_err_soft;
+	int			sk_err_soft;
 	u32			sk_ack_backlog;
 	u32			sk_max_ack_backlog;
 	kuid_t			sk_uid;
-	u8			sk_txrehash;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-	u8			sk_prefer_busy_poll;
-	u16			sk_busy_poll_budget;
-#endif
 	spinlock_t		sk_peer_lock;
 	int			sk_bind_phc;
 	struct pid		*sk_peer_pid;
 	const struct cred	*sk_peer_cred;
 
-	long			sk_rcvtimeo;
 	ktime_t			sk_stamp;
 #if BITS_PER_LONG==32
 	seqlock_t		sk_stamp_seq;
 #endif
-	atomic_t		sk_tskey;
-	atomic_t		sk_zckey;
-	u32			sk_tsflags;
-	u8			sk_shutdown;
+	int			sk_disconnects;
 
+	u8			sk_txrehash;
 	u8			sk_clockid;
 	u8			sk_txtime_deadline_mode : 1,
 				sk_txtime_report_errors : 1,
 				sk_txtime_unused : 6;
-	bool			sk_use_task_frag;
 
-	struct socket		*sk_socket;
 	void			*sk_user_data;
#ifdef CONFIG_SECURITY
 	void			*sk_security;
 #endif
 	struct sock_cgroup_data	sk_cgrp_data;
-	struct mem_cgroup	*sk_memcg;
 	void			(*sk_state_change)(struct sock *sk);
-	void			(*sk_data_ready)(struct sock *sk);
 	void			(*sk_write_space)(struct sock *sk);
 	void			(*sk_error_report)(struct sock *sk);
 	int			(*sk_backlog_rcv)(struct sock *sk,
 						  struct sk_buff *skb);
-#ifdef CONFIG_SOCK_VALIDATE_XMIT
-	struct sk_buff*		(*sk_validate_xmit_skb)(struct sock *sk,
-							struct net_device *dev,
-							struct sk_buff *skb);
-#endif
 	void			(*sk_destruct)(struct sock *sk);
 	struct sock_reuseport __rcu	*sk_reuseport_cb;
 #ifdef CONFIG_BPF_SYSCALL
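The struct layout quoted in the commit message is pahole output. Assuming a vmlinux built with debug info, the field placement produced by this header change can be reproduced with an invocation along the lines of:

pahole -C sock vmlinux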

net/core/sock.c

Lines changed: 62 additions & 0 deletions
@@ -4234,3 +4234,65 @@ int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
 	return sock_ioctl_out(sk, cmd, arg);
 }
 EXPORT_SYMBOL(sk_ioctl);
+
+static int __init sock_struct_check(void)
+{
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_drops);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_peek_off);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_error_queue);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_receive_queue);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_backlog);
+
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_ifindex);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_cookie);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvbuf);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_filter);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_wq);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_data_ready);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvtimeo);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvlowat);
+
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_err);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_socket);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_memcg);
+
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_lock);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_reserved_mem);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_forward_alloc);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_tsflags);
+
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_omem_alloc);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_omem_alloc);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_sndbuf);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_queued);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_alloc);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tsq_flags);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_send_head);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_queue);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_pending);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_dst_pending_confirm);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_status);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_frag);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_timer);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_rate);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_zckey);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tskey);
+
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_max_pacing_rate);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_sndtimeo);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_priority);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_mark);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_dst_cache);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_route_caps);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_type);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_size);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_allocation);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_txhash);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_segs);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_pacing_shift);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_use_task_frag);
+	return 0;
+}
+
+core_initcall(sock_struct_check);
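The asserts above are compile-time checks in the spirit of BUILD_BUG_ON(); sock_struct_check() does nothing at runtime beyond returning 0. As intuition for why splitting read-mostly from read-write fields can yield the 4 to 5 % TCP_RR gain quoted in the commit message, here is a minimal, hypothetical user-space demonstration of the false sharing such grouping avoids (all names here are illustrative, not from the kernel):

/* Hypothetical false-sharing demo. Build with: cc -O2 -pthread demo.c */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct shared {
	long rx_ctr;		/* bumped by thread 1 */
	long tx_ctr;		/* same cache line as rx_ctr: false sharing */
	char pad[64];		/* push the next field onto another line */
	long tx_ctr_far;	/* different cache line: no sharing */
};

static _Alignas(64) struct shared s;

static void *bump(void *p)
{
	for (long i = 0; i < 100000000L; i++)
		__atomic_fetch_add((long *)p, 1, __ATOMIC_RELAXED);
	return NULL;
}

static double run(long *a, long *b)
{
	struct timespec ts0, ts1;
	pthread_t t1, t2;

	clock_gettime(CLOCK_MONOTONIC, &ts0);
	pthread_create(&t1, NULL, bump, a);
	pthread_create(&t2, NULL, bump, b);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	clock_gettime(CLOCK_MONOTONIC, &ts1);
	return (ts1.tv_sec - ts0.tv_sec) + (ts1.tv_nsec - ts0.tv_nsec) / 1e9;
}

int main(void)
{
	printf("adjacent fields (false sharing): %.2f s\n",
	       run(&s.rx_ctr, &s.tx_ctr));
	printf("separate cache lines:            %.2f s\n",
	       run(&s.rx_ctr, &s.tx_ctr_far));
	return 0;
}

On typical multi-core hardware the first run is usually measurably slower, because each relaxed increment forces the shared cache line to bounce between cores; keeping a writer's fields off the cache lines that other paths only read is the same idea applied to struct sock.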
