Commit efecfd5

anguy11 authored and Jeff Kirsher committed
ixgbevf: Delay tail write for XDP packets
Current XDP implementation hits the tail on every XDP_TX; change the driver to only hit the tail after packet processing is complete.

Based on commit 7379f97 ("ixgbe: delay tail write to every 'n' packets")

Signed-off-by: Tony Nguyen <[email protected]>
Acked-by: John Fastabend <[email protected]>
Tested-by: Andrew Bowers <[email protected]>
Signed-off-by: Jeff Kirsher <[email protected]>
1 parent 21092e9 commit efecfd5

File tree: 1 file changed, +18 −12 lines

drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c

Lines changed: 18 additions & 12 deletions
@@ -1016,14 +1016,8 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
 			cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
 				    IXGBE_ADVTXD_CC);
 
-	/* Force memory writes to complete before letting h/w know there
-	 * are new descriptors to fetch.  (Only applicable for weak-ordered
-	 * memory model archs, such as IA-64).
-	 *
-	 * We also need this memory barrier to make certain all of the
-	 * status bits have been updated before next_to_watch is written.
-	 */
-	wmb();
+	/* Avoid any potential race with cleanup */
+	smp_wmb();
 
 	/* set next_to_watch value indicating a packet is present */
 	i++;
@@ -1033,8 +1027,6 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
 	tx_buffer->next_to_watch = tx_desc;
 	ring->next_to_use = i;
 
-	/* notify HW of packet */
-	ixgbevf_write_tail(ring, i);
 	return IXGBEVF_XDP_TX;
 }
 
@@ -1101,6 +1093,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
 	u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
 	struct sk_buff *skb = rx_ring->skb;
+	bool xdp_xmit = false;
 	struct xdp_buff xdp;
 
 	xdp.rxq = &rx_ring->xdp_rxq;
@@ -1142,11 +1135,13 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		}
 
 		if (IS_ERR(skb)) {
-			if (PTR_ERR(skb) == -IXGBEVF_XDP_TX)
+			if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
+				xdp_xmit = true;
 				ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
 						       size);
-			else
+			} else {
 				rx_buffer->pagecnt_bias++;
+			}
 			total_rx_packets++;
 			total_rx_bytes += size;
 		} else if (skb) {
@@ -1208,6 +1203,17 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 	/* place incomplete frames back on ring for completion */
 	rx_ring->skb = skb;
 
+	if (xdp_xmit) {
+		struct ixgbevf_ring *xdp_ring =
+			adapter->xdp_ring[rx_ring->queue_index];
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.
+		 */
+		wmb();
+		ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use);
+	}
+
 	u64_stats_update_begin(&rx_ring->syncp);
 	rx_ring->stats.packets += total_rx_packets;
 	rx_ring->stats.bytes += total_rx_bytes;
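
For readers outside the driver context, the sketch below illustrates the batching pattern the commit adopts: instead of bumping the hardware tail register once per forwarded frame, the RX poll loop only records that something was queued and writes the tail once after the loop. This is a minimal user-space sketch; the ring layout, the helper names (ring_write_tail, xmit_one, process_rx_batch) and the plain pointer standing in for the memory-mapped tail register are hypothetical, not ixgbevf code.

/* Minimal sketch of delayed tail writes, assuming a hypothetical ring
 * structure; it is not ixgbevf code.
 */
#include <stdbool.h>
#include <stdint.h>

struct xdp_tx_ring {
	uint16_t next_to_use;         /* next free descriptor slot */
	volatile uint32_t *tail_reg;  /* stand-in for the device tail register */
};

void ring_write_tail(struct xdp_tx_ring *ring)
{
	/* In the driver a wmb() precedes this write so all descriptor
	 * updates are visible to the device before the tail moves.
	 */
	*ring->tail_reg = ring->next_to_use;
}

/* Process one RX batch; xmit_one() queues a frame on the XDP TX ring
 * without touching the tail and returns true if it queued anything.
 */
void process_rx_batch(struct xdp_tx_ring *xdp_ring, int budget,
		      bool (*xmit_one)(struct xdp_tx_ring *ring))
{
	bool xdp_xmit = false;
	int i;

	for (i = 0; i < budget; i++) {
		if (xmit_one(xdp_ring))
			xdp_xmit = true;   /* remember, but do not write tail yet */
	}

	/* One tail write covers every frame queued during this batch. */
	if (xdp_xmit)
		ring_write_tail(xdp_ring);
}

Compared with writing the tail inside xmit_one(), this trades one register write per forwarded frame for one per poll batch, which is the saving the commit message (and the referenced ixgbe change) is after.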
