Skip to content

Commit ad435ec

Browse files
Alexander Duyck authored and Jeff Kirsher committed
ixgbe: Remove tail write abstraction and add missing barrier
This change cleans up the tail writes for the ixgbe descriptor queues. The current implementation had me confused as I wasn't sure if it was still making use of the surprise remove logic or not. It also adds the mmiowb which is needed on ia64, mips, and a couple other architectures in order to synchronize the MMIO writes with the Tx queue _xmit_lock spinlock. Cc: Don Skidmore <[email protected]> Signed-off-by: Alexander Duyck <[email protected]> Tested-by: Phil Schmitt <[email protected]> Signed-off-by: Jeff Kirsher <[email protected]>
1 parent 18cb652 commit ad435ec

File tree

2 files changed

+20
-25
lines changed

2 files changed

+20
-25
lines changed

drivers/net/ethernet/intel/ixgbe/ixgbe.h

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -553,11 +553,6 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
553553
return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
554554
}
555555

556-
static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
557-
{
558-
writel(value, ring->tail);
559-
}
560-
561556
#define IXGBE_RX_DESC(R, i) \
562557
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
563558
#define IXGBE_TX_DESC(R, i) \

drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -1416,22 +1416,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
14161416
skb->ip_summed = CHECKSUM_UNNECESSARY;
14171417
}
14181418

1419-
static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
1420-
{
1421-
rx_ring->next_to_use = val;
1422-
1423-
/* update next to alloc since we have filled the ring */
1424-
rx_ring->next_to_alloc = val;
1425-
/*
1426-
* Force memory writes to complete before letting h/w
1427-
* know there are new descriptors to fetch. (Only
1428-
* applicable for weak-ordered memory model archs,
1429-
* such as IA-64).
1430-
*/
1431-
wmb();
1432-
ixgbe_write_tail(rx_ring, val);
1433-
}
1434-
14351419
static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
14361420
struct ixgbe_rx_buffer *bi)
14371421
{
@@ -1517,8 +1501,20 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
15171501

15181502
i += rx_ring->count;
15191503

1520-
if (rx_ring->next_to_use != i)
1521-
ixgbe_release_rx_desc(rx_ring, i);
1504+
if (rx_ring->next_to_use != i) {
1505+
rx_ring->next_to_use = i;
1506+
1507+
/* update next to alloc since we have filled the ring */
1508+
rx_ring->next_to_alloc = i;
1509+
1510+
/* Force memory writes to complete before letting h/w
1511+
* know there are new descriptors to fetch. (Only
1512+
* applicable for weak-ordered memory model archs,
1513+
* such as IA-64).
1514+
*/
1515+
wmb();
1516+
writel(i, rx_ring->tail);
1517+
}
15221518
}
15231519

15241520
static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
@@ -6954,8 +6950,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
69546950
ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
69556951

69566952
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
6957-
/* notify HW of packet */
6958-
ixgbe_write_tail(tx_ring, i);
6953+
writel(i, tx_ring->tail);
6954+
6955+
/* we need this if more than one processor can write to our tail
6956+
* at a time, it synchronizes IO on IA64/Altix systems
6957+
*/
6958+
mmiowb();
69596959
}
69606960

69616961
return;

0 commit comments

Comments (0)