Commit 2e334ee

Alexander Duyck authored and Jeff Kirsher committed

igb: Move rx_buffer related code in Rx cleanup path into separate function

In order to try and isolate things a bit further, I am moving the code related to retrieving data from the rx_buffer_info structure into a separate function.

Signed-off-by: Alexander Duyck <[email protected]>
Tested-by: Aaron Brown <[email protected]>
Signed-off-by: Jeff Kirsher <[email protected]>

1 parent cbc8e55 commit 2e334ee


drivers/net/ethernet/intel/igb/igb_main.c

Lines changed: 120 additions & 86 deletions
@@ -5928,6 +5928,74 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 	return true;
 }
 
+static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
+					   union e1000_adv_rx_desc *rx_desc,
+					   struct sk_buff *skb)
+{
+	struct igb_rx_buffer *rx_buffer;
+	struct page *page;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+
+	/*
+	 * This memory barrier is needed to keep us from reading
+	 * any other fields out of the rx_desc until we know the
+	 * RXD_STAT_DD bit is set
+	 */
+	rmb();
+
+	page = rx_buffer->page;
+	prefetchw(page);
+
+	if (likely(!skb)) {
+		void *page_addr = page_address(page) +
+				  rx_buffer->page_offset;
+
+		/* prefetch first cache line of first page */
+		prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+		prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+		/* allocate a skb to store the frags */
+		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+						IGB_RX_HDR_LEN);
+		if (unlikely(!skb)) {
+			rx_ring->rx_stats.alloc_failed++;
+			return NULL;
+		}
+
+		/*
+		 * we will be copying header into skb->data in
+		 * pskb_may_pull so it is in our interest to prefetch
+		 * it now to avoid a possible cache miss
+		 */
+		prefetchw(skb->data);
+	}
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      rx_buffer->dma,
+				      rx_buffer->page_offset,
+				      PAGE_SIZE / 2,
+				      DMA_FROM_DEVICE);
+
+	/* pull page into skb */
+	if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+		/* hand second half of page back to the ring */
+		igb_reuse_rx_page(rx_ring, rx_buffer);
+	} else {
+		/* we are not reusing the buffer so unmap it */
+		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
+	}
+
+	/* clear contents of rx_buffer */
+	rx_buffer->page = NULL;
+
+	return skb;
+}
+
 static inline void igb_rx_checksum(struct igb_ring *ring,
 				   union e1000_adv_rx_desc *rx_desc,
 				   struct sk_buff *skb)
@@ -5977,6 +6045,34 @@ static inline void igb_rx_hash(struct igb_ring *ring,
 	skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
 }
 
+/**
+ * igb_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean.  If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool igb_is_non_eop(struct igb_ring *rx_ring,
+			   union e1000_adv_rx_desc *rx_desc)
+{
+	u32 ntc = rx_ring->next_to_clean + 1;
+
+	/* fetch, update, and store next to clean */
+	ntc = (ntc < rx_ring->count) ? ntc : 0;
+	rx_ring->next_to_clean = ntc;
+
+	prefetch(IGB_RX_DESC(rx_ring, ntc));
+
+	if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
+		return false;
+
+	return true;
+}
+
 /**
  * igb_get_headlen - determine size of header for LRO/GRO
  * @data: pointer to the start of the headers
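
An aside on the wrap handling above: igb_is_non_eop() advances next_to_clean with a compare-and-reset rather than a modulo, which avoids a division and works for any ring size, power of two or not. A minimal standalone sketch of the same technique follows; RING_COUNT and ring_next() are illustrative names, not from the driver:

#include <stdio.h>

#define RING_COUNT 8	/* illustrative ring size, not the driver's */

/* advance a ring index using the same compare-and-reset wrap as above */
static unsigned int ring_next(unsigned int ntc)
{
	ntc++;
	return (ntc < RING_COUNT) ? ntc : 0;
}

int main(void)
{
	unsigned int i, ntc = RING_COUNT - 2;

	/* step past the end of the ring to show the wrap back to 0 */
	for (i = 0; i < 4; i++) {
		unsigned int prev = ntc;

		ntc = ring_next(ntc);
		printf("%u -> %u\n", prev, ntc);
	}
	return 0;	/* prints 6 -> 7, 7 -> 0, 0 -> 1, 1 -> 2 */
}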
@@ -6227,87 +6323,39 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 }
 
-static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 {
 	struct igb_ring *rx_ring = q_vector->rx.ring;
-	union e1000_adv_rx_desc *rx_desc;
 	struct sk_buff *skb = rx_ring->skb;
 	unsigned int total_bytes = 0, total_packets = 0;
 	u16 cleaned_count = igb_desc_unused(rx_ring);
-	u16 i = rx_ring->next_to_clean;
-
-	rx_desc = IGB_RX_DESC(rx_ring, i);
-
-	while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
-		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
-		struct page *page;
-		union e1000_adv_rx_desc *next_rxd;
-
-		i++;
-		if (i == rx_ring->count)
-			i = 0;
-
-		next_rxd = IGB_RX_DESC(rx_ring, i);
-		prefetch(next_rxd);
 
-		/*
-		 * This memory barrier is needed to keep us from reading
-		 * any other fields out of the rx_desc until we know the
-		 * RXD_STAT_DD bit is set
-		 */
-		rmb();
-
-		page = buffer_info->page;
-		prefetchw(page);
+	do {
+		union e1000_adv_rx_desc *rx_desc;
 
-		if (likely(!skb)) {
-			void *page_addr = page_address(page) +
-					  buffer_info->page_offset;
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
+			igb_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
 
-			/* prefetch first cache line of first page */
-			prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
-			prefetch(page_addr + L1_CACHE_BYTES);
-#endif
+		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
 
-			/* allocate a skb to store the frags */
-			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-							IGB_RX_HDR_LEN);
-			if (unlikely(!skb)) {
-				rx_ring->rx_stats.alloc_failed++;
-				break;
-			}
+		if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
+			break;
 
-			/*
-			 * we will be copying header into skb->data in
-			 * pskb_may_pull so it is in our interest to prefetch
-			 * it now to avoid a possible cache miss
-			 */
-			prefetchw(skb->data);
-		}
+		/* retrieve a buffer from the ring */
+		skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
 
-		/* we are reusing so sync this buffer for CPU use */
-		dma_sync_single_range_for_cpu(rx_ring->dev,
-					      buffer_info->dma,
-					      buffer_info->page_offset,
-					      PAGE_SIZE / 2,
-					      DMA_FROM_DEVICE);
-
-		/* pull page into skb */
-		if (igb_add_rx_frag(rx_ring, buffer_info, rx_desc, skb)) {
-			/* hand second half of page back to the ring */
-			igb_reuse_rx_page(rx_ring, buffer_info);
-		} else {
-			/* we are not reusing the buffer so unmap it */
-			dma_unmap_page(rx_ring->dev, buffer_info->dma,
-				       PAGE_SIZE, DMA_FROM_DEVICE);
-		}
+		/* exit if we failed to retrieve a buffer */
+		if (!skb)
+			break;
 
-		/* clear contents of buffer_info */
-		buffer_info->page = NULL;
+		cleaned_count++;
 
-		if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))
-			goto next_desc;
+		/* fetch next buffer in frame if non-eop */
+		if (igb_is_non_eop(rx_ring, rx_desc))
+			continue;
 
 		/* verify the packet layout is correct */
 		if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
@@ -6317,7 +6365,6 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 
 		/* probably a little skewed due to removing CRC */
 		total_bytes += skb->len;
-		total_packets++;
 
 		/* populate checksum, timestamp, VLAN, and protocol */
 		igb_process_skb_fields(rx_ring, rx_desc, skb);
@@ -6327,26 +6374,13 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 		/* reset skb pointer */
 		skb = NULL;
 
-		budget--;
-next_desc:
-		if (!budget)
-			break;
-
-		cleaned_count++;
-		/* return some buffers to hardware, one at a time is too slow */
-		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
-			igb_alloc_rx_buffers(rx_ring, cleaned_count);
-			cleaned_count = 0;
-		}
-
-		/* use prefetched values */
-		rx_desc = next_rxd;
-	}
+		/* update budget accounting */
+		total_packets++;
+	} while (likely(total_packets < budget));
 
 	/* place incomplete frames back on ring for completion */
 	rx_ring->skb = skb;
 
-	rx_ring->next_to_clean = i;
 	u64_stats_update_begin(&rx_ring->rx_syncp);
 	rx_ring->rx_stats.packets += total_packets;
 	rx_ring->rx_stats.bytes += total_bytes;
@@ -6357,7 +6391,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 	if (cleaned_count)
 		igb_alloc_rx_buffers(rx_ring, cleaned_count);
 
-	return !!budget;
+	return (total_packets < budget);
 }
 
 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
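
For context on the changed return value: (total_packets < budget) reports whether the Rx ring was fully drained within the NAPI budget. Below is a hedged sketch of the usual poll-routine idiom that consumes such a boolean; every example_* name is hypothetical and simplified, not code from this commit or the igb driver:

#include <linux/netdevice.h>

struct example_q_vector {
	struct napi_struct napi;
	/* rx/tx ring state would live here in a real driver */
};

/* stub: pretend the Rx ring was fully drained within budget */
static bool example_clean_rx_irq(struct example_q_vector *q, int budget)
{
	return true;
}

/* stub: a real driver would re-enable its queue interrupt here */
static void example_enable_irq(struct example_q_vector *q)
{
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_q_vector *q =
		container_of(napi, struct example_q_vector, napi);

	/* false means we used the whole budget: stay in polling mode */
	if (!example_clean_rx_irq(q, budget))
		return budget;

	/* all work done: leave polling mode and re-enable interrupts */
	napi_complete(napi);
	example_enable_irq(q);

	return 0;
}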
