Skip to content

Commit 4f0b681

Browse files
tlfalcon authored and kuba-moo committed
ibmvnic: Introduce batched RX buffer descriptor transmission
Utilize the H_SEND_SUB_CRQ_INDIRECT hypervisor call to send multiple RX buffer descriptors to the device in one hypervisor call operation. This change will reduce the number of hypervisor calls and thus hypervisor call overhead needed to transmit RX buffer descriptors to the device. Signed-off-by: Thomas Falcon <[email protected]> Signed-off-by: Jakub Kicinski <[email protected]>
1 parent f019fb6 commit 4f0b681

File tree

1 file changed

+37
-20
lines changed

1 file changed

+37
-20
lines changed

drivers/net/ethernet/ibm/ibmvnic.c

Lines changed: 37 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -306,9 +306,11 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
306306
int count = pool->size - atomic_read(&pool->available);
307307
u64 handle = adapter->rx_scrq[pool->index]->handle;
308308
struct device *dev = &adapter->vdev->dev;
309+
struct ibmvnic_ind_xmit_queue *ind_bufp;
310+
struct ibmvnic_sub_crq_queue *rx_scrq;
311+
union sub_crq *sub_crq;
309312
int buffers_added = 0;
310313
unsigned long lpar_rc;
311-
union sub_crq sub_crq;
312314
struct sk_buff *skb;
313315
unsigned int offset;
314316
dma_addr_t dma_addr;
@@ -320,6 +322,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
320322
if (!pool->active)
321323
return;
322324

325+
rx_scrq = adapter->rx_scrq[pool->index];
326+
ind_bufp = &rx_scrq->ind_buf;
323327
for (i = 0; i < count; ++i) {
324328
skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
325329
if (!skb) {
@@ -346,12 +350,13 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
346350
pool->rx_buff[index].pool_index = pool->index;
347351
pool->rx_buff[index].size = pool->buff_size;
348352

349-
memset(&sub_crq, 0, sizeof(sub_crq));
350-
sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
351-
sub_crq.rx_add.correlator =
353+
sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
354+
memset(sub_crq, 0, sizeof(*sub_crq));
355+
sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
356+
sub_crq->rx_add.correlator =
352357
cpu_to_be64((u64)&pool->rx_buff[index]);
353-
sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
354-
sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
358+
sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
359+
sub_crq->rx_add.map_id = pool->long_term_buff.map_id;
355360

356361
/* The length field of the sCRQ is defined to be 24 bits so the
357362
* buffer size needs to be left shifted by a byte before it is
@@ -361,29 +366,41 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
361366
#ifdef __LITTLE_ENDIAN__
362367
shift = 8;
363368
#endif
364-
sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
365-
366-
lpar_rc = send_subcrq(adapter, handle, &sub_crq);
367-
if (lpar_rc != H_SUCCESS)
368-
goto failure;
369-
370-
buffers_added++;
371-
adapter->replenish_add_buff_success++;
369+
sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
372370
pool->next_free = (pool->next_free + 1) % pool->size;
371+
if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
372+
i == count - 1) {
373+
lpar_rc =
374+
send_subcrq_indirect(adapter, handle,
375+
(u64)ind_bufp->indir_dma,
376+
(u64)ind_bufp->index);
377+
if (lpar_rc != H_SUCCESS)
378+
goto failure;
379+
buffers_added += ind_bufp->index;
380+
adapter->replenish_add_buff_success += ind_bufp->index;
381+
ind_bufp->index = 0;
382+
}
373383
}
374384
atomic_add(buffers_added, &pool->available);
375385
return;
376386

377387
failure:
378388
if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
379389
dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
380-
pool->free_map[pool->next_free] = index;
381-
pool->rx_buff[index].skb = NULL;
382-
383-
dev_kfree_skb_any(skb);
384-
adapter->replenish_add_buff_failure++;
385-
atomic_add(buffers_added, &pool->available);
390+
for (i = ind_bufp->index - 1; i >= 0; --i) {
391+
struct ibmvnic_rx_buff *rx_buff;
386392

393+
pool->next_free = pool->next_free == 0 ?
394+
pool->size - 1 : pool->next_free - 1;
395+
sub_crq = &ind_bufp->indir_arr[i];
396+
rx_buff = (struct ibmvnic_rx_buff *)
397+
be64_to_cpu(sub_crq->rx_add.correlator);
398+
index = (int)(rx_buff - pool->rx_buff);
399+
pool->free_map[pool->next_free] = index;
400+
dev_kfree_skb_any(pool->rx_buff[index].skb);
401+
pool->rx_buff[index].skb = NULL;
402+
}
403+
ind_bufp->index = 0;
387404
if (lpar_rc == H_CLOSED || adapter->failover_pending) {
388405
/* Disable buffer pool replenishment and report carrier off if
389406
* queue is closed or pending failover.

0 commit comments

Comments
 (0)