@@ -306,9 +306,11 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 	int count = pool->size - atomic_read(&pool->available);
 	u64 handle = adapter->rx_scrq[pool->index]->handle;
 	struct device *dev = &adapter->vdev->dev;
+	struct ibmvnic_ind_xmit_queue *ind_bufp;
+	struct ibmvnic_sub_crq_queue *rx_scrq;
+	union sub_crq *sub_crq;
 	int buffers_added = 0;
 	unsigned long lpar_rc;
-	union sub_crq sub_crq;
 	struct sk_buff *skb;
 	unsigned int offset;
 	dma_addr_t dma_addr;
@@ -320,6 +322,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 	if (!pool->active)
 		return;
 
+	rx_scrq = adapter->rx_scrq[pool->index];
+	ind_bufp = &rx_scrq->ind_buf;
 	for (i = 0; i < count; ++i) {
 		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
 		if (!skb) {
@@ -346,12 +350,13 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
346
350
pool -> rx_buff [index ].pool_index = pool -> index ;
347
351
pool -> rx_buff [index ].size = pool -> buff_size ;
348
352
349
- memset (& sub_crq , 0 , sizeof (sub_crq ));
350
- sub_crq .rx_add .first = IBMVNIC_CRQ_CMD ;
351
- sub_crq .rx_add .correlator =
353
+ sub_crq = & ind_bufp -> indir_arr [ind_bufp -> index ++ ];
354
+ memset (sub_crq , 0 , sizeof (* sub_crq ));
355
+ sub_crq -> rx_add .first = IBMVNIC_CRQ_CMD ;
356
+ sub_crq -> rx_add .correlator =
352
357
cpu_to_be64 ((u64 )& pool -> rx_buff [index ]);
353
- sub_crq . rx_add .ioba = cpu_to_be32 (dma_addr );
354
- sub_crq . rx_add .map_id = pool -> long_term_buff .map_id ;
358
+ sub_crq -> rx_add .ioba = cpu_to_be32 (dma_addr );
359
+ sub_crq -> rx_add .map_id = pool -> long_term_buff .map_id ;
355
360
356
361
/* The length field of the sCRQ is defined to be 24 bits so the
357
362
* buffer size needs to be left shifted by a byte before it is
@@ -361,29 +366,41 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 #ifdef __LITTLE_ENDIAN__
 		shift = 8;
 #endif
-		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
-
-		lpar_rc = send_subcrq(adapter, handle, &sub_crq);
-		if (lpar_rc != H_SUCCESS)
-			goto failure;
-
-		buffers_added++;
-		adapter->replenish_add_buff_success++;
+		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
 		pool->next_free = (pool->next_free + 1) % pool->size;
+		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
+		    i == count - 1) {
+			lpar_rc =
+				send_subcrq_indirect(adapter, handle,
+						     (u64)ind_bufp->indir_dma,
+						     (u64)ind_bufp->index);
+			if (lpar_rc != H_SUCCESS)
+				goto failure;
+			buffers_added += ind_bufp->index;
+			adapter->replenish_add_buff_success += ind_bufp->index;
+			ind_bufp->index = 0;
+		}
 	}
 	atomic_add(buffers_added, &pool->available);
 	return;
 
 failure:
 	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
 		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
-	pool->free_map[pool->next_free] = index;
-	pool->rx_buff[index].skb = NULL;
-
-	dev_kfree_skb_any(skb);
-	adapter->replenish_add_buff_failure++;
-	atomic_add(buffers_added, &pool->available);
+	for (i = ind_bufp->index - 1; i >= 0; --i) {
+		struct ibmvnic_rx_buff *rx_buff;
 
+		pool->next_free = pool->next_free == 0 ?
+				  pool->size - 1 : pool->next_free - 1;
+		sub_crq = &ind_bufp->indir_arr[i];
+		rx_buff = (struct ibmvnic_rx_buff *)
+			  be64_to_cpu(sub_crq->rx_add.correlator);
+		index = (int)(rx_buff - pool->rx_buff);
+		pool->free_map[pool->next_free] = index;
+		dev_kfree_skb_any(pool->rx_buff[index].skb);
+		pool->rx_buff[index].skb = NULL;
+	}
+	ind_bufp->index = 0;
 	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
 		/* Disable buffer pool replenishment and report carrier off if
 		 * queue is closed or pending failover.
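
For context, a minimal userspace sketch (not part of the patch) of the batching pattern this hunk introduces: descriptors accumulate in a fixed-size indirect array and are submitted with one call once the array fills or the last buffer has been queued, instead of one hypervisor call per buffer. All names here (ind_buf, flush_indirect, MAX_IND_DESCS) are illustrative stand-ins for the driver's real types and calls.

#include <stdio.h>

#define MAX_IND_DESCS 16		/* stand-in for IBMVNIC_MAX_IND_DESCS */

struct desc { unsigned long correlator; };

struct ind_buf {
	struct desc arr[MAX_IND_DESCS];
	int index;			/* number of queued descriptors */
};

/* Stand-in for send_subcrq_indirect(): submit 'n' descriptors in one call. */
static int flush_indirect(struct ind_buf *b, int n)
{
	printf("flush %d descriptor(s)\n", n);
	return 0;			/* pretend the hypervisor accepted them */
}

int main(void)
{
	struct ind_buf buf = { .index = 0 };
	int count = 40;			/* buffers to replenish */
	int added = 0;

	for (int i = 0; i < count; i++) {
		/* queue one descriptor instead of sending it immediately */
		buf.arr[buf.index++].correlator = (unsigned long)i;

		/* flush when the array is full or on the last buffer */
		if (buf.index == MAX_IND_DESCS || i == count - 1) {
			if (flush_indirect(&buf, buf.index))
				return 1;	/* real code unwinds queued buffers here */
			added += buf.index;
			buf.index = 0;
		}
	}
	printf("replenished %d buffers\n", added);
	return 0;
}

The failure path in the patch follows the same idea in reverse: it walks the still-queued descriptors backwards, recovers each rx_buff from its correlator, and returns the buffer and free_map slot before resetting the indirect index.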