@@ -345,34 +345,14 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
 	return slots_used;
 }
 
-static int count_skb_frag_slots(struct sk_buff *skb)
-{
-	int i, frags = skb_shinfo(skb)->nr_frags;
-	int pages = 0;
-
-	for (i = 0; i < frags; i++) {
-		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
-		unsigned long size = skb_frag_size(frag);
-		unsigned long offset = frag->page_offset;
-
-		/* Skip unused frames from start of page */
-		offset &= ~PAGE_MASK;
-		pages += PFN_UP(offset + size);
-	}
-	return pages;
-}
-
-static int netvsc_get_slots(struct sk_buff *skb)
+/* Estimate number of page buffers needed to transmit
+ * Need at most 2 for RNDIS header plus skb body and fragments.
+ */
+static unsigned int netvsc_get_slots(const struct sk_buff *skb)
 {
-	char *data = skb->data;
-	unsigned int offset = offset_in_page(data);
-	unsigned int len = skb_headlen(skb);
-	int slots;
-	int frag_slots;
-
-	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
-	frag_slots = count_skb_frag_slots(skb);
-	return slots + frag_slots;
+	return PFN_UP(offset_in_page(skb->data) + skb_headlen(skb))
+		+ skb_shinfo(skb)->nr_frags
+		+ 2;
 }
 
 static u32 net_checksum_info(struct sk_buff *skb)
@@ -410,21 +390,18 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
 	struct hv_page_buffer *pb = page_buf;
 
-	/* We will atmost need two pages to describe the rndis
-	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
+	/* We can only transmit MAX_PAGE_BUFFER_COUNT number
 	 * of pages in a single packet. If skb is scattered around
 	 * more pages we try linearizing it.
 	 */
-
-	num_data_pgs = netvsc_get_slots(skb) + 2;
-
+	num_data_pgs = netvsc_get_slots(skb);
 	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
 		++net_device_ctx->eth_stats.tx_scattered;
 
 		if (skb_linearize(skb))
 			goto no_memory;
 
-		num_data_pgs = netvsc_get_slots(skb) + 2;
+		num_data_pgs = netvsc_get_slots(skb);
 		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
 			++net_device_ctx->eth_stats.tx_too_big;
 			goto drop;
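
Taken together, the two hunks fold the old count_skb_frag_slots() walk and the separate "+ 2" RNDIS-header allowance into a single estimate inside netvsc_get_slots(): pages spanned by the linear area, plus one slot per fragment, plus two for the RNDIS header. The arithmetic can be sanity-checked with the minimal user-space sketch below; SKETCH_PAGE_SIZE, slot_estimate(), and the sample skb layout are illustrative assumptions, not part of the driver.

/* Minimal user-space sketch of the patched slot estimate (illustrative only). */
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096UL	/* assumed page size; the kernel uses PAGE_SIZE */

/* Mirrors PFN_UP(): number of pages needed to cover 'len' bytes from offset 0. */
static unsigned long pfn_up(unsigned long len)
{
	return (len + SKETCH_PAGE_SIZE - 1) / SKETCH_PAGE_SIZE;
}

/* Same shape as the new netvsc_get_slots(): pages spanned by the linear
 * area, plus one slot per fragment, plus the 2 slots reserved for the
 * RNDIS header (the "+ 2" that used to live in netvsc_start_xmit()).
 */
static unsigned int slot_estimate(unsigned long head_off, unsigned long head_len,
				  unsigned int nr_frags)
{
	return pfn_up(head_off + head_len) + nr_frags + 2;
}

int main(void)
{
	/* Hypothetical skb layout: linear data starts 4000 bytes into a page,
	 * is 200 bytes long (so it straddles two pages), and has 3 fragments.
	 */
	printf("slots = %u\n", slot_estimate(4000, 200, 3));	/* prints 7 */
	return 0;
}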