@@ -75,6 +75,9 @@ struct receive_queue {
        /* Chain pages by the private ptr. */
        struct page *pages;

+       /* Page frag for packet buffer allocation. */
+       struct page_frag alloc_frag;
+
        /* RX: fragments + linear part + virtio header */
        struct scatterlist sg[MAX_SKB_FRAGS + 2];

@@ -123,11 +126,6 @@ struct virtnet_info {
        /* Lock for config space updates */
        struct mutex config_lock;

-       /* Page_frag for GFP_KERNEL packet buffer allocation when we run
-        * low on memory.
-        */
-       struct page_frag alloc_frag;
-
        /* Does the affinity hint is set for virtqueues? */
        bool affinity_hint_set;

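Note: the two hunks above move the page frag allocator out of the device-wide struct virtnet_info and into each struct receive_queue, so every receive queue refills from its own frag and queues no longer share a single allocator. For reference, struct page_frag is essentially a page plus a fill cursor; a simplified view of its definition in include/linux/mm_types.h (the offset/size fields are narrower on some 32-bit configurations):

struct page_frag {
        struct page *page;      /* current backing page (may be compound) */
        __u32 offset;           /* fill cursor: next free byte in the page */
        __u32 size;             /* total usable bytes backing the frag */
};
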
@@ -333,8 +331,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
        int num_buf = hdr->mhdr.num_buffers;
        struct page *page = virt_to_head_page(buf);
        int offset = buf - page_address(page);
-       struct sk_buff *head_skb = page_to_skb(rq, page, offset, len,
-                                              MERGE_BUFFER_LEN);
+       unsigned int truesize = max_t(unsigned int, len, MERGE_BUFFER_LEN);
+       struct sk_buff *head_skb = page_to_skb(rq, page, offset, len, truesize);
        struct sk_buff *curr_skb = head_skb;

        if (unlikely(!curr_skb))
@@ -350,11 +348,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                        dev->stats.rx_length_errors++;
                        goto err_buf;
                }
-               if (unlikely(len > MERGE_BUFFER_LEN)) {
-                       pr_debug("%s: rx error: merge buffer too long\n",
-                                dev->name);
-                       len = MERGE_BUFFER_LEN;
-               }

                page = virt_to_head_page(buf);

@@ -372,19 +365,20 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                        head_skb->truesize += nskb->truesize;
                        num_skb_frags = 0;
                }
+               truesize = max_t(unsigned int, len, MERGE_BUFFER_LEN);
                if (curr_skb != head_skb) {
                        head_skb->data_len += len;
                        head_skb->len += len;
-                       head_skb->truesize += MERGE_BUFFER_LEN;
+                       head_skb->truesize += truesize;
                }
                offset = buf - page_address(page);
                if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
                        put_page(page);
                        skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
-                                            len, MERGE_BUFFER_LEN);
+                                            len, truesize);
                } else {
                        skb_add_rx_frag(curr_skb, num_skb_frags, page,
-                                       offset, len, MERGE_BUFFER_LEN);
+                                       offset, len, truesize);
                }
        }

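Note: truesize is the amount of memory charged to the receiving socket for a fragment. The old code clamped len to MERGE_BUFFER_LEN and charged a flat MERGE_BUFFER_LEN per fragment; with this patch a posted buffer can legitimately exceed MERGE_BUFFER_LEN (it may have absorbed the tail hole of its frag page, see add_recvbuf_mergeable below), so the charge is the larger of the two values. A sketch of the rule as a helper (the helper name is illustrative, not part of the patch):

/* Charge at least the nominal per-buffer size; charge the full length
 * when the device filled a hole-extended buffer beyond it. */
static unsigned int mergeable_truesize(unsigned int len)
{
        return max_t(unsigned int, len, MERGE_BUFFER_LEN);
}

For example, with a 1536-byte MERGE_BUFFER_LEN, a 700-byte packet is charged 1536 bytes, while a 2560-byte hole-extended buffer is charged the full 2560.
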
@@ -573,25 +567,24 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)

static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
-       struct virtnet_info *vi = rq->vq->vdev->priv;
-       char *buf = NULL;
+       struct page_frag *alloc_frag = &rq->alloc_frag;
+       char *buf;
        int err;
+       unsigned int len, hole;

-       if (gfp & __GFP_WAIT) {
-               if (skb_page_frag_refill(MERGE_BUFFER_LEN, &vi->alloc_frag,
-                                        gfp)) {
-                       buf = (char *)page_address(vi->alloc_frag.page) +
-                             vi->alloc_frag.offset;
-                       get_page(vi->alloc_frag.page);
-                       vi->alloc_frag.offset += MERGE_BUFFER_LEN;
-               }
-       } else {
-               buf = netdev_alloc_frag(MERGE_BUFFER_LEN);
-       }
-       if (!buf)
+       if (unlikely(!skb_page_frag_refill(MERGE_BUFFER_LEN, alloc_frag, gfp)))
                return -ENOMEM;
+       buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
+       get_page(alloc_frag->page);
+       len = MERGE_BUFFER_LEN;
+       alloc_frag->offset += len;
+       hole = alloc_frag->size - alloc_frag->offset;
+       if (hole < MERGE_BUFFER_LEN) {
+               len += hole;
+               alloc_frag->offset += hole;
+       }

-       sg_init_one(rq->sg, buf, MERGE_BUFFER_LEN);
+       sg_init_one(rq->sg, buf, len);
        err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
        if (err < 0)
                put_page(virt_to_head_page(buf));
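Note: the hole handling above is the core of the allocation change. After carving MERGE_BUFFER_LEN bytes out of the frag page, any remainder too small to hold another buffer is folded into the buffer just carved, so its scatterlist entry covers the tail of the page and no bytes are left unaccounted for. Below is a stand-alone userspace model of that cursor arithmetic, assuming a 4096-byte frag and an illustrative 1536-byte MERGE_BUFFER_LEN; the real skb_page_frag_refill additionally handles compound pages, page refcounting, and allocation failure:

#include <stdio.h>

#define FRAG_SIZE        4096
#define MERGE_BUFFER_LEN 1536   /* illustrative value */

static unsigned int frag_offset;        /* fill cursor within the frag */

/* Return the length of the next posted buffer, folding in any tail
 * hole too small to hold a further MERGE_BUFFER_LEN buffer. */
static unsigned int alloc_mergeable_buf(void)
{
        unsigned int len = MERGE_BUFFER_LEN;
        unsigned int hole;

        if (frag_offset + MERGE_BUFFER_LEN > FRAG_SIZE)
                frag_offset = 0;        /* model a refill with a fresh frag */
        frag_offset += len;
        hole = FRAG_SIZE - frag_offset;
        if (hole < MERGE_BUFFER_LEN) {
                len += hole;            /* absorb the unusable tail */
                frag_offset += hole;
        }
        return len;
}

int main(void)
{
        for (int i = 0; i < 4; i++)
                printf("buf %d: %u bytes\n", i, alloc_mergeable_buf());
        return 0;
}

With these numbers the frag yields buffers of 1536, 2560, 1536, 2560, ... bytes: every second buffer absorbs the 1024-byte tail that could not fit another buffer.
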
@@ -612,6 +605,7 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
        int err;
        bool oom;

+       gfp |= __GFP_COLD;
        do {
                if (vi->mergeable_rx_bufs)
                        err = add_recvbuf_mergeable(rq, gfp);
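Note: __GFP_COLD asks the page allocator to prefer cache-cold pages for every receive refill path. That suits these buffers, which are written by the device via DMA before the CPU ever touches them.
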
@@ -1368,6 +1362,14 @@ static void free_receive_bufs(struct virtnet_info *vi)
        }
}

+static void free_receive_page_frags(struct virtnet_info *vi)
+{
+       int i;
+       for (i = 0; i < vi->max_queue_pairs; i++)
+               if (vi->rq[i].alloc_frag.page)
+                       put_page(vi->rq[i].alloc_frag.page);
+}
+
static void free_unused_bufs(struct virtnet_info *vi)
{
        void *buf;
@@ -1695,9 +1697,8 @@ static int virtnet_probe(struct virtio_device *vdev)
        unregister_netdev(dev);
free_vqs:
        cancel_delayed_work_sync(&vi->refill);
+       free_receive_page_frags(vi);
        virtnet_del_vqs(vi);
-       if (vi->alloc_frag.page)
-               put_page(vi->alloc_frag.page);
free_stats:
        free_percpu(vi->stats);
free:
@@ -1714,6 +1715,8 @@ static void remove_vq_common(struct virtnet_info *vi)

        free_receive_bufs(vi);

+       free_receive_page_frags(vi);
+
        virtnet_del_vqs(vi);
}

@@ -1731,8 +1734,6 @@ static void virtnet_remove(struct virtio_device *vdev)
        unregister_netdev(vi->dev);

        remove_vq_common(vi);
-       if (vi->alloc_frag.page)
-               put_page(vi->alloc_frag.page);

        flush_work(&vi->config_work);