/* An RX page can be reused only when the driver still owns every reference
 * to it: the kernel's page refcount must equal our cached pagecnt_bias,
 * i.e. no reference has escaped up the stack with a previous packet.
 */
static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
{
	return page_count(cb->priv) == cb->pagecnt_bias;
}
/* Copy a small RX payload into a freshly allocated page frag (a "bounce
 * buffer") and attach it to @skb, so that the original DMA page can stay
 * on the ring and be reused.
 *
 * @skb:	skb being built for this packet
 * @i:		frag index at which to attach the copied data
 * @ring:	RX ring owning the current descriptor and buffer
 * @pull_len:	bytes already pulled into the skb linear area
 * @desc_cb:	control block describing the source RX buffer
 *
 * Return: 0 on success; -ENOMEM if napi_alloc_frag() fails, in which case
 * the failure is counted in ring->stats.frag_alloc_err and logged
 * (rate-limited), and desc_cb is left untouched.
 */
static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
				    struct hns3_enet_ring *ring,
				    int pull_len,
				    struct hns3_desc_cb *desc_cb)
{
	struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
	u32 frag_offset = desc_cb->page_offset + pull_len;
	int size = le16_to_cpu(desc->rx.size);
	/* Payload remaining after the part already pulled into the linear
	 * area; caller guarantees pull_len <= size.
	 */
	u32 frag_size = size - pull_len;
	void *frag = napi_alloc_frag(frag_size);

	if (unlikely(!frag)) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.frag_alloc_err++;
		u64_stats_update_end(&ring->syncp);

		hns3_rl_err(ring_to_netdev(ring),
			    "failed to allocate rx frag\n");
		return -ENOMEM;
	}

	/* The original buffer stays with the ring (reuse_flag = 1); the
	 * packet data is bounced into the new frag instead.
	 */
	desc_cb->reuse_flag = 1;
	memcpy(frag, desc_cb->buf + frag_offset, frag_size);
	skb_add_rx_frag(skb, i, virt_to_page(frag),
			offset_in_page(frag), frag_size, frag_size);

	u64_stats_update_begin(&ring->syncp);
	ring->stats.frag_alloc++;
	u64_stats_update_end(&ring->syncp);
	return 0;
}
3549
3581
static void hns3_nic_reuse_page (struct sk_buff * skb , int i ,
3550
3582
struct hns3_enet_ring * ring , int pull_len ,
3551
3583
struct hns3_desc_cb * desc_cb )
	int size = le16_to_cpu(desc->rx.size);	/* bytes DMA'd into this buffer */
	u32 truesize = hns3_buf_size(ring);
	u32 frag_size = size - pull_len;	/* payload left after the header pull */
	int ret = 0;
	bool reused;
3559
3592
3560
3593
if (ring -> page_pool ) {
@@ -3589,27 +3622,9 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
3589
3622
desc_cb -> page_offset = 0 ;
3590
3623
desc_cb -> reuse_flag = 1 ;
3591
3624
} else if (frag_size <= ring -> rx_copybreak ) {
3592
- void * frag = napi_alloc_frag (frag_size );
3593
-
3594
- if (unlikely (!frag )) {
3595
- u64_stats_update_begin (& ring -> syncp );
3596
- ring -> stats .frag_alloc_err ++ ;
3597
- u64_stats_update_end (& ring -> syncp );
3598
-
3599
- hns3_rl_err (ring_to_netdev (ring ),
3600
- "failed to allocate rx frag\n" );
3625
+ ret = hns3_handle_rx_copybreak (skb , i , ring , pull_len , desc_cb );
3626
+ if (ret )
3601
3627
goto out ;
3602
- }
3603
-
3604
- desc_cb -> reuse_flag = 1 ;
3605
- memcpy (frag , desc_cb -> buf + frag_offset , frag_size );
3606
- skb_add_rx_frag (skb , i , virt_to_page (frag ),
3607
- offset_in_page (frag ), frag_size , frag_size );
3608
-
3609
- u64_stats_update_begin (& ring -> syncp );
3610
- ring -> stats .frag_alloc ++ ;
3611
- u64_stats_update_end (& ring -> syncp );
3612
- return ;
3613
3628
}
3614
3629
3615
3630
out :
0 commit comments