@@ -2383,6 +2383,90 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
         }
 }
 
+static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
+                          unsigned char *va)
+{
+#define HNS3_NEED_ADD_FRAG 1
+        struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+        struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+        struct sk_buff *skb;
+
+        ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
+        skb = ring->skb;
+        if (unlikely(!skb)) {
+                netdev_err(netdev, "alloc rx skb fail\n");
+
+                u64_stats_update_begin(&ring->syncp);
+                ring->stats.sw_err_cnt++;
+                u64_stats_update_end(&ring->syncp);
+
+                return -ENOMEM;
+        }
+
+        prefetchw(skb->data);
+
+        ring->pending_buf = 1;
+        if (length <= HNS3_RX_HEAD_SIZE) {
+                memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+
+                /* We can reuse buffer as-is, just make sure it is local */
+                if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+                        desc_cb->reuse_flag = 1;
+                else /* This page cannot be reused so discard it */
+                        put_page(desc_cb->priv);
+
+                ring_ptr_move_fw(ring, next_to_clean);
+                return 0;
+        }
+        u64_stats_update_begin(&ring->syncp);
+        ring->stats.seg_pkt_cnt++;
+        u64_stats_update_end(&ring->syncp);
+
+        ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
+        __skb_put(skb, ring->pull_len);
+        hns3_nic_reuse_page(skb, 0, ring, ring->pull_len,
+                            desc_cb);
+        ring_ptr_move_fw(ring, next_to_clean);
+
+        return HNS3_NEED_ADD_FRAG;
+}
+
+static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
+                         struct sk_buff **out_skb, bool pending)
+{
+        struct sk_buff *skb = *out_skb;
+        struct hns3_desc_cb *desc_cb;
+        struct hns3_desc *pre_desc;
+        u32 bd_base_info;
+        int pre_bd;
+
+        /* if there is pending bd, the SW param next_to_clean has moved
+         * to next and the next is NULL
+         */
+        if (pending) {
+                pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
+                         ring->desc_num;
+                pre_desc = &ring->desc[pre_bd];
+                bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
+        } else {
+                bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+        }
+
+        while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
+                desc = &ring->desc[ring->next_to_clean];
+                desc_cb = &ring->desc_cb[ring->next_to_clean];
+                bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+                if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))
+                        return -ENXIO;
+
+                hns3_nic_reuse_page(skb, ring->pending_buf, ring, 0, desc_cb);
+                ring_ptr_move_fw(ring, next_to_clean);
+                ring->pending_buf++;
+        }
+
+        return 0;
+}
+
 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
                                      struct sk_buff *skb)
 {
@@ -2399,18 +2483,16 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
 }
 
 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
-                             struct sk_buff **out_skb, int *out_bnum)
+                             struct sk_buff **out_skb)
 {
         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+        struct sk_buff *skb = ring->skb;
         struct hns3_desc_cb *desc_cb;
         struct hns3_desc *desc;
-        struct sk_buff *skb;
-        unsigned char *va;
         u32 bd_base_info;
-        int pull_len;
         u32 l234info;
         int length;
-        int bnum;
+        int ret;
 
         desc = &ring->desc[ring->next_to_clean];
         desc_cb = &ring->desc_cb[ring->next_to_clean];
@@ -2422,9 +2504,10 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 
         /* Check valid BD */
         if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
-                return -EFAULT;
+                return -ENXIO;
 
-        va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
+        if (!skb)
+                ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
 
         /* Prefetch first cache line of first page
          * Idea is to cache few bytes of the header of the packet. Our L1 Cache
@@ -2433,62 +2516,42 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
          * lines. In such a case, single fetch would suffice to cache in the
          * relevant part of the header.
          */
-        prefetch(va);
+        prefetch(ring->va);
 #if L1_CACHE_BYTES < 128
-        prefetch(va + L1_CACHE_BYTES);
+        prefetch(ring->va + L1_CACHE_BYTES);
 #endif
 
-        skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
-                                        HNS3_RX_HEAD_SIZE);
-        if (unlikely(!skb)) {
-                netdev_err(netdev, "alloc rx skb fail\n");
+        if (!skb) {
+                ret = hns3_alloc_skb(ring, length, ring->va);
+                *out_skb = skb = ring->skb;
 
-                u64_stats_update_begin(&ring->syncp);
-                ring->stats.sw_err_cnt++;
-                u64_stats_update_end(&ring->syncp);
-
-                return -ENOMEM;
-        }
-
-        prefetchw(skb->data);
-
-        bnum = 1;
-        if (length <= HNS3_RX_HEAD_SIZE) {
-                memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
-
-                /* We can reuse buffer as-is, just make sure it is local */
-                if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
-                        desc_cb->reuse_flag = 1;
-                else /* This page cannot be reused so discard it */
-                        put_page(desc_cb->priv);
+                if (ret < 0) /* alloc buffer fail */
+                        return ret;
+                if (ret > 0) { /* need add frag */
+                        ret = hns3_add_frag(ring, desc, &skb, false);
+                        if (ret)
+                                return ret;
 
-                ring_ptr_move_fw(ring, next_to_clean);
+                        /* As the head data may be changed when GRO enable, copy
+                         * the head data in after other data rx completed
+                         */
+                        memcpy(skb->data, ring->va,
+                               ALIGN(ring->pull_len, sizeof(long)));
+                }
         } else {
-                u64_stats_update_begin(&ring->syncp);
-                ring->stats.seg_pkt_cnt++;
-                u64_stats_update_end(&ring->syncp);
-
-                pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
-
-                memcpy(__skb_put(skb, pull_len), va,
-                       ALIGN(pull_len, sizeof(long)));
-
-                hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
-                ring_ptr_move_fw(ring, next_to_clean);
+                ret = hns3_add_frag(ring, desc, &skb, true);
+                if (ret)
+                        return ret;
 
-                while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
-                        desc = &ring->desc[ring->next_to_clean];
-                        desc_cb = &ring->desc_cb[ring->next_to_clean];
-                        bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
-                        hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
-                        ring_ptr_move_fw(ring, next_to_clean);
-                        bnum++;
-                }
+                /* As the head data may be changed when GRO enable, copy
+                 * the head data in after other data rx completed
+                 */
+                memcpy(skb->data, ring->va,
+                       ALIGN(ring->pull_len, sizeof(long)));
         }
 
-        *out_bnum = bnum;
-
         l234info = le32_to_cpu(desc->rx.l234_info);
+        bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
 
         /* Based on hw strategy, the tag offloaded will be stored at
          * ot_vlan_tag in two layer tag case, and stored at vlan_tag
@@ -2539,6 +2602,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
         ring->tqp_vector->rx_group.total_bytes += skb->len;
 
         hns3_rx_checksum(ring, skb, desc);
+        *out_skb = skb;
         hns3_set_rx_skb_rss_type(ring, skb);
 
         return 0;
@@ -2551,9 +2615,9 @@ int hns3_clean_rx_ring(
 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
         int recv_pkts, recv_bds, clean_count, err;
-        int unused_count = hns3_desc_unused(ring);
-        struct sk_buff *skb = NULL;
-        int num, bnum = 0;
+        int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
+        struct sk_buff *skb = ring->skb;
+        int num;
 
         num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
         rmb(); /* Make sure num taken effect before the other data is touched */
@@ -2567,24 +2631,32 @@ int hns3_clean_rx_ring(
                         hns3_nic_alloc_rx_buffers(ring,
                                                   clean_count + unused_count);
                         clean_count = 0;
-                        unused_count = hns3_desc_unused(ring);
+                        unused_count = hns3_desc_unused(ring) -
+                                       ring->pending_buf;
                 }
 
                 /* Poll one pkt */
-                err = hns3_handle_rx_bd(ring, &skb, &bnum);
+                err = hns3_handle_rx_bd(ring, &skb);
                 if (unlikely(!skb)) /* This fault cannot be repaired */
                         goto out;
 
-                recv_bds += bnum;
-                clean_count += bnum;
-                if (unlikely(err)) { /* Do jump the err */
-                        recv_pkts++;
+                if (err == -ENXIO) { /* Do not get FE for the packet */
+                        goto out;
+                } else if (unlikely(err)) { /* Do jump the err */
+                        recv_bds += ring->pending_buf;
+                        clean_count += ring->pending_buf;
+                        ring->skb = NULL;
+                        ring->pending_buf = 0;
                         continue;
                 }
 
                 /* Do update ip stack process */
                 skb->protocol = eth_type_trans(skb, netdev);
                 rx_fn(ring, skb);
+                recv_bds += ring->pending_buf;
+                clean_count += ring->pending_buf;
+                ring->skb = NULL;
+                ring->pending_buf = 0;
 
                 recv_pkts++;
         }
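
For orientation, below is a minimal stand-alone sketch (not driver code) of the contract the patch establishes: hns3_alloc_skb() returns a negative errno on allocation failure, 0 when the whole packet fits in the head buffer, and 1 (HNS3_NEED_ADD_FRAG) when further buffer descriptors must be chained; hns3_add_frag() returns -ENXIO while the frame-end (FE) descriptor has not been written yet, so hns3_clean_rx_ring() leaves the poll loop and resumes the same packet on the next NAPI round via ring->skb and ring->pending_buf. Every name prefixed with toy_ is hypothetical and exists only for this illustration.

/*
 * Toy model of the 0 / NEED_ADD_FRAG / -ENXIO return-value contract.
 * None of these symbols belong to the hns3 driver.
 */
#include <errno.h>
#include <stdio.h>

#define NEED_ADD_FRAG 1

struct toy_ring {
        int pending_buf;   /* BDs consumed by the in-flight packet */
        int have_skb;      /* stands in for ring->skb != NULL */
        int frags_ready;   /* BDs the "hardware" has completed so far */
        int frags_total;   /* BDs the whole packet will need */
};

/* Mirrors hns3_alloc_skb(): claim the first BD of a packet. */
static int toy_alloc(struct toy_ring *r)
{
        r->have_skb = 1;
        r->pending_buf = 1;
        return (r->frags_total > 1) ? NEED_ADD_FRAG : 0;
}

/* Mirrors hns3_add_frag(): chain BDs until the frame-end BD is seen. */
static int toy_add_frag(struct toy_ring *r)
{
        while (r->pending_buf < r->frags_total) {
                if (r->pending_buf >= r->frags_ready)
                        return -ENXIO;  /* next BD not valid yet */
                r->pending_buf++;
        }
        return 0;                       /* FE reached, packet complete */
}

/* Mirrors one hns3_clean_rx_ring() pass over a single packet. */
static void toy_poll(struct toy_ring *r)
{
        int ret = r->have_skb ? toy_add_frag(r) : toy_alloc(r);

        if (ret == NEED_ADD_FRAG)
                ret = toy_add_frag(r);
        if (ret == -ENXIO) {
                printf("packet incomplete, keep pending_buf=%d for next poll\n",
                       r->pending_buf);
                return;
        }
        printf("packet done, hand %d BDs back to the ring\n", r->pending_buf);
        r->have_skb = 0;
        r->pending_buf = 0;
}

int main(void)
{
        struct toy_ring r = { .frags_total = 3, .frags_ready = 2 };

        toy_poll(&r);           /* only 2 of 3 BDs ready: stays pending */
        r.frags_ready = 3;      /* "hardware" finishes the packet */
        toy_poll(&r);           /* resumes the same packet and completes */
        return 0;
}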