@@ -1165,6 +1165,7 @@ static int __ibmvnic_open(struct net_device *netdev)
                 if (prev_state == VNIC_CLOSED)
                         enable_irq(adapter->tx_scrq[i]->irq);
                 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
+                netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
         }
 
         rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
@@ -1523,16 +1524,93 @@ static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
         return 0;
 }
 
+static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
+                                         struct ibmvnic_sub_crq_queue *tx_scrq)
+{
+        struct ibmvnic_ind_xmit_queue *ind_bufp;
+        struct ibmvnic_tx_buff *tx_buff;
+        struct ibmvnic_tx_pool *tx_pool;
+        union sub_crq tx_scrq_entry;
+        int queue_num;
+        int entries;
+        int index;
+        int i;
+
+        ind_bufp = &tx_scrq->ind_buf;
+        entries = (u64)ind_bufp->index;
+        queue_num = tx_scrq->pool_index;
+
+        for (i = entries - 1; i >= 0; --i) {
+                tx_scrq_entry = ind_bufp->indir_arr[i];
+                if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
+                        continue;
+                index = be32_to_cpu(tx_scrq_entry.v1.correlator);
+                if (index & IBMVNIC_TSO_POOL_MASK) {
+                        tx_pool = &adapter->tso_pool[queue_num];
+                        index &= ~IBMVNIC_TSO_POOL_MASK;
+                } else {
+                        tx_pool = &adapter->tx_pool[queue_num];
+                }
+                tx_pool->free_map[tx_pool->consumer_index] = index;
+                tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
+                                          tx_pool->num_buffers - 1 :
+                                          tx_pool->consumer_index - 1;
+                tx_buff = &tx_pool->tx_buff[index];
+                adapter->netdev->stats.tx_packets--;
+                adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
+                adapter->tx_stats_buffers[queue_num].packets--;
+                adapter->tx_stats_buffers[queue_num].bytes -=
+                                                tx_buff->skb->len;
+                dev_kfree_skb_any(tx_buff->skb);
+                tx_buff->skb = NULL;
+                adapter->netdev->stats.tx_dropped++;
+        }
+        ind_bufp->index = 0;
+        if (atomic_sub_return(entries, &tx_scrq->used) <=
+            (adapter->req_tx_entries_per_subcrq / 2) &&
+            __netif_subqueue_stopped(adapter->netdev, queue_num)) {
+                netif_wake_subqueue(adapter->netdev, queue_num);
+                netdev_dbg(adapter->netdev, "Started queue %d\n",
+                           queue_num);
+        }
+}
+
+static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
+                                 struct ibmvnic_sub_crq_queue *tx_scrq)
+{
+        struct ibmvnic_ind_xmit_queue *ind_bufp;
+        u64 dma_addr;
+        u64 entries;
+        u64 handle;
+        int rc;
+
+        ind_bufp = &tx_scrq->ind_buf;
+        dma_addr = (u64)ind_bufp->indir_dma;
+        entries = (u64)ind_bufp->index;
+        handle = tx_scrq->handle;
+
+        if (!entries)
+                return 0;
+        rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
+        if (rc)
+                ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
+        else
+                ind_bufp->index = 0;
+        return 0;
+}
+
 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
         int queue_num = skb_get_queue_mapping(skb);
         u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
         struct device *dev = &adapter->vdev->dev;
+        struct ibmvnic_ind_xmit_queue *ind_bufp;
         struct ibmvnic_tx_buff *tx_buff = NULL;
         struct ibmvnic_sub_crq_queue *tx_scrq;
         struct ibmvnic_tx_pool *tx_pool;
         unsigned int tx_send_failed = 0;
+        netdev_tx_t ret = NETDEV_TX_OK;
         unsigned int tx_map_failed = 0;
         unsigned int tx_dropped = 0;
         unsigned int tx_packets = 0;
@@ -1546,8 +1624,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
         unsigned char *dst;
         int index = 0;
         u8 proto = 0;
-        u64 handle;
-        netdev_tx_t ret = NETDEV_TX_OK;
+
+        tx_scrq = adapter->tx_scrq[queue_num];
+        txq = netdev_get_tx_queue(netdev, queue_num);
+        ind_bufp = &tx_scrq->ind_buf;
 
         if (test_bit(0, &adapter->resetting)) {
                 if (!netif_subqueue_stopped(netdev, skb))
@@ -1557,31 +1637,30 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
                 tx_send_failed++;
                 tx_dropped++;
                 ret = NETDEV_TX_OK;
+                ibmvnic_tx_scrq_flush(adapter, tx_scrq);
                 goto out;
         }
 
         if (ibmvnic_xmit_workarounds(skb, netdev)) {
                 tx_dropped++;
                 tx_send_failed++;
                 ret = NETDEV_TX_OK;
+                ibmvnic_tx_scrq_flush(adapter, tx_scrq);
                 goto out;
         }
         if (skb_is_gso(skb))
                 tx_pool = &adapter->tso_pool[queue_num];
         else
                 tx_pool = &adapter->tx_pool[queue_num];
 
-        tx_scrq = adapter->tx_scrq[queue_num];
-        txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
-        handle = tx_scrq->handle;
-
         index = tx_pool->free_map[tx_pool->consumer_index];
 
         if (index == IBMVNIC_INVALID_MAP) {
                 dev_kfree_skb_any(skb);
                 tx_send_failed++;
                 tx_dropped++;
                 ret = NETDEV_TX_OK;
+                ibmvnic_tx_scrq_flush(adapter, tx_scrq);
                 goto out;
         }
 
@@ -1666,55 +1745,29 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
                 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
                 hdrs += 2;
         }
-        /* determine if l2/3/4 headers are sent to firmware */
-        if ((*hdrs >> 7) & 1) {
+
+        if ((*hdrs >> 7) & 1)
                 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
-                tx_crq.v1.n_crq_elem = num_entries;
-                tx_buff->num_entries = num_entries;
-                tx_buff->indir_arr[0] = tx_crq;
-                tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
-                                                    sizeof(tx_buff->indir_arr),
-                                                    DMA_TO_DEVICE);
-                if (dma_mapping_error(dev, tx_buff->indir_dma)) {
-                        dev_kfree_skb_any(skb);
-                        tx_buff->skb = NULL;
-                        if (!firmware_has_feature(FW_FEATURE_CMO))
-                                dev_err(dev, "tx: unable to map descriptor array\n");
-                        tx_map_failed++;
-                        tx_dropped++;
-                        ret = NETDEV_TX_OK;
-                        goto tx_err_out;
-                }
-                lpar_rc = send_subcrq_indirect(adapter, handle,
-                                               (u64)tx_buff->indir_dma,
-                                               (u64)num_entries);
-                dma_unmap_single(dev, tx_buff->indir_dma,
-                                 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
-        } else {
-                tx_buff->num_entries = num_entries;
-                lpar_rc = send_subcrq(adapter, handle,
-                                      &tx_crq);
-        }
-        if (lpar_rc != H_SUCCESS) {
-                if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
-                        dev_err_ratelimited(dev, "tx: send failed\n");
-                dev_kfree_skb_any(skb);
-                tx_buff->skb = NULL;
 
-                if (lpar_rc == H_CLOSED || adapter->failover_pending) {
-                        /* Disable TX and report carrier off if queue is closed
-                         * or pending failover.
-                         * Firmware guarantees that a signal will be sent to the
-                         * driver, triggering a reset or some other action.
-                         */
-                        netif_tx_stop_all_queues(netdev);
-                        netif_carrier_off(netdev);
-                }
+        tx_crq.v1.n_crq_elem = num_entries;
+        tx_buff->num_entries = num_entries;
+        /* flush buffer if current entry can not fit */
+        if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
+                lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+                if (lpar_rc != H_SUCCESS)
+                        goto tx_flush_err;
+        }
 
-                tx_send_failed++;
-                tx_dropped++;
-                ret = NETDEV_TX_OK;
-                goto tx_err_out;
+        tx_buff->indir_arr[0] = tx_crq;
+        memcpy(&ind_bufp->indir_arr[ind_bufp->index], tx_buff->indir_arr,
+               num_entries * sizeof(struct ibmvnic_generic_scrq));
+        ind_bufp->index += num_entries;
+        if (__netdev_tx_sent_queue(txq, skb->len,
+                                   netdev_xmit_more() &&
+                                   ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
+                lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+                if (lpar_rc != H_SUCCESS)
+                        goto tx_err;
         }
 
         if (atomic_add_return(num_entries, &tx_scrq->used)
@@ -1729,14 +1782,26 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
         ret = NETDEV_TX_OK;
         goto out;
 
-tx_err_out:
-        /* roll back consumer index and map array*/
-        if (tx_pool->consumer_index == 0)
-                tx_pool->consumer_index =
-                        tx_pool->num_buffers - 1;
-        else
-                tx_pool->consumer_index--;
-        tx_pool->free_map[tx_pool->consumer_index] = index;
+tx_flush_err:
+        dev_kfree_skb_any(skb);
+        tx_buff->skb = NULL;
+        tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
+                                  tx_pool->num_buffers - 1 :
+                                  tx_pool->consumer_index - 1;
+        tx_dropped++;
+tx_err:
+        if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
+                dev_err_ratelimited(dev, "tx: send failed\n");
+
+        if (lpar_rc == H_CLOSED || adapter->failover_pending) {
+                /* Disable TX and report carrier off if queue is closed
+                 * or pending failover.
+                 * Firmware guarantees that a signal will be sent to the
+                 * driver, triggering a reset or some other action.
+                 */
+                netif_tx_stop_all_queues(netdev);
+                netif_carrier_off(netdev);
+        }
 out:
         netdev->stats.tx_dropped += tx_dropped;
         netdev->stats.tx_bytes += tx_bytes;
@@ -3117,6 +3182,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
         struct device *dev = &adapter->vdev->dev;
         struct ibmvnic_tx_pool *tx_pool;
         struct ibmvnic_tx_buff *txbuff;
+        struct netdev_queue *txq;
         union sub_crq *next;
         int index;
         int i, j;
@@ -3125,6 +3191,8 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
         while (pending_scrq(adapter, scrq)) {
                 unsigned int pool = scrq->pool_index;
                 int num_entries = 0;
+                int total_bytes = 0;
+                int num_packets = 0;
 
                 next = ibmvnic_next_scrq(adapter, scrq);
                 for (i = 0; i < next->tx_comp.num_comps; i++) {
@@ -3150,13 +3218,16 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
                                 txbuff->data_dma[j] = 0;
                         }
 
-                        if (txbuff->last_frag) {
-                                dev_kfree_skb_any(txbuff->skb);
+                        num_packets++;
+                        num_entries += txbuff->num_entries;
+                        if (txbuff->skb) {
+                                total_bytes += txbuff->skb->len;
+                                dev_consume_skb_irq(txbuff->skb);
                                 txbuff->skb = NULL;
+                        } else {
+                                netdev_warn(adapter->netdev,
+                                            "TX completion received with NULL socket buffer\n");
                         }
-
-                        num_entries += txbuff->num_entries;
-
                         tx_pool->free_map[tx_pool->producer_index] = index;
                         tx_pool->producer_index =
                                 (tx_pool->producer_index + 1) %
@@ -3165,6 +3236,9 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
                 /* remove tx_comp scrq*/
                 next->tx_comp.first = 0;
 
+                txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
+                netdev_tx_completed_queue(txq, num_packets, total_bytes);
+
                 if (atomic_sub_return(num_entries, &scrq->used) <=
                     (adapter->req_tx_entries_per_subcrq / 2) &&
                     __netif_subqueue_stopped(adapter->netdev,
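Note (not part of the commit): the batching rule the xmit hunks implement can be summarized outside the driver. The sketch below is an illustrative, self-contained C rendering under stated assumptions -- the struct, the flush callback, and the MAX_IND_DESCS value of 16 are all made up for the example, with MAX_IND_DESCS only mirroring the role IBMVNIC_MAX_IND_DESCS plays above. Descriptors accumulate in a per-queue indirect buffer; the buffer is flushed before queuing a packet whose descriptors would not fit, and again after queuing unless the stack has promised more packets (the netdev_xmit_more() test passed to __netdev_tx_sent_queue()) and there is still room to keep batching.

/*
 * Illustrative sketch only.  Types, MAX_IND_DESCS and the flush callback are
 * hypothetical stand-ins for the driver's real structures and hcall path.
 */
#include <stdbool.h>
#include <stddef.h>

#define MAX_IND_DESCS 16

struct ind_buf {
        size_t index;                           /* descriptors queued so far */
        unsigned long long desc[MAX_IND_DESCS];
};

/* Returns 0 on success; a failed flush is the caller's cue to unwind. */
typedef int (*flush_fn)(struct ind_buf *buf);

static int queue_descs(struct ind_buf *buf, const unsigned long long *descs,
                       size_t n, bool more_coming, flush_fn flush)
{
        int rc;

        if (n == 0 || n > MAX_IND_DESCS)
                return -1;      /* a single packet must fit in one buffer */

        /* Flush first if the new entry cannot fit in the indirect buffer. */
        if (buf->index + n > MAX_IND_DESCS) {
                rc = flush(buf);
                if (rc)
                        return rc;
        }

        for (size_t i = 0; i < n; i++)
                buf->desc[buf->index++] = descs[i];

        /* Keep batching only while more packets are promised and room
         * remains; otherwise ring the doorbell now, mirroring the
         * xmit_more test in ibmvnic_xmit() above.
         */
        if (!(more_coming && buf->index < MAX_IND_DESCS))
                return flush(buf);
        return 0;
}

On the completion side the commit pairs this with netdev_tx_completed_queue() in ibmvnic_complete_tx() and netdev_tx_reset_queue() in __ibmvnic_open(), so byte-queue-limit accounting stays balanced across queue resets.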