@@ -182,15 +182,15 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
 		ena_init_io_rings_common(adapter, rxr, i);
 
 		/* TX specific ring state */
-		txr->ring_size = adapter->tx_ring_size;
+		txr->ring_size = adapter->requested_tx_ring_size;
 		txr->tx_max_header_size = ena_dev->tx_max_header_size;
 		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
 		txr->sgl_size = adapter->max_tx_sgl_size;
 		txr->smoothed_interval =
 			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
 
 		/* RX specific ring state */
-		rxr->ring_size = adapter->rx_ring_size;
+		rxr->ring_size = adapter->requested_rx_ring_size;
 		rxr->rx_copybreak = adapter->rx_copybreak;
 		rxr->sgl_size = adapter->max_rx_sgl_size;
 		rxr->smoothed_interval =
@@ -594,7 +594,6 @@ static void ena_free_rx_bufs(struct ena_adapter *adapter,
 
 /* ena_refill_all_rx_bufs - allocate all queues Rx buffers
  * @adapter: board private structure
- *
  */
 static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
 {
@@ -1638,7 +1637,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
 	ctx.qid = ena_qid;
 	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
 	ctx.msix_vector = msix_vector;
-	ctx.queue_size = adapter->tx_ring_size;
+	ctx.queue_size = tx_ring->ring_size;
 	ctx.numa_node = cpu_to_node(tx_ring->cpu);
 
 	rc = ena_com_create_io_queue(ena_dev, &ctx);
@@ -1705,7 +1704,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
 	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
 	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
 	ctx.msix_vector = msix_vector;
-	ctx.queue_size = adapter->rx_ring_size;
+	ctx.queue_size = rx_ring->ring_size;
 	ctx.numa_node = cpu_to_node(rx_ring->cpu);
 
 	rc = ena_com_create_io_queue(ena_dev, &ctx);
@@ -1752,6 +1751,112 @@ static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
 	return rc;
 }
 
+static void set_io_rings_size(struct ena_adapter *adapter,
+			      int new_tx_size, int new_rx_size)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_queues; i++) {
+		adapter->tx_ring[i].ring_size = new_tx_size;
+		adapter->rx_ring[i].ring_size = new_rx_size;
+	}
+}
+
+/* This function allows queue allocation to back off when the system is
+ * low on memory. If there is not enough memory to allocate IO queues,
+ * the driver will try to allocate smaller queues.
+ *
+ * The backoff algorithm is as follows:
+ *  1. Try to allocate TX and RX; if successful
+ *  1.1. return success
+ *
+ *  2. Divide the size of the larger of the RX and TX queues by 2
+ *     (or both, if they are the same size).
+ *
+ *  3. If TX or RX is smaller than 256
+ *  3.1. return failure
+ *  4. else
+ *  4.1. go back to 1.
+ */
+static int create_queues_with_size_backoff(struct ena_adapter *adapter)
+{
+	int rc, cur_rx_ring_size, cur_tx_ring_size;
+	int new_rx_ring_size, new_tx_ring_size;
+
+	/* current queue sizes might be set to smaller than the requested
+	 * ones due to past queue allocation failures.
+	 */
+	set_io_rings_size(adapter, adapter->requested_tx_ring_size,
+			  adapter->requested_rx_ring_size);
+
+	while (1) {
+		rc = ena_setup_all_tx_resources(adapter);
+		if (rc)
+			goto err_setup_tx;
+
+		rc = ena_create_all_io_tx_queues(adapter);
+		if (rc)
+			goto err_create_tx_queues;
+
+		rc = ena_setup_all_rx_resources(adapter);
+		if (rc)
+			goto err_setup_rx;
+
+		rc = ena_create_all_io_rx_queues(adapter);
+		if (rc)
+			goto err_create_rx_queues;
+
+		return 0;
+
+err_create_rx_queues:
+		ena_free_all_io_rx_resources(adapter);
+err_setup_rx:
+		ena_destroy_all_tx_queues(adapter);
+err_create_tx_queues:
+		ena_free_all_io_tx_resources(adapter);
+err_setup_tx:
+		if (rc != -ENOMEM) {
+			netif_err(adapter, ifup, adapter->netdev,
+				  "Queue creation failed with error code %d\n",
+				  rc);
+			return rc;
+		}
+
+		cur_tx_ring_size = adapter->tx_ring[0].ring_size;
+		cur_rx_ring_size = adapter->rx_ring[0].ring_size;
+
+		netif_err(adapter, ifup, adapter->netdev,
+			  "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
+			  cur_tx_ring_size, cur_rx_ring_size);
+
+		new_tx_ring_size = cur_tx_ring_size;
+		new_rx_ring_size = cur_rx_ring_size;
+
+		/* Decrease the size of the larger queue, or
+		 * decrease both if they are the same size.
+		 */
+		if (cur_rx_ring_size <= cur_tx_ring_size)
+			new_tx_ring_size = cur_tx_ring_size / 2;
+		if (cur_rx_ring_size >= cur_tx_ring_size)
+			new_rx_ring_size = cur_rx_ring_size / 2;
+
+		if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
+		    new_rx_ring_size < ENA_MIN_RING_SIZE) {
+			netif_err(adapter, ifup, adapter->netdev,
				  "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
+				  ENA_MIN_RING_SIZE);
+			return rc;
+		}
+
+		netif_err(adapter, ifup, adapter->netdev,
+			  "Retrying queue creation with sizes TX=%d, RX=%d\n",
+			  new_tx_ring_size,
+			  new_rx_ring_size);
+
+		set_io_rings_size(adapter, new_tx_ring_size,
+				  new_rx_ring_size);
+	}
+}
+
 static int ena_up(struct ena_adapter *adapter)
 {
 	int rc, i;
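To make the retry loop above concrete, here is a minimal userspace sketch of the same halving backoff, decoupled from the driver. The try_alloc() helper and its fixed memory budget are hypothetical stand-ins for the four setup/create calls in the patch; MIN_RING_SIZE mirrors ENA_MIN_RING_SIZE (256, per the comment above).

#include <stdbool.h>
#include <stdio.h>

#define MIN_RING_SIZE 256 /* mirrors ENA_MIN_RING_SIZE */

/* Hypothetical allocator: pretend the system can only satisfy
 * requests whose combined entry count fits a fixed budget.
 */
static bool try_alloc(int tx_size, int rx_size)
{
	return tx_size + rx_size <= 2048; /* assumed memory budget */
}

/* Same shape as create_queues_with_size_backoff(): start from the
 * requested sizes, halve the larger side (or both when equal) after
 * each failure, and give up once a size would drop below the minimum.
 */
static int alloc_with_backoff(int req_tx, int req_rx)
{
	int tx = req_tx, rx = req_rx;

	while (1) {
		if (try_alloc(tx, rx)) {
			printf("allocated TX=%d RX=%d\n", tx, rx);
			return 0;
		}

		int new_tx = tx, new_rx = rx;

		if (rx <= tx)
			new_tx = tx / 2;
		if (rx >= tx)
			new_rx = rx / 2;

		if (new_tx < MIN_RING_SIZE || new_rx < MIN_RING_SIZE)
			return -1; /* would go below the minimum */

		tx = new_tx;
		rx = new_rx;
	}
}

int main(void)
{
	return alloc_with_backoff(8192, 1024) ? 1 : 0;
}

With TX=8192, RX=1024 against the assumed 2048-entry budget, the loop halves only the TX side (the larger queue) through 4096, 2048, and 1024, then succeeds at TX=1024, RX=1024.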
@@ -1771,25 +1876,9 @@ static int ena_up(struct ena_adapter *adapter)
 	if (rc)
 		goto err_req_irq;
 
-	/* allocate transmit descriptors */
-	rc = ena_setup_all_tx_resources(adapter);
+	rc = create_queues_with_size_backoff(adapter);
 	if (rc)
-		goto err_setup_tx;
-
-	/* allocate receive descriptors */
-	rc = ena_setup_all_rx_resources(adapter);
-	if (rc)
-		goto err_setup_rx;
-
-	/* Create TX queues */
-	rc = ena_create_all_io_tx_queues(adapter);
-	if (rc)
-		goto err_create_tx_queues;
-
-	/* Create RX queues */
-	rc = ena_create_all_io_rx_queues(adapter);
-	if (rc)
-		goto err_create_rx_queues;
+		goto err_create_queues_with_backoff;
 
 	rc = ena_up_complete(adapter);
 	if (rc)
@@ -1818,14 +1907,11 @@ static int ena_up(struct ena_adapter *adapter)
 	return rc;
 
 err_up:
-	ena_destroy_all_rx_queues(adapter);
-err_create_rx_queues:
 	ena_destroy_all_tx_queues(adapter);
-err_create_tx_queues:
-	ena_free_all_io_rx_resources(adapter);
-err_setup_rx:
 	ena_free_all_io_tx_resources(adapter);
-err_setup_tx:
+	ena_destroy_all_rx_queues(adapter);
+	ena_free_all_io_rx_resources(adapter);
+err_create_queues_with_backoff:
 	ena_free_io_irq(adapter);
 err_req_irq:
 	ena_del_napi(adapter);
@@ -3296,17 +3382,14 @@ static int ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
 	max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
 	max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
 
-	tx_queue_size = min_t(u32, tx_queue_size, max_tx_queue_size);
-	rx_queue_size = min_t(u32, rx_queue_size, max_rx_queue_size);
+	tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
+				  max_tx_queue_size);
+	rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
+				  max_rx_queue_size);
 
 	tx_queue_size = rounddown_pow_of_two(tx_queue_size);
 	rx_queue_size = rounddown_pow_of_two(rx_queue_size);
 
-	if (unlikely(!rx_queue_size || !tx_queue_size)) {
-		dev_err(&ctx->pdev->dev, "Invalid queue size\n");
-		return -EFAULT;
-	}
-
 	ctx->max_tx_queue_size = max_tx_queue_size;
 	ctx->max_rx_queue_size = max_rx_queue_size;
 	ctx->tx_queue_size = tx_queue_size;
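The clamp_val() change above also explains why the old !rx_queue_size || !tx_queue_size check could be deleted rather than moved: both sizes are now bounded below by ENA_MIN_RING_SIZE before the power-of-two rounddown, so the result can never reach zero. A standalone sketch of the arithmetic follows (clamp_val_u() and rounddown_pow_of_two_u() are simplified userspace stand-ins for the kernel helpers; the inputs are assumed for illustration).

#include <stdio.h>

#define ENA_MIN_RING_SIZE 256

/* Simplified stand-ins for the kernel's clamp_val() and
 * rounddown_pow_of_two() helpers.
 */
static unsigned int clamp_val_u(unsigned int v, unsigned int lo,
				unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static unsigned int rounddown_pow_of_two_u(unsigned int v)
{
	unsigned int p = 1;

	while (p * 2 <= v)
		p *= 2;
	return p;
}

int main(void)
{
	/* Assumed inputs: a too-small value of 96 and a cap of 8192. */
	unsigned int q = clamp_val_u(96, ENA_MIN_RING_SIZE, 8192);

	q = rounddown_pow_of_two_u(q);
	printf("%u\n", q); /* prints 256; never zero */
	return 0;
}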
@@ -3436,8 +3519,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 	adapter->reset_reason = ENA_REGS_RESET_NORMAL;
 
-	adapter->tx_ring_size = calc_queue_ctx.tx_queue_size;
-	adapter->rx_ring_size = calc_queue_ctx.rx_queue_size;
+	adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
+	adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
 	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
 	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
 	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
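Taken together, the renames in this patch split ring sizing into two notions: requested_{tx,rx}_ring_size hold what probe computed, while each ring's ring_size holds the size actually in effect, which the backoff path may have reduced. A compiling sketch of that relationship (the field subset and the u32 typedef are assumptions; the real struct ena_adapter and struct ena_ring carry far more state):

#include <stdint.h>

typedef uint32_t u32;

/* Field subset inferred from this diff, not the full kernel structs. */
struct ring {
	u32 ring_size; /* size in effect; backoff may have shrunk it */
};

struct adapter {
	u32 requested_tx_ring_size; /* what probe computed */
	u32 requested_rx_ring_size;
	struct ring tx_ring[1];
	struct ring rx_ring[1];
};

int main(void)
{
	struct adapter a = {
		.requested_tx_ring_size = 1024,
		.requested_rx_ring_size = 1024,
	};

	/* ena_init_io_rings() seeds each ring from the requested size;
	 * create_queues_with_size_backoff() may later halve ring_size
	 * while leaving the requested_* fields untouched.
	 */
	a.tx_ring[0].ring_size = a.requested_tx_ring_size;
	a.rx_ring[0].ring_size = a.requested_rx_ring_size;
	return (int)(a.tx_ring[0].ring_size != 1024);
}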