@@ -1601,6 +1601,44 @@ static void rvu_enable_mbox_intr(struct rvu *rvu)
 		    INTR_MASK(hw->total_pfs) & ~1ULL);
 }
 
+static void rvu_flr_handler(struct work_struct *work)
+{
+	struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
+	struct rvu *rvu = flrwork->rvu;
+	u16 pf;
+
+	pf = flrwork - rvu->flr_wrk;
+
+	/* Signal FLR finish */
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
+
+	/* Enable interrupt */
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
+}
+
+static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu *rvu = (struct rvu *)rvu_irq;
+	u64 intr;
+	u8 pf;
+
+	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
+
+	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+		if (intr & (1ULL << pf)) {
+			/* PF is already dead, do only AF related operations */
+			queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
+			/* clear interrupt */
+			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
+				    BIT_ULL(pf));
+			/* Disable the interrupt */
+			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
+				    BIT_ULL(pf));
+		}
+	}
+	return IRQ_HANDLED;
+}
+
 static void rvu_unregister_interrupts(struct rvu *rvu)
 {
 	int irq;
@@ -1609,6 +1647,10 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
 		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
 
+	/* Disable the PF FLR interrupt */
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
+		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
 	for (irq = 0; irq < rvu->num_vec; irq++) {
 		if (rvu->irq_allocated[irq])
 			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
@@ -1660,13 +1702,79 @@ static int rvu_register_interrupts(struct rvu *rvu)
 	/* Enable mailbox interrupts from all PFs */
 	rvu_enable_mbox_intr(rvu);
 
+	/* Register FLR interrupt handler */
+	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
+		"RVUAF FLR");
+	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
+			  rvu_flr_intr_handler, 0,
+			  &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
+			  rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for FLR\n");
+		goto fail;
+	}
+	rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
+
+	/* Enable FLR interrupt for all PFs */
+	rvu_write64(rvu, BLKADDR_RVUM,
+		    RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
+
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
+		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
 	return 0;
 
 fail:
 	pci_free_irq_vectors(rvu->pdev);
 	return ret;
 }
 
+static void rvu_flr_wq_destroy(struct rvu *rvu)
+{
+	if (rvu->flr_wq) {
+		flush_workqueue(rvu->flr_wq);
+		destroy_workqueue(rvu->flr_wq);
+		rvu->flr_wq = NULL;
+	}
+	kfree(rvu->flr_wrk);
+}
+
+static int rvu_flr_init(struct rvu *rvu)
+{
+	u64 cfg;
+	int pf;
+
+	/* Enable FLR for all PFs */
+	for (pf = 1; pf < rvu->hw->total_pfs; pf++) {
+		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+		rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
+			    cfg | BIT_ULL(22));
+	}
+
+	rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
+				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+				      1);
+	if (!rvu->flr_wq)
+		return -ENOMEM;
+
+	rvu->flr_wrk = devm_kcalloc(rvu->dev, rvu->hw->total_pfs,
+				    sizeof(struct rvu_work), GFP_KERNEL);
+	if (!rvu->flr_wrk) {
+		destroy_workqueue(rvu->flr_wq);
+		return -ENOMEM;
+	}
+
+	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+		rvu->flr_wrk[pf].rvu = rvu;
+		INIT_WORK(&rvu->flr_wrk[pf].work, rvu_flr_handler);
+	}
+
+	mutex_init(&rvu->flr_lock);
+
+	return 0;
+}
+
 static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct device *dev = &pdev->dev;
@@ -1737,11 +1845,17 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (err)
 		goto err_mbox;
 
-	err = rvu_register_interrupts(rvu);
+	err = rvu_flr_init(rvu);
 	if (err)
 		goto err_cgx;
 
+	err = rvu_register_interrupts(rvu);
+	if (err)
+		goto err_flr;
+
 	return 0;
+err_flr:
+	rvu_flr_wq_destroy(rvu);
 err_cgx:
 	rvu_cgx_wq_destroy(rvu);
 err_mbox:
@@ -1765,6 +1879,7 @@ static void rvu_remove(struct pci_dev *pdev)
 	struct rvu *rvu = pci_get_drvdata(pdev);
 
 	rvu_unregister_interrupts(rvu);
+	rvu_flr_wq_destroy(rvu);
 	rvu_cgx_wq_destroy(rvu);
 	rvu_mbox_destroy(rvu);
 	rvu_reset_all_blocks(rvu);
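The FLR handlers in this diff rely on per-PF work state declared in rvu.h. Below is a minimal sketch of the assumed declarations; the names mirror how they are used in the patch (flr_wrk, flr_wq, flr_lock), but treat it as illustrative rather than the exact rvu.h layout.

#include <linux/workqueue.h>
#include <linux/mutex.h>

struct rvu;	/* full definition lives in rvu.h */

/* One work item per PF: rvu_flr_intr_handler() queues the entry for the PF
 * whose FLR interrupt fired, and rvu_flr_handler() recovers the PF index
 * from the entry's position within the flr_wrk array.
 */
struct rvu_work {
	struct work_struct work;
	struct rvu *rvu;
};

/* Fields assumed to be embedded in struct rvu:
 *	struct rvu_work		*flr_wrk;	// total_pfs entries, one per PF
 *	struct workqueue_struct	*flr_wq;	// ordered "rvu_afpf_flr" workqueue
 *	struct mutex		 flr_lock;	// serializes FLR processing
 */

Because flr_wrk is allocated as one contiguous array with an entry per PF, the statement "pf = flrwork - rvu->flr_wrk;" in rvu_flr_handler() yields the PF number by pointer subtraction; that PF's bit is then written to RVU_AF_PFTRPEND to signal FLR completion and its FLR interrupt is re-enabled. In practice a PF FLR is typically initiated through pci_reset_function() (for example via the PF's sysfs "reset" attribute), at which point the hardware is expected to raise RVU_AF_PFFLR_INT for that PF.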