@@ -1597,6 +1597,61 @@ static void domain_exit(struct dmar_domain *domain)
 	kfree(domain);
 }
 
+/*
+ * For kdump cases, old valid entries may be cached due to the
+ * in-flight DMA and copied pgtable, but there is no unmapping
+ * behaviour for them, thus we need an explicit cache flush for
+ * the newly-mapped device. For kdump, at this point, the device
+ * is supposed to finish reset at its driver probe stage, so no
+ * in-flight DMA will exist, and we don't need to worry anymore
+ * hereafter.
+ */
+static void copied_context_tear_down(struct intel_iommu *iommu,
+				     struct context_entry *context,
+				     u8 bus, u8 devfn)
+{
+	u16 did_old;
+
+	if (!context_copied(iommu, bus, devfn))
+		return;
+
+	assert_spin_locked(&iommu->lock);
+
+	did_old = context_domain_id(context);
+	context_clear_entry(context);
+
+	if (did_old < cap_ndoms(iommu->cap)) {
+		iommu->flush.flush_context(iommu, did_old,
+					   (((u16)bus) << 8) | devfn,
+					   DMA_CCMD_MASK_NOBIT,
+					   DMA_CCMD_DEVICE_INVL);
+		iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
+					 DMA_TLB_DSI_FLUSH);
+	}
+
+	clear_context_copied(iommu, bus, devfn);
+}
+
+/*
+ * It's a non-present to present mapping. If hardware doesn't cache
+ * non-present entries we only need to flush the write-buffer. If it
+ * _does_ cache non-present entries, then it does so in the special
+ * domain #0, which we have to flush:
+ */
+static void context_present_cache_flush(struct intel_iommu *iommu, u16 did,
+					u8 bus, u8 devfn)
+{
+	if (cap_caching_mode(iommu->cap)) {
+		iommu->flush.flush_context(iommu, 0,
+					   (((u16)bus) << 8) | devfn,
+					   DMA_CCMD_MASK_NOBIT,
+					   DMA_CCMD_DEVICE_INVL);
+		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+	} else {
+		iommu_flush_write_buffer(iommu);
+	}
+}
+
 static int domain_context_mapping_one(struct dmar_domain *domain,
 				      struct intel_iommu *iommu,
 				      u8 bus, u8 devfn)
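
A note on the (((u16)bus) << 8) | devfn expression both new helpers pass to flush_context(): VT-d device-selective invalidations identify the requester by a 16-bit PCI source-id, bus number in the high byte and devfn in the low byte. A minimal standalone sketch of that composition (the helper name and sample device are illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only (not in the patch): build the 16-bit source-id
 * used by device-selective context-cache flushes; bus occupies
 * bits 15:8, devfn (device << 3 | function) occupies bits 7:0. */
static inline uint16_t pci_source_id(uint8_t bus, uint8_t devfn)
{
	return ((uint16_t)bus << 8) | devfn;
}

int main(void)
{
	/* e.g. device 00:02.0 -> devfn 0x10 -> source-id 0x0010 */
	printf("0x%04x\n", pci_source_id(0x00, (2 << 3) | 0));
	return 0;
}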
@@ -1625,31 +1680,9 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	if (context_present(context) && !context_copied(iommu, bus, devfn))
 		goto out_unlock;
 
-	/*
-	 * For kdump cases, old valid entries may be cached due to the
-	 * in-flight DMA and copied pgtable, but there is no unmapping
-	 * behaviour for them, thus we need an explicit cache flush for
-	 * the newly-mapped device. For kdump, at this point, the device
-	 * is supposed to finish reset at its driver probe stage, so no
-	 * in-flight DMA will exist, and we don't need to worry anymore
-	 * hereafter.
-	 */
-	if (context_copied(iommu, bus, devfn)) {
-		u16 did_old = context_domain_id(context);
-
-		if (did_old < cap_ndoms(iommu->cap)) {
-			iommu->flush.flush_context(iommu, did_old,
-						   (((u16)bus) << 8) | devfn,
-						   DMA_CCMD_MASK_NOBIT,
-						   DMA_CCMD_DEVICE_INVL);
-			iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
-						 DMA_TLB_DSI_FLUSH);
-		}
-
-		clear_context_copied(iommu, bus, devfn);
-	}
-
+	copied_context_tear_down(iommu, context, bus, devfn);
 	context_clear_entry(context);
+
 	context_set_domain_id(context, did);
 
 	if (translation != CONTEXT_TT_PASS_THROUGH) {
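
The did_old < cap_ndoms(iommu->cap) check that moved into copied_context_tear_down() exists because a domain-id read from a copied (old-kernel) context entry is untrusted input; only ids below the hardware's supported domain count may be fed to the invalidation hardware. A sketch of the decoding, with cap_ndoms() reconstructed from memory after the kernel's VT-d headers (treat the exact definition as an assumption):

#include <stdint.h>
#include <stdio.h>

/* Reconstructed from memory, modeled on the kernel's cap_ndoms():
 * the ND field (bits 2:0 of the capability register) encodes
 * 2^(4 + 2*ND) supported domain ids. An assumption, not a quote. */
#define cap_ndoms(c)	(1ULL << (4 + 2 * ((uint64_t)(c) & 0x7)))

int main(void)
{
	uint64_t cap = 0x2;	/* ND = 2 -> 256 domain ids */

	/* A stale did_old of, say, 300 would fail the bounds check
	 * above and the flush for it would be skipped. */
	printf("ndoms = %llu\n", (unsigned long long)cap_ndoms(cap));
	return 0;
}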
@@ -1685,23 +1718,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	context_set_present(context);
 	if (!ecap_coherent(iommu->ecap))
 		clflush_cache_range(context, sizeof(*context));
-
-	/*
-	 * It's a non-present to present mapping. If hardware doesn't cache
-	 * non-present entry we only need to flush the write-buffer. If the
-	 * _does_ cache non-present entries, then it does so in the special
-	 * domain #0, which we have to flush:
-	 */
-	if (cap_caching_mode(iommu->cap)) {
-		iommu->flush.flush_context(iommu, 0,
-					   (((u16)bus) << 8) | devfn,
-					   DMA_CCMD_MASK_NOBIT,
-					   DMA_CCMD_DEVICE_INVL);
-		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
-	} else {
-		iommu_flush_write_buffer(iommu);
-	}
-
+	context_present_cache_flush(iommu, did, bus, devfn);
 	ret = 0;
 
 out_unlock:
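
The branch preserved inside context_present_cache_flush() keys off the Caching Mode bit of the capability register: with CM set (typical for a vIOMMU such as QEMU's), hardware may cache not-present entries under domain #0 and needs the explicit invalidations; with CM clear, flushing the write buffer is enough. A minimal sketch of that decision, with cap_caching_mode() reproduced from memory (treat it as an assumption):

#include <stdint.h>
#include <stdio.h>

/* Modeled from memory on the kernel's macro: Caching Mode is bit 7
 * of the VT-d capability register. Treat as an assumption. */
#define cap_caching_mode(c)	(((c) >> 7) & 1)

int main(void)
{
	uint64_t cap = 1ULL << 7;	/* CM = 1, as a vIOMMU reports */

	printf("%s\n", cap_caching_mode(cap) ?
	       "explicit context + DSI IOTLB flush" :
	       "write-buffer flush only");
	return 0;
}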