
Commit c719198

LuBaolu authored and joergroedel committed
iommu/vt-d: Factor out helpers from domain_context_mapping_one()
Extract common code from domain_context_mapping_one() into new helpers,
making it reusable by other functions such as the upcoming identity
domain implementation. No intentional functional changes.

Signed-off-by: Lu Baolu <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
Reviewed-by: Kevin Tian <[email protected]>
Reviewed-by: Jerry Snitselaar <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Joerg Roedel <[email protected]>
1 parent 487df68 commit c719198


drivers/iommu/intel/iommu.c

Lines changed: 58 additions & 41 deletions
@@ -1597,6 +1597,61 @@ static void domain_exit(struct dmar_domain *domain)
 	kfree(domain);
 }
 
+/*
+ * For kdump cases, old valid entries may be cached due to the
+ * in-flight DMA and copied pgtable, but there is no unmapping
+ * behaviour for them, thus we need an explicit cache flush for
+ * the newly-mapped device. For kdump, at this point, the device
+ * is supposed to finish reset at its driver probe stage, so no
+ * in-flight DMA will exist, and we don't need to worry anymore
+ * hereafter.
+ */
+static void copied_context_tear_down(struct intel_iommu *iommu,
+				     struct context_entry *context,
+				     u8 bus, u8 devfn)
+{
+	u16 did_old;
+
+	if (!context_copied(iommu, bus, devfn))
+		return;
+
+	assert_spin_locked(&iommu->lock);
+
+	did_old = context_domain_id(context);
+	context_clear_entry(context);
+
+	if (did_old < cap_ndoms(iommu->cap)) {
+		iommu->flush.flush_context(iommu, did_old,
+					   (((u16)bus) << 8) | devfn,
+					   DMA_CCMD_MASK_NOBIT,
+					   DMA_CCMD_DEVICE_INVL);
+		iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
+					 DMA_TLB_DSI_FLUSH);
+	}
+
+	clear_context_copied(iommu, bus, devfn);
+}
+
+/*
+ * It's a non-present to present mapping. If hardware doesn't cache
+ * non-present entry we only need to flush the write-buffer. If the
+ * _does_ cache non-present entries, then it does so in the special
+ * domain #0, which we have to flush:
+ */
+static void context_present_cache_flush(struct intel_iommu *iommu, u16 did,
+					u8 bus, u8 devfn)
+{
+	if (cap_caching_mode(iommu->cap)) {
+		iommu->flush.flush_context(iommu, 0,
+					   (((u16)bus) << 8) | devfn,
+					   DMA_CCMD_MASK_NOBIT,
+					   DMA_CCMD_DEVICE_INVL);
+		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+	} else {
+		iommu_flush_write_buffer(iommu);
+	}
+}
+
 static int domain_context_mapping_one(struct dmar_domain *domain,
 				      struct intel_iommu *iommu,
 				      u8 bus, u8 devfn)
@@ -1625,31 +1680,9 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	if (context_present(context) && !context_copied(iommu, bus, devfn))
 		goto out_unlock;
 
-	/*
-	 * For kdump cases, old valid entries may be cached due to the
-	 * in-flight DMA and copied pgtable, but there is no unmapping
-	 * behaviour for them, thus we need an explicit cache flush for
-	 * the newly-mapped device. For kdump, at this point, the device
-	 * is supposed to finish reset at its driver probe stage, so no
-	 * in-flight DMA will exist, and we don't need to worry anymore
-	 * hereafter.
-	 */
-	if (context_copied(iommu, bus, devfn)) {
-		u16 did_old = context_domain_id(context);
-
-		if (did_old < cap_ndoms(iommu->cap)) {
-			iommu->flush.flush_context(iommu, did_old,
-						   (((u16)bus) << 8) | devfn,
-						   DMA_CCMD_MASK_NOBIT,
-						   DMA_CCMD_DEVICE_INVL);
-			iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
-						 DMA_TLB_DSI_FLUSH);
-		}
-
-		clear_context_copied(iommu, bus, devfn);
-	}
-
+	copied_context_tear_down(iommu, context, bus, devfn);
 	context_clear_entry(context);
+
 	context_set_domain_id(context, did);
 
 	if (translation != CONTEXT_TT_PASS_THROUGH) {
@@ -1685,23 +1718,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	context_set_present(context);
 	if (!ecap_coherent(iommu->ecap))
 		clflush_cache_range(context, sizeof(*context));
-
-	/*
-	 * It's a non-present to present mapping. If hardware doesn't cache
-	 * non-present entry we only need to flush the write-buffer. If the
-	 * _does_ cache non-present entries, then it does so in the special
-	 * domain #0, which we have to flush:
-	 */
-	if (cap_caching_mode(iommu->cap)) {
-		iommu->flush.flush_context(iommu, 0,
-					   (((u16)bus) << 8) | devfn,
-					   DMA_CCMD_MASK_NOBIT,
-					   DMA_CCMD_DEVICE_INVL);
-		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
-	} else {
-		iommu_flush_write_buffer(iommu);
-	}
-
+	context_present_cache_flush(iommu, did, bus, devfn);
 	ret = 0;
 
 out_unlock:
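
To see why the factoring pays off, here is a minimal sketch of how a later
caller, such as the identity domain setup the commit message anticipates,
might reuse the two helpers. The function identity_context_mapping_one() and
its exact body are hypothetical illustrations, not part of this commit;
FLPT_DEFAULT_DID, iommu->msagaw, and the context_set_*() setters are existing
driver internals, but their combination here is an assumption.

/*
 * Hypothetical sketch, not part of this commit: a pass-through
 * (identity) context mapping built on the helpers factored out above.
 * It would live alongside domain_context_mapping_one() in
 * drivers/iommu/intel/iommu.c.
 */
static int identity_context_mapping_one(struct intel_iommu *iommu,
					struct context_entry *context,
					u8 bus, u8 devfn)
{
	spin_lock(&iommu->lock);

	/* Clear and flush any entry copied from the old kdump kernel. */
	copied_context_tear_down(iommu, context, bus, devfn);
	context_clear_entry(context);

	/* Program a pass-through entry in the default domain. */
	context_set_domain_id(context, FLPT_DEFAULT_DID);
	/* Pass-through mode wants the widest supported address width. */
	context_set_address_width(context, iommu->msagaw);
	context_set_translation_type(context, CONTEXT_TT_PASS_THROUGH);
	context_set_fault_enable(context);
	context_set_present(context);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(context, sizeof(*context));

	/* Non-present to present: flush context cache or write buffer. */
	context_present_cache_flush(iommu, FLPT_DEFAULT_DID, bus, devfn);

	spin_unlock(&iommu->lock);

	return 0;
}

The design point is that a new mapping path only has to program the entry
fields; the kdump tear-down rules and the caching-mode flush rules each stay
in exactly one place.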
