
Commit 06792d0

Lu Baolu authored and Joerg Roedel committed
iommu/vt-d: Cleanup use of iommu_flush_iotlb_psi()
Use cache_tag_flush_range() in switch_to_super_page() to invalidate the
necessary caches when switching mappings from normal to super pages. The
iommu_flush_iotlb_psi() call in intel_iommu_memory_notifier() is
unnecessary since there should be no cache invalidation for the identity
domain.

Clean up iommu_flush_iotlb_psi() after the last call site is removed.

Signed-off-by: Lu Baolu <[email protected]>
Reviewed-by: Kevin Tian <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Joerg Roedel <[email protected]>
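[Editorial note] In short, the per-IOMMU PSI loop (plus the nested-parent pass) collapses into a single range flush. A condensed sketch of the pattern, abridged from the hunks below rather than quoted verbatim:

	/* Before: one PSI flush per IOMMU, plus a nested-parent pass. */
	xa_for_each(&domain->iommu_array, i, info)
		iommu_flush_iotlb_psi(info->iommu, domain,
				      start_pfn, lvl_pages, 0, 0);
	if (domain->nested_parent)
		parent_domain_flush(domain, start_pfn, lvl_pages, 0);

	/* After: one call; the cache-tag layer tracks which IOTLB,
	 * PASID-cache and device-TLB entries need invalidation. */
	cache_tag_flush_range(domain, start_pfn << VTD_PAGE_SHIFT,
			      end_pfn << VTD_PAGE_SHIFT, 0);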
1 parent 129dab6 · commit 06792d0

1 file changed: +2 -169 lines

drivers/iommu/intel/iommu.c

Lines changed: 2 additions & 169 deletions
@@ -1390,157 +1390,6 @@ static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
 	quirk_extra_dev_tlb_flush(info, addr, mask, IOMMU_NO_PASID, qdep);
 }
 
-static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
-				  u64 addr, unsigned mask)
-{
-	struct dev_pasid_info *dev_pasid;
-	struct device_domain_info *info;
-	unsigned long flags;
-
-	if (!domain->has_iotlb_device)
-		return;
-
-	spin_lock_irqsave(&domain->lock, flags);
-	list_for_each_entry(info, &domain->devices, link)
-		__iommu_flush_dev_iotlb(info, addr, mask);
-
-	list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) {
-		info = dev_iommu_priv_get(dev_pasid->dev);
-
-		if (!info->ats_enabled)
-			continue;
-
-		qi_flush_dev_iotlb_pasid(info->iommu,
-					 PCI_DEVID(info->bus, info->devfn),
-					 info->pfsid, dev_pasid->pasid,
-					 info->ats_qdep, addr,
-					 mask);
-	}
-	spin_unlock_irqrestore(&domain->lock, flags);
-}
-
-static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
-				     struct dmar_domain *domain, u64 addr,
-				     unsigned long npages, bool ih)
-{
-	u16 did = domain_id_iommu(domain, iommu);
-	struct dev_pasid_info *dev_pasid;
-	unsigned long flags;
-
-	spin_lock_irqsave(&domain->lock, flags);
-	list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain)
-		qi_flush_piotlb(iommu, did, dev_pasid->pasid, addr, npages, ih);
-
-	if (!list_empty(&domain->devices))
-		qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, npages, ih);
-	spin_unlock_irqrestore(&domain->lock, flags);
-}
-
-static void __iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
-				    unsigned long pfn, unsigned int pages,
-				    int ih)
-{
-	unsigned int aligned_pages = __roundup_pow_of_two(pages);
-	unsigned long bitmask = aligned_pages - 1;
-	unsigned int mask = ilog2(aligned_pages);
-	u64 addr = (u64)pfn << VTD_PAGE_SHIFT;
-
-	/*
-	 * PSI masks the low order bits of the base address. If the
-	 * address isn't aligned to the mask, then compute a mask value
-	 * needed to ensure the target range is flushed.
-	 */
-	if (unlikely(bitmask & pfn)) {
-		unsigned long end_pfn = pfn + pages - 1, shared_bits;
-
-		/*
-		 * Since end_pfn <= pfn + bitmask, the only way bits
-		 * higher than bitmask can differ in pfn and end_pfn is
-		 * by carrying. This means after masking out bitmask,
-		 * high bits starting with the first set bit in
-		 * shared_bits are all equal in both pfn and end_pfn.
-		 */
-		shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
-		mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
-	}
-
-	/*
-	 * Fallback to domain selective flush if no PSI support or
-	 * the size is too big.
-	 */
-	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
-		iommu->flush.flush_iotlb(iommu, did, 0, 0,
-					 DMA_TLB_DSI_FLUSH);
-	else
-		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
-					 DMA_TLB_PSI_FLUSH);
-}
-
-static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
-				  struct dmar_domain *domain,
-				  unsigned long pfn, unsigned int pages,
-				  int ih, int map)
-{
-	unsigned int aligned_pages = __roundup_pow_of_two(pages);
-	unsigned int mask = ilog2(aligned_pages);
-	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
-	u16 did = domain_id_iommu(domain, iommu);
-
-	if (WARN_ON(!pages))
-		return;
-
-	if (ih)
-		ih = 1 << 6;
-
-	if (domain->use_first_level)
-		domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
-	else
-		__iommu_flush_iotlb_psi(iommu, did, pfn, pages, ih);
-
-	if (!map)
-		iommu_flush_dev_iotlb(domain, addr, mask);
-}
-
-/*
- * Flush the relevant caches in nested translation if the domain
- * also serves as a parent
- */
-static void parent_domain_flush(struct dmar_domain *domain,
-				unsigned long pfn,
-				unsigned long pages, int ih)
-{
-	struct dmar_domain *s1_domain;
-
-	spin_lock(&domain->s1_lock);
-	list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
-		struct device_domain_info *device_info;
-		struct iommu_domain_info *info;
-		unsigned long flags;
-		unsigned long i;
-
-		xa_for_each(&s1_domain->iommu_array, i, info)
-			__iommu_flush_iotlb_psi(info->iommu, info->did,
-						pfn, pages, ih);
-
-		if (!s1_domain->has_iotlb_device)
-			continue;
-
-		spin_lock_irqsave(&s1_domain->lock, flags);
-		list_for_each_entry(device_info, &s1_domain->devices, link)
-			/*
-			 * Address translation cache in device side caches the
-			 * result of nested translation. There is no easy way
-			 * to identify the exact set of nested translations
-			 * affected by a change in S2. So just flush the entire
-			 * device cache.
-			 */
-			__iommu_flush_dev_iotlb(device_info, 0,
-						MAX_AGAW_PFN_WIDTH);
-		spin_unlock_irqrestore(&s1_domain->lock, flags);
-	}
-	spin_unlock(&domain->s1_lock);
-}
-
 static void intel_flush_iotlb_all(struct iommu_domain *domain)
 {
 	cache_tag_flush_all(to_dmar_domain(domain));
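
[Editorial note] The mask widening removed in the hunk above is the subtlest part of __iommu_flush_iotlb_psi(). Below is a minimal standalone sketch of the arithmetic, assuming a userspace toy program with the kernel's __ffs() modeled by the equivalent __builtin_ctzl(); illustration only, not kernel code:

	#include <stdio.h>

	int main(void)
	{
		unsigned long pfn = 3, pages = 2;            /* flush pfns 3..4 */
		unsigned long aligned_pages = 2;             /* __roundup_pow_of_two(2) */
		unsigned long bitmask = aligned_pages - 1;   /* 0x1 */
		unsigned long end_pfn = pfn + pages - 1;     /* 4 */
		unsigned long shared_bits, mask, base;

		/* pfn is not aligned to bitmask (1 & 3 != 0), so widen the
		 * mask: from the lowest bit (outside bitmask) where pfn and
		 * end_pfn agree, all higher bits agree too, so one aligned
		 * block of 1 << mask pages covers both ends. */
		shared_bits = ~(pfn ^ end_pfn) & ~bitmask;   /* ...11111000 */
		mask = shared_bits ? __builtin_ctzl(shared_bits) : 64;

		base = pfn & ~((1UL << mask) - 1);
		printf("mask=%lu -> flush pfns %lu..%lu\n",  /* mask=3 -> 0..7 */
		       mask, base, base + (1UL << mask) - 1);
		return 0;
	}

Running it prints "mask=3 -> flush pfns 0..7": the unaligned two-page range 3..4 forces an eight-page aligned flush, which is exactly how PSI can over-invalidate.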
@@ -1991,9 +1840,7 @@ static void switch_to_super_page(struct dmar_domain *domain,
 				 unsigned long end_pfn, int level)
 {
 	unsigned long lvl_pages = lvl_to_nr_pages(level);
-	struct iommu_domain_info *info;
 	struct dma_pte *pte = NULL;
-	unsigned long i;
 
 	while (start_pfn <= end_pfn) {
 		if (!pte)
@@ -2005,13 +1852,8 @@ static void switch_to_super_page(struct dmar_domain *domain,
 					       start_pfn + lvl_pages - 1,
 					       level + 1);
 
-			xa_for_each(&domain->iommu_array, i, info)
-				iommu_flush_iotlb_psi(info->iommu, domain,
-						      start_pfn, lvl_pages,
-						      0, 0);
-			if (domain->nested_parent)
-				parent_domain_flush(domain, start_pfn,
-						    lvl_pages, 0);
+			cache_tag_flush_range(domain, start_pfn << VTD_PAGE_SHIFT,
+					      end_pfn << VTD_PAGE_SHIFT, 0);
 		}
 
 		pte++;
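
[Editorial note] One detail in the hunk above: iommu_flush_iotlb_psi() took a pfn plus a page count, while cache_tag_flush_range() takes byte addresses, hence the VTD_PAGE_SHIFT conversions. A hypothetical wrapper (flush_pfn_range() is illustrative only, not a real kernel helper) that spells out the conversion:

	/* Hypothetical helper, for illustration only: flush a pfn range by
	 * converting it to the byte addresses cache_tag_flush_range()
	 * expects, mirroring the shifts in the hunk above. */
	static inline void flush_pfn_range(struct dmar_domain *domain,
					   unsigned long start_pfn,
					   unsigned long end_pfn, int ih)
	{
		cache_tag_flush_range(domain, start_pfn << VTD_PAGE_SHIFT,
				      end_pfn << VTD_PAGE_SHIFT, ih);
	}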
@@ -3381,18 +3223,9 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
 	case MEM_OFFLINE:
 	case MEM_CANCEL_ONLINE:
 		{
-			struct dmar_drhd_unit *drhd;
-			struct intel_iommu *iommu;
 			LIST_HEAD(freelist);
 
 			domain_unmap(si_domain, start_vpfn, last_vpfn, &freelist);
-
-			rcu_read_lock();
-			for_each_active_iommu(iommu, drhd)
-				iommu_flush_iotlb_psi(iommu, si_domain,
-						      start_vpfn, mhp->nr_pages,
-						      list_empty(&freelist), 0);
-			rcu_read_unlock();
 			put_pages_list(&freelist);
 		}
 		break;
