Commit 6de8ad9
x86/amd-iommu: Make iommu_flush_pages aware of multiple IOMMUs
This patch extends the iommu_flush_pages function to flush the TLB entries on all IOMMUs the domain has devices on. This gives up the former assumption that dma_ops domains are bound to only one IOMMU in the system. For dma_ops domains this is still true, but not for IOMMU-API managed domains; giving the assumption up for dma_ops domains too allows code simplification. Further, the main logic is split out into a generic function, __iommu_flush_pages, which can be used by iommu_flush_tlb too.

Signed-off-by: Joerg Roedel <[email protected]>
Parent: 0518a3a
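Since the message notes that the split-out helper can serve iommu_flush_tlb too, here is a minimal sketch of what that reuse could look like. It is an illustration only, not part of this commit, and it assumes the driver's CMD_INV_IOMMU_ALL_PAGES_ADDRESS constant and that pde = 1 asks the hardware to invalidate cached page directory entries as well.

/*
 * Illustrative sketch (not in this commit): flushing the whole TLB of a
 * domain through the generic helper introduced below. Requesting a range
 * that spans more than one page makes the helper fall back to a
 * full-domain invalidation; pde = 1 also drops cached page directory
 * entries. Signature assumed: __iommu_flush_pages(domain, address, size, pde).
 */
static void iommu_flush_tlb_pde(struct protection_domain *domain)
{
        __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
}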

arch/x86/kernel/amd_iommu.c (24 additions, 7 deletions)
@@ -447,10 +447,10 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
  * It invalidates a single PTE if the range to flush is within a single
  * page. Otherwise it flushes the whole TLB of the IOMMU.
  */
-static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
-                             u64 address, size_t size)
+static void __iommu_flush_pages(struct protection_domain *domain,
+                                u64 address, size_t size, int pde)
 {
-        int s = 0;
+        int s = 0, i;
         unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);
 
         address &= PAGE_MASK;
@@ -464,9 +464,26 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
                 s = 1;
         }
 
-        iommu_queue_inv_iommu_pages(iommu, address, domid, 0, s);
 
-        return 0;
+        for (i = 0; i < amd_iommus_present; ++i) {
+                if (!domain->dev_iommu[i])
+                        continue;
+
+                /*
+                 * Devices of this domain are behind this IOMMU
+                 * We need a TLB flush
+                 */
+                iommu_queue_inv_iommu_pages(amd_iommus[i], address,
+                                            domain->id, pde, s);
+        }
+
+        return;
+}
+
+static void iommu_flush_pages(struct protection_domain *domain,
+                              u64 address, size_t size)
+{
+        __iommu_flush_pages(domain, address, size, 0);
 }
 
 /* Flush the whole IO/TLB for a given protection domain */
@@ -1683,7 +1700,7 @@ static dma_addr_t __map_single(struct device *dev,
                 iommu_flush_tlb(iommu, dma_dom->domain.id);
                 dma_dom->need_flush = false;
         } else if (unlikely(iommu_has_npcache(iommu)))
-                iommu_flush_pages(iommu, dma_dom->domain.id, address, size);
+                iommu_flush_pages(&dma_dom->domain, address, size);
 
 out:
         return address;
@@ -1731,7 +1748,7 @@ static void __unmap_single(struct amd_iommu *iommu,
         dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
         if (amd_iommu_unmap_flush || dma_dom->need_flush) {
-                iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
+                iommu_flush_pages(&dma_dom->domain, dma_addr, size);
                 dma_dom->need_flush = false;
         }
 }
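For readers following the new loop, the standalone sketch below models the bookkeeping the diff relies on. The names are simplified stand-ins, not kernel code; it assumes, as the parent commit set up, that domain->dev_iommu[] carries a per-IOMMU count of the domain's devices, so the flush fans out only to IOMMUs that actually have devices of this domain behind them.

#include <stdio.h>

#define MAX_IOMMUS 4

/* Simplified stand-in for the kernel's protection_domain. */
struct protection_domain {
        int id;
        /* per-IOMMU device reference counts, as in the parent commit */
        int dev_iommu[MAX_IOMMUS];
};

static int amd_iommus_present = 3;

/* Stand-in for iommu_queue_inv_iommu_pages(): just log the flush. */
static void queue_flush(int iommu_idx, int domid)
{
        printf("INVALIDATE_IOMMU_PAGES on IOMMU %d for domain %d\n",
               iommu_idx, domid);
}

/* The fan-out pattern from __iommu_flush_pages(). */
static void flush_domain(struct protection_domain *domain)
{
        int i;

        for (i = 0; i < amd_iommus_present; ++i) {
                if (!domain->dev_iommu[i])
                        continue;       /* no devices behind this IOMMU */
                queue_flush(i, domain->id);
        }
}

int main(void)
{
        /* Domain with devices behind IOMMUs 0 and 2, none behind 1. */
        struct protection_domain dom = {
                .id        = 42,
                .dev_iommu = { 2, 0, 1 },
        };

        flush_domain(&dom);     /* flushes IOMMU 0 and IOMMU 2 only */
        return 0;
}

With the counts above, the program prints invalidation lines for IOMMU 0 and IOMMU 2 only, mirroring the "if (!domain->dev_iommu[i]) continue;" test in the diff.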
