Skip to content

Commit c987ff0

Browse files
rmurphy-arm authored and joergroedel committed
iommu/dma: Respect IOMMU aperture when allocating
Where a device driver has set a 64-bit DMA mask to indicate the absence of addressing limitations, we still need to ensure that we don't allocate IOVAs beyond the actual input size of the IOMMU. The reported aperture is the most reliable way we have of inferring that input address size, so use that to enforce a hard upper limit where available. Fixes: 0db2e5d ("iommu: Implement common IOMMU ops for DMA mapping") Signed-off-by: Robin Murphy <[email protected]> Signed-off-by: Joerg Roedel <[email protected]>
1 parent 3ec6004 commit c987ff0

File tree

1 file changed

+7
-4
lines changed

1 file changed

+7
-4
lines changed

drivers/iommu/dma-iommu.c

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -152,12 +152,15 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
152152
}
153153
}
154154

155-
static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size,
155+
static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
156156
dma_addr_t dma_limit)
157157
{
158+
struct iova_domain *iovad = domain->iova_cookie;
158159
unsigned long shift = iova_shift(iovad);
159160
unsigned long length = iova_align(iovad, size) >> shift;
160161

162+
if (domain->geometry.force_aperture)
163+
dma_limit = min(dma_limit, domain->geometry.aperture_end);
161164
/*
162165
* Enforce size-alignment to be safe - there could perhaps be an
163166
* attribute to control this per-device, or at least per-domain...
@@ -315,7 +318,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
315318
if (!pages)
316319
return NULL;
317320

318-
iova = __alloc_iova(iovad, size, dev->coherent_dma_mask);
321+
iova = __alloc_iova(domain, size, dev->coherent_dma_mask);
319322
if (!iova)
320323
goto out_free_pages;
321324

@@ -387,7 +390,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
387390
phys_addr_t phys = page_to_phys(page) + offset;
388391
size_t iova_off = iova_offset(iovad, phys);
389392
size_t len = iova_align(iovad, size + iova_off);
390-
struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev));
393+
struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));
391394

392395
if (!iova)
393396
return DMA_ERROR_CODE;
@@ -539,7 +542,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
539542
prev = s;
540543
}
541544

542-
iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev));
545+
iova = __alloc_iova(domain, iova_len, dma_get_mask(dev));
543546
if (!iova)
544547
goto out_restore_sg;
545548

0 commit comments

Comments (0)