Skip to content

Commit 096339a

Browse files
Gaurav Batra
authored and committed
powerpc/iommu: DMA address offset is incorrectly calculated with 2MB TCEs
When DMA window is backed by 2MB TCEs, the DMA address for the mapped page should be the offset of the page relative to the 2MB TCE. The code was incorrectly setting the DMA address to the beginning of the TCE range. Mellanox driver is reporting timeout trying to ENABLE_HCA for an SR-IOV ethernet port, when DMA window is backed by 2MB TCEs. Fixes: 3872731 ("powerps/pseries/dma: Add support for 2M IOMMU page size") Cc: [email protected] # v5.16+ Signed-off-by: Gaurav Batra <[email protected]> Reviewed-by: Greg Joyce <[email protected]> Reviewed-by: Brian King <[email protected]> Signed-off-by: Michael Ellerman <[email protected]> Link: https://msgid.link/[email protected]
1 parent ad59382 commit 096339a

File tree

1 file changed

+7
-4
lines changed

1 file changed

+7
-4
lines changed

arch/powerpc/kernel/iommu.c

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -518,7 +518,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
518518
/* Convert entry to a dma_addr_t */
519519
entry += tbl->it_offset;
520520
dma_addr = entry << tbl->it_page_shift;
521-
dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
521+
dma_addr |= (vaddr & ~IOMMU_PAGE_MASK(tbl));
522522

523523
DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
524524
npages, entry, dma_addr);
@@ -905,6 +905,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
905905
unsigned int order;
906906
unsigned int nio_pages, io_order;
907907
struct page *page;
908+
int tcesize = (1 << tbl->it_page_shift);
908909

909910
size = PAGE_ALIGN(size);
910911
order = get_order(size);
@@ -931,15 +932,17 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
931932
memset(ret, 0, size);
932933

933934
/* Set up tces to cover the allocated range */
934-
nio_pages = size >> tbl->it_page_shift;
935+
nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
936+
935937
io_order = get_iommu_order(size, tbl);
936938
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
937939
mask >> tbl->it_page_shift, io_order, 0);
938940
if (mapping == DMA_MAPPING_ERROR) {
939941
free_pages((unsigned long)ret, order);
940942
return NULL;
941943
}
942-
*dma_handle = mapping;
944+
945+
*dma_handle = mapping | ((u64)ret & (tcesize - 1));
943946
return ret;
944947
}
945948

@@ -950,7 +953,7 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
950953
unsigned int nio_pages;
951954

952955
size = PAGE_ALIGN(size);
953-
nio_pages = size >> tbl->it_page_shift;
956+
nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
954957
iommu_free(tbl, dma_handle, nio_pages);
955958
size = PAGE_ALIGN(size);
956959
free_pages((unsigned long)vaddr, get_order(size));

0 commit comments

Comments (0)