
Commit b3aa14f

Author: Christoph Hellwig
iommu: remove the mapping_error dma_map_ops method
Return DMA_MAPPING_ERROR instead of 0 on a DMA mapping failure and let the core dma-mapping code handle the rest. Note that the existing code used AMD_IOMMU_MAPPING_ERROR to check for a 0 return from the IOVA allocator; that check is replaced with an explicit test against 0, as in the allocator's implementation and its other users.

Signed-off-by: Christoph Hellwig <[email protected]>
Acked-by: Linus Torvalds <[email protected]>
1 parent 887712a commit b3aa14f
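
With the per-driver .mapping_error callback gone, error detection happens in the generic dma_mapping_error() helper, which only has to compare the returned handle against the reserved DMA_MAPPING_ERROR cookie. A minimal sketch of that convention, assuming the all-ones encoding used by the generic dma-mapping header (the debug hooks and exact declarations in include/linux/dma-mapping.h are omitted here):

/*
 * Sketch: a mapping routine signals failure by returning the reserved
 * DMA_MAPPING_ERROR cookie, so the core check is a plain comparison.
 */
#define DMA_MAPPING_ERROR       (~(dma_addr_t)0)

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == DMA_MAPPING_ERROR;
}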

File tree

1 file changed: +5 −13 lines


drivers/iommu/amd_iommu.c

Lines changed: 5 additions & 13 deletions

@@ -55,8 +55,6 @@
 #include "amd_iommu_types.h"
 #include "irq_remapping.h"
 
-#define AMD_IOMMU_MAPPING_ERROR 0
-
 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
 
 #define LOOP_TIMEOUT 100000
@@ -2339,7 +2337,7 @@ static dma_addr_t __map_single(struct device *dev,
         paddr &= PAGE_MASK;
 
         address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
-        if (address == AMD_IOMMU_MAPPING_ERROR)
+        if (!address)
                 goto out;
 
         prot = dir2prot(direction);
@@ -2376,7 +2374,7 @@ static dma_addr_t __map_single(struct device *dev,
 
         dma_ops_free_iova(dma_dom, address, pages);
 
-        return AMD_IOMMU_MAPPING_ERROR;
+        return DMA_MAPPING_ERROR;
 }
 
 /*
@@ -2427,7 +2425,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
         if (PTR_ERR(domain) == -EINVAL)
                 return (dma_addr_t)paddr;
         else if (IS_ERR(domain))
-                return AMD_IOMMU_MAPPING_ERROR;
+                return DMA_MAPPING_ERROR;
 
         dma_mask = *dev->dma_mask;
         dma_dom = to_dma_ops_domain(domain);
@@ -2504,7 +2502,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
         npages = sg_num_pages(dev, sglist, nelems);
 
         address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
-        if (address == AMD_IOMMU_MAPPING_ERROR)
+        if (address == DMA_MAPPING_ERROR)
                 goto out_err;
 
         prot = dir2prot(direction);
@@ -2627,7 +2625,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
         *dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
                                  size, DMA_BIDIRECTIONAL, dma_mask);
 
-        if (*dma_addr == AMD_IOMMU_MAPPING_ERROR)
+        if (*dma_addr == DMA_MAPPING_ERROR)
                 goto out_free;
 
         return page_address(page);
@@ -2678,11 +2676,6 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
         return check_device(dev);
 }
 
-static int amd_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-        return dma_addr == AMD_IOMMU_MAPPING_ERROR;
-}
-
 static const struct dma_map_ops amd_iommu_dma_ops = {
         .alloc = alloc_coherent,
         .free = free_coherent,
@@ -2691,7 +2684,6 @@ static const struct dma_map_ops amd_iommu_dma_ops = {
         .map_sg = map_sg,
         .unmap_sg = unmap_sg,
         .dma_supported = amd_iommu_dma_supported,
-        .mapping_error = amd_iommu_mapping_error,
 };
 
 static int init_reserved_iova_ranges(void)
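
For DMA API callers nothing changes: map, then test the returned handle with dma_mapping_error() before using it. A hedged example of that pattern (dev, page, and the error path are placeholders, not code from this commit):

/* Map one page for memory-to-device DMA and bail out if the backend
 * (here the AMD IOMMU dma_map_ops) reported a mapping failure. */
dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, addr))
        return -ENOMEM; /* addr holds DMA_MAPPING_ERROR, not a usable address */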
