Skip to content

Commit f9774cf

Browse files
author
Christoph Hellwig
committed
ARM/dma-mapping: use dma_to_phys/phys_to_dma in the dma-mapping code
Use the helpers as expected by the dma-direct code in the old arm dma-mapping code to ease a gradual switch to the common DMA code.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Arnd Bergmann <[email protected]>
Tested-by: Marc Zyngier <[email protected]>
1 parent d6e2e92 commit f9774cf

File tree

1 file changed

+12
-12
lines changed

1 file changed

+12
-12
lines changed

arch/arm/mm/dma-mapping.c

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -128,14 +128,14 @@ static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
128128
{
129129
if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
130130
__dma_page_cpu_to_dev(page, offset, size, dir);
131-
return pfn_to_dma(dev, page_to_pfn(page)) + offset;
131+
return phys_to_dma(dev, page_to_phys(page) + offset);
132132
}
133133

134134
static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
135135
unsigned long offset, size_t size, enum dma_data_direction dir,
136136
unsigned long attrs)
137137
{
138-
return pfn_to_dma(dev, page_to_pfn(page)) + offset;
138+
return phys_to_dma(dev, page_to_phys(page) + offset);
139139
}
140140

141141
/**
@@ -156,23 +156,23 @@ static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
156156
size_t size, enum dma_data_direction dir, unsigned long attrs)
157157
{
158158
if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
159-
__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
159+
__dma_page_dev_to_cpu(phys_to_page(dma_to_phys(dev, handle)),
160160
handle & ~PAGE_MASK, size, dir);
161161
}
162162

163163
static void arm_dma_sync_single_for_cpu(struct device *dev,
164164
dma_addr_t handle, size_t size, enum dma_data_direction dir)
165165
{
166166
unsigned int offset = handle & (PAGE_SIZE - 1);
167-
struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
167+
struct page *page = phys_to_page(dma_to_phys(dev, handle-offset));
168168
__dma_page_dev_to_cpu(page, offset, size, dir);
169169
}
170170

171171
static void arm_dma_sync_single_for_device(struct device *dev,
172172
dma_addr_t handle, size_t size, enum dma_data_direction dir)
173173
{
174174
unsigned int offset = handle & (PAGE_SIZE - 1);
175-
struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
175+
struct page *page = phys_to_page(dma_to_phys(dev, handle-offset));
176176
__dma_page_cpu_to_dev(page, offset, size, dir);
177177
}
178178

@@ -190,7 +190,7 @@ static int arm_dma_supported(struct device *dev, u64 mask)
190190
* Translate the device's DMA mask to a PFN limit. This
191191
* PFN number includes the page which we can DMA to.
192192
*/
193-
return dma_to_pfn(dev, mask) >= max_dma_pfn;
193+
return PHYS_PFN(dma_to_phys(dev, mask)) >= max_dma_pfn;
194194
}
195195

196196
static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
@@ -681,7 +681,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
681681
if (page) {
682682
unsigned long flags;
683683

684-
*handle = pfn_to_dma(dev, page_to_pfn(page));
684+
*handle = phys_to_dma(dev, page_to_phys(page));
685685
buf->virt = args.want_vaddr ? addr : page;
686686

687687
spin_lock_irqsave(&arm_dma_bufs_lock, flags);
@@ -721,7 +721,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
721721
int ret = -ENXIO;
722722
unsigned long nr_vma_pages = vma_pages(vma);
723723
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
724-
unsigned long pfn = dma_to_pfn(dev, dma_addr);
724+
unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
725725
unsigned long off = vma->vm_pgoff;
726726

727727
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
@@ -762,7 +762,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
762762
dma_addr_t handle, unsigned long attrs,
763763
bool is_coherent)
764764
{
765-
struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
765+
struct page *page = phys_to_page(dma_to_phys(dev, handle));
766766
struct arm_dma_buffer *buf;
767767
struct arm_dma_free_args args = {
768768
.dev = dev,
@@ -796,15 +796,15 @@ static int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
796796
void *cpu_addr, dma_addr_t handle, size_t size,
797797
unsigned long attrs)
798798
{
799-
unsigned long pfn = dma_to_pfn(dev, handle);
799+
phys_addr_t paddr = dma_to_phys(dev, handle);
800800
struct page *page;
801801
int ret;
802802

803803
/* If the PFN is not valid, we do not have a struct page */
804-
if (!pfn_valid(pfn))
804+
if (!pfn_valid(PHYS_PFN(paddr)))
805805
return -ENXIO;
806806

807-
page = pfn_to_page(pfn);
807+
page = phys_to_page(paddr);
808808

809809
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
810810
if (unlikely(ret))

0 commit comments

Comments (0)