@@ -128,14 +128,14 @@ static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
 {
 	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		__dma_page_cpu_to_dev(page, offset, size, dir);
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+	return phys_to_dma(dev, page_to_phys(page) + offset);
 }
 
 static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     unsigned long attrs)
 {
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+	return phys_to_dma(dev, page_to_phys(page) + offset);
 }
 
 /**
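
The conversion in the two hunks above is purely algebraic: folding `offset` into the physical address before calling `phys_to_dma()` yields the same bus address the old `pfn_to_dma(...) + offset` form produced, because `page_to_phys(page)` is just `page_to_pfn(page) << PAGE_SHIFT`. A minimal userspace sketch of that identity, assuming a flat per-device offset model of the helpers (the `model_*` functions, `PAGE_SHIFT`, and `DMA_OFFSET` below are illustrative stand-ins, not the kernel implementations):

```c
/* Simplified, userspace-only model of the address math in the hunks above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define DMA_OFFSET 0x80000000ULL	/* hypothetical per-device offset */

typedef uint64_t phys_addr_t;
typedef uint64_t dma_addr_t;

/* Stand-in for pfn_to_dma(dev, pfn) under a flat-offset model. */
static dma_addr_t model_pfn_to_dma(unsigned long pfn)
{
	return ((phys_addr_t)pfn << PAGE_SHIFT) + DMA_OFFSET;
}

/* Stand-in for phys_to_dma(dev, phys) under the same model. */
static dma_addr_t model_phys_to_dma(phys_addr_t phys)
{
	return phys + DMA_OFFSET;
}

int main(void)
{
	unsigned long pfn = 0x12345;				/* page_to_pfn(page) */
	phys_addr_t phys = (phys_addr_t)pfn << PAGE_SHIFT;	/* page_to_phys(page) */
	size_t offset = 0x6a8;					/* sub-page offset */

	dma_addr_t old_style = model_pfn_to_dma(pfn) + offset;
	dma_addr_t new_style = model_phys_to_dma(phys + offset);

	/* Both forms compute the same bus address. */
	assert(old_style == new_style);
	printf("bus address: %#llx\n", (unsigned long long)new_style);
	return 0;
}
```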
@@ -156,23 +156,23 @@ static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
-		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+		__dma_page_dev_to_cpu(phys_to_page(dma_to_phys(dev, handle)),
 				      handle & ~PAGE_MASK, size, dir);
 }
 
 static void arm_dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
-	struct page *page = pfn_to_page(dma_to_pfn(dev, handle - offset));
+	struct page *page = phys_to_page(dma_to_phys(dev, handle - offset));
 	__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
 static void arm_dma_sync_single_for_device(struct device *dev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
-	struct page *page = pfn_to_page(dma_to_pfn(dev, handle - offset));
+	struct page *page = phys_to_page(dma_to_phys(dev, handle - offset));
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
@@ -190,7 +190,7 @@ static int arm_dma_supported(struct device *dev, u64 mask)
 	 * Translate the device's DMA mask to a PFN limit. This
 	 * PFN number includes the page which we can DMA to.
 	 */
-	return dma_to_pfn(dev, mask) >= max_dma_pfn;
+	return PHYS_PFN(dma_to_phys(dev, mask)) >= max_dma_pfn;
 }
 
 static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
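
The `arm_dma_supported()` hunk above relies on the kernel's `PHYS_PFN()` macro, a plain right shift by `PAGE_SHIFT`, so the new expression yields the same PFN limit the removed `dma_to_pfn()` call produced. A small sketch, again under the simplifying assumption of a flat per-device offset (`DMA_OFFSET` is hypothetical):

```c
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))	/* as in include/linux/pfn.h */
#define DMA_OFFSET	0x80000000ULL		/* hypothetical per-device offset */

int main(void)
{
	uint64_t mask = 0xffffffffULL;		/* device's DMA mask */
	uint64_t phys = mask - DMA_OFFSET;	/* flat-offset model of dma_to_phys(dev, mask) */

	/* Old form, dma_to_pfn(dev, mask), modelled as a shift of the same physical address. */
	unsigned long old_pfn = (unsigned long)(phys >> PAGE_SHIFT);

	/* New form: PHYS_PFN(dma_to_phys(dev, mask)) gives the identical PFN limit. */
	assert(PHYS_PFN(phys) == old_pfn);
	return 0;
}
```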
@@ -681,7 +681,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	if (page) {
 		unsigned long flags;
 
-		*handle = pfn_to_dma(dev, page_to_pfn(page));
+		*handle = phys_to_dma(dev, page_to_phys(page));
 		buf->virt = args.want_vaddr ? addr : page;
 
 		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
@@ -721,7 +721,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	int ret = -ENXIO;
 	unsigned long nr_vma_pages = vma_pages(vma);
 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long pfn = dma_to_pfn(dev, dma_addr);
+	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
 	unsigned long off = vma->vm_pgoff;
 
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
@@ -762,7 +762,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 			   dma_addr_t handle, unsigned long attrs,
 			   bool is_coherent)
 {
-	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+	struct page *page = phys_to_page(dma_to_phys(dev, handle));
 	struct arm_dma_buffer *buf;
 	struct arm_dma_free_args args = {
 		.dev = dev,
@@ -796,15 +796,15 @@ static int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 		 void *cpu_addr, dma_addr_t handle, size_t size,
 		 unsigned long attrs)
 {
-	unsigned long pfn = dma_to_pfn(dev, handle);
+	phys_addr_t paddr = dma_to_phys(dev, handle);
 	struct page *page;
 	int ret;
 
 	/* If the PFN is not valid, we do not have a struct page */
-	if (!pfn_valid(pfn))
+	if (!pfn_valid(PHYS_PFN(paddr)))
 		return -ENXIO;
 
-	page = pfn_to_page(pfn);
+	page = phys_to_page(paddr);
 
 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
 	if (unlikely(ret))