@@ -111,8 +111,8 @@ static int intel_gtt_map_memory(struct page **pages,
 	for_each_sg(st->sgl, sg, num_entries, i)
 		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 
-	if (!pci_map_sg(intel_private.pcidev,
-			st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
+	if (!dma_map_sg(&intel_private.pcidev->dev, st->sgl, st->nents,
+			DMA_BIDIRECTIONAL))
 		goto err;
 
 	return 0;
@@ -127,8 +127,8 @@ static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
 	struct sg_table st;
 	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
 
-	pci_unmap_sg(intel_private.pcidev, sg_list,
-		     num_sg, PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_sg(&intel_private.pcidev->dev, sg_list, num_sg,
+		     DMA_BIDIRECTIONAL);
 
 	st.sgl = sg_list;
 	st.orig_nents = st.nents = num_sg;
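The two hunks above swap the legacy pci_map_sg()/pci_unmap_sg() wrappers for the generic DMA API: the struct pci_dev argument becomes &pcidev->dev and PCI_DMA_BIDIRECTIONAL becomes DMA_BIDIRECTIONAL. As a standalone sketch of that map/unmap pairing (the pdev and st parameters and the error value are illustrative, not taken from intel-gtt.c):

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>

/* Illustrative only: map an already-populated sg_table for a PCI device,
 * use it, then unmap it with the same nents that was passed to dma_map_sg().
 */
static int example_map_unmap_sg(struct pci_dev *pdev, struct sg_table *st)
{
	/* dma_map_sg() returns the number of mapped entries, 0 on failure */
	if (!dma_map_sg(&pdev->dev, st->sgl, st->nents, DMA_BIDIRECTIONAL))
		return -ENOMEM;

	/* ... device performs DMA against the mapped entries here ... */

	dma_unmap_sg(&pdev->dev, st->sgl, st->nents, DMA_BIDIRECTIONAL);
	return 0;
}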
@@ -303,9 +303,9 @@ static int intel_gtt_setup_scratch_page(void)
 	set_pages_uc(page, 1);
 
 	if (intel_private.needs_dmar) {
-		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
-					PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) {
+		dma_addr = dma_map_page(&intel_private.pcidev->dev, page, 0,
+					PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(&intel_private.pcidev->dev, dma_addr)) {
 			__free_page(page);
 			return -EINVAL;
 		}
@@ -552,9 +552,9 @@ static void intel_gtt_teardown_scratch_page(void)
 {
 	set_pages_wb(intel_private.scratch_page, 1);
 	if (intel_private.needs_dmar)
-		pci_unmap_page(intel_private.pcidev,
-			       intel_private.scratch_page_dma,
-			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		dma_unmap_page(&intel_private.pcidev->dev,
+			       intel_private.scratch_page_dma, PAGE_SIZE,
+			       DMA_BIDIRECTIONAL);
 	__free_page(intel_private.scratch_page);
 }
 
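These two hunks apply the same conversion to the single-page scratch mapping: pci_map_page()/pci_unmap_page() become dma_map_page()/dma_unmap_page(), and the error check moves from pci_dma_mapping_error() to dma_mapping_error(). A minimal sketch of that pattern outside the driver (function names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Illustrative only: map one page bidirectionally and report failure via
 * DMA_MAPPING_ERROR.
 */
static dma_addr_t example_map_page(struct pci_dev *pdev, struct page *page)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, dma_addr))
		return DMA_MAPPING_ERROR;
	return dma_addr;
}

static void example_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr)
{
	dma_unmap_page(&pdev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
}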
@@ -1412,13 +1412,13 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 
 	if (bridge) {
 		mask = intel_private.driver->dma_mask_size;
-		if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+		if (dma_set_mask(&intel_private.pcidev->dev, DMA_BIT_MASK(mask)))
 			dev_err(&intel_private.pcidev->dev,
 				"set gfx device dma mask %d-bit failed!\n",
 				mask);
 		else
-			pci_set_consistent_dma_mask(intel_private.pcidev,
-						    DMA_BIT_MASK(mask));
+			dma_set_coherent_mask(&intel_private.pcidev->dev,
					      DMA_BIT_MASK(mask));
 	}
 
 	if (intel_gtt_init() != 0) {
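The final hunk replaces pci_set_dma_mask()/pci_set_consistent_dma_mask() with dma_set_mask()/dma_set_coherent_mask() on the underlying struct device. A sketch of the same mask setup (the helper name and mask_bits parameter are illustrative; when both masks use the same width, dma_set_mask_and_coherent() is the usual one-call shorthand):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Illustrative only: program the streaming DMA mask, then the coherent mask. */
static int example_set_dma_masks(struct pci_dev *pdev, unsigned int mask_bits)
{
	int ret;

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(mask_bits));
	if (ret)
		return ret;

	return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(mask_bits));
}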