@@ -50,8 +50,6 @@ static unsigned long iommu_pages;	/* .. and in pages */
 
 static u32 *iommu_gatt_base;		/* Remapping table */
 
-static dma_addr_t bad_dma_addr;
-
 /*
  * If this is disabled the IOMMU will use an optimized flushing strategy
  * of only flushing when an mapping is reused. With it true the GART is
@@ -74,8 +72,6 @@ static u32 gart_unmapped_entry;
 	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
 #define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
 
-#define EMERGENCY_PAGES 32 /* = 128KB */
-
 #ifdef CONFIG_AGP
 #define AGPEXTERN extern
 #else
@@ -184,14 +180,6 @@ static void iommu_full(struct device *dev, size_t size, int dir)
 	 */
 
 	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);
-
-	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
-		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-			panic("PCI-DMA: Memory would be corrupted\n");
-		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-			panic(KERN_ERR
-				"PCI-DMA: Random memory would be DMAed\n");
-	}
 #ifdef CONFIG_IOMMU_LEAK
 	dump_leak();
 #endif
@@ -220,7 +208,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 	int i;
 
 	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
-		return bad_dma_addr;
+		return DMA_MAPPING_ERROR;
 
 	iommu_page = alloc_iommu(dev, npages, align_mask);
 	if (iommu_page == -1) {
@@ -229,7 +217,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 		if (panic_on_overflow)
 			panic("dma_map_area overflow %lu bytes\n", size);
 		iommu_full(dev, size, dir);
-		return bad_dma_addr;
+		return DMA_MAPPING_ERROR;
 	}
 
 	for (i = 0; i < npages; i++) {
@@ -271,7 +259,7 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	int npages;
 	int i;
 
-	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
+	if (dma_addr == DMA_MAPPING_ERROR ||
 	    dma_addr >= iommu_bus_base + iommu_size)
 		return;
 
@@ -315,7 +303,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 
 		if (nonforced_iommu(dev, addr, s->length)) {
 			addr = dma_map_area(dev, addr, s->length, dir, 0);
-			if (addr == bad_dma_addr) {
+			if (addr == DMA_MAPPING_ERROR) {
 				if (i > 0)
 					gart_unmap_sg(dev, sg, i, dir, 0);
 				nents = 0;
@@ -471,7 +459,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 	iommu_full(dev, pages << PAGE_SHIFT, dir);
 	for_each_sg(sg, s, nents, i)
-		s->dma_address = bad_dma_addr;
+		s->dma_address = DMA_MAPPING_ERROR;
 	return 0;
 }
 
@@ -490,7 +478,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
 	*dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
 			DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
 	flush_gart();
-	if (unlikely(*dma_addr == bad_dma_addr))
+	if (unlikely(*dma_addr == DMA_MAPPING_ERROR))
 		goto out_free;
 	return vaddr;
 out_free:
@@ -507,11 +495,6 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr,
 	dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs);
 }
 
-static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return (dma_addr == bad_dma_addr);
-}
-
 static int no_agp;
 
 static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ -695,7 +678,6 @@ static const struct dma_map_ops gart_dma_ops = {
 	.unmap_page = gart_unmap_page,
 	.alloc = gart_alloc_coherent,
 	.free = gart_free_coherent,
-	.mapping_error = gart_mapping_error,
 	.dma_supported = dma_direct_supported,
 };
 
@@ -730,7 +712,6 @@ int __init gart_iommu_init(void)
 	unsigned long aper_base, aper_size;
 	unsigned long start_pfn, end_pfn;
 	unsigned long scratch;
-	long i;
 
 	if (!amd_nb_has_feature(AMD_NB_GART))
 		return 0;
@@ -784,19 +765,12 @@ int __init gart_iommu_init(void)
 	}
 #endif
 
-	/*
-	 * Out of IOMMU space handling.
-	 * Reserve some invalid pages at the beginning of the GART.
-	 */
-	bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
-
 	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
 	       iommu_size >> 20);
 
 	agp_memory_reserved = iommu_size;
 	iommu_start = aper_size - iommu_size;
 	iommu_bus_base = info.aper_base + iommu_start;
-	bad_dma_addr = iommu_bus_base;
 	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
 
 	/*
@@ -838,8 +812,6 @@ int __init gart_iommu_init(void)
 	if (!scratch)
 		panic("Cannot allocate iommu scratch page");
 	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
-	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
-		iommu_gatt_base[i] = gart_unmapped_entry;
 
 	flush_gart();
 	dma_ops = &gart_dma_ops;
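For context, a minimal sketch (not part of this diff) of how callers see the new behaviour: with bad_dma_addr and the ->mapping_error callback removed, a failed GART mapping is reported through the generic DMA_MAPPING_ERROR sentinel, and drivers keep detecting it with dma_mapping_error() as before. The device, buffer, and length below are hypothetical placeholders.

#include <linux/dma-mapping.h>

/* Hypothetical caller: map a buffer for device-bound DMA and check the result. */
static int example_map_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/*
	 * After this patch the GART map paths return DMA_MAPPING_ERROR on
	 * failure, which dma_mapping_error() recognizes generically.
	 */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... start the transfer, then tear the mapping down ... */
	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	return 0;
}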