@@ -51,14 +51,16 @@ struct tegra_smmu {
 	struct iommu_device iommu;	/* IOMMU Core code handle */
 };
 
+struct tegra_pd;
+
 struct tegra_smmu_as {
 	struct iommu_domain domain;
 	struct tegra_smmu *smmu;
 	unsigned int use_count;
 	spinlock_t lock;
 	u32 *count;
 	struct page **pts;
-	struct page *pd;
+	struct tegra_pd *pd;
 	dma_addr_t pd_dma;
 	unsigned id;
 	u32 attr;
@@ -155,6 +157,10 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
 #define SMMU_PDE_ATTR		(SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
 				 SMMU_PDE_NONSECURE)
 
+struct tegra_pd {
+	u32 val[SMMU_NUM_PDE];
+};
+
 static unsigned int iova_pd_index(unsigned long iova)
 {
 	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
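The new struct gives the page directory a real C type: one u32 per page-directory entry, one page in total. Below is a minimal sketch of the size invariant the rest of the patch relies on; the static_assert is illustrative and not part of the patch, and the define values assume the driver's usual layout of 1024 four-byte entries:

	#include <linux/build_bug.h>
	#include <linux/types.h>

	#define SMMU_NUM_PDE	1024			/* as in tegra-smmu.c */
	#define SMMU_SIZE_PD	(SMMU_NUM_PDE * 4)	/* one 4 KiB page */

	struct tegra_pd {
		u32 val[SMMU_NUM_PDE];
	};

	/* tegra_smmu_as_prepare() maps SMMU_SIZE_PD bytes starting at as->pd,
	 * so the struct must cover exactly one page directory. */
	static_assert(sizeof(struct tegra_pd) == SMMU_SIZE_PD);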
@@ -284,23 +290,23 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
 
 	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
 
-	as->pd = __iommu_alloc_pages(GFP_KERNEL | __GFP_DMA, 0);
+	as->pd = iommu_alloc_page(GFP_KERNEL | __GFP_DMA);
 	if (!as->pd) {
 		kfree(as);
 		return NULL;
 	}
 
 	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
 	if (!as->count) {
-		__iommu_free_pages(as->pd, 0);
+		iommu_free_page(as->pd);
 		kfree(as);
 		return NULL;
 	}
 
 	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
 	if (!as->pts) {
 		kfree(as->count);
-		__iommu_free_pages(as->pd, 0);
+		iommu_free_page(as->pd);
 		kfree(as);
 		return NULL;
 	}
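Unlike __iommu_alloc_pages(), which hands back a struct page, iommu_alloc_page() (from drivers/iommu/iommu-pages.h) returns the page's kernel virtual address, which is what lets the result land directly in the retyped as->pd. A hedged sketch of the pairing, with hypothetical wrapper names for illustration:

	#include "iommu-pages.h"

	/* Returns a void * virtual address, so it assigns straight to the
	 * typed pointer; no page_address() needed at the use sites. */
	static struct tegra_pd *tegra_pd_alloc(void)
	{
		return iommu_alloc_page(GFP_KERNEL | __GFP_DMA);
	}

	static void tegra_pd_free(struct tegra_pd *pd)
	{
		iommu_free_page(pd);	/* takes the virtual address back */
	}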
@@ -417,8 +423,8 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
 		goto unlock;
 	}
 
-	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
-				  DMA_TO_DEVICE);
+	as->pd_dma =
+		dma_map_single(smmu->dev, as->pd, SMMU_SIZE_PD, DMA_TO_DEVICE);
 	if (dma_mapping_error(smmu->dev, as->pd_dma)) {
 		err = -ENOMEM;
 		goto unlock;
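With as->pd now a kernel virtual address, dma_map_single() is the natural mapping API. For lowmem memory such as this GFP_KERNEL | __GFP_DMA page the two calls describe the same bytes, since dma_map_single() boils down to dma_map_page() on virt_to_page(ptr) at offset_in_page(ptr). A side-by-side sketch with illustrative helper names:

	#include <linux/dma-mapping.h>

	/* Old form: struct page handle plus an offset within the page. */
	static dma_addr_t pd_map_by_page(struct device *dev, struct page *page)
	{
		return dma_map_page(dev, page, 0, SMMU_SIZE_PD, DMA_TO_DEVICE);
	}

	/* New form: pass the kernel virtual address directly. */
	static dma_addr_t pd_map_by_virt(struct device *dev, struct tegra_pd *pd)
	{
		return dma_map_single(dev, pd, SMMU_SIZE_PD, DMA_TO_DEVICE);
	}

Either way the caller must check the result with dma_mapping_error(), as the hunk above does.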
@@ -450,7 +456,7 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
 	return 0;
 
 err_unmap:
-	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+	dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
 unlock:
 	mutex_unlock(&smmu->lock);
 
@@ -469,7 +475,7 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
 
 	tegra_smmu_free_asid(smmu, as->id);
 
-	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+	dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
 
 	as->smmu = NULL;
 
@@ -548,11 +554,11 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
 {
 	unsigned int pd_index = iova_pd_index(iova);
 	struct tegra_smmu *smmu = as->smmu;
-	u32 *pd = page_address(as->pd);
+	struct tegra_pd *pd = as->pd;
 	unsigned long offset = pd_index * sizeof(*pd);
 
 	/* Set the page directory entry first */
-	pd[pd_index] = value;
+	pd->val[pd_index] = value;
 
 	/* The flush the page directory entry from caches */
 	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
@@ -577,14 +583,12 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
 	unsigned int pd_index = iova_pd_index(iova);
 	struct tegra_smmu *smmu = as->smmu;
 	struct page *pt_page;
-	u32 *pd;
 
 	pt_page = as->pts[pd_index];
 	if (!pt_page)
 		return NULL;
 
-	pd = page_address(as->pd);
-	*dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
+	*dmap = smmu_pde_to_dma(smmu, as->pd->val[pd_index]);
 
 	return tegra_smmu_pte_offset(pt_page, iova);
 }
@@ -619,9 +623,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
 
 		*dmap = dma;
 	} else {
-		u32 *pd = page_address(as->pd);
-
-		*dmap = smmu_pde_to_dma(smmu, pd[pde]);
+		*dmap = smmu_pde_to_dma(smmu, as->pd->val[pde]);
 	}
 
 	return tegra_smmu_pte_offset(as->pts[pde], iova);
@@ -645,8 +647,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
 	 */
 	if (--as->count[pde] == 0) {
 		struct tegra_smmu *smmu = as->smmu;
-		u32 *pd = page_address(as->pd);
-		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
+		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, as->pd->val[pde]);
 
 		tegra_smmu_set_pde(as, iova, 0);
 
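The set_pde and lookup hunks above are all the same mechanical change at each site: instead of re-deriving a u32 * with page_address(as->pd) and indexing it, the code dereferences the typed pointer directly. A condensed before/after sketch; the helper functions are illustrative, not from the patch:

	/* Before: every reader re-derived the virtual address. */
	static u32 pde_read_old(struct tegra_smmu_as *as, unsigned int pde)
	{
		u32 *pd = page_address(as->pd);

		return pd[pde];
	}

	/* After: as->pd is already a typed kernel pointer. */
	static u32 pde_read_new(struct tegra_smmu_as *as, unsigned int pde)
	{
		return as->pd->val[pde];
	}

Beyond dropping the page_address() boilerplate, the typed access lets the compiler distinguish a page directory from any other struct page the driver holds.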