@@ -79,7 +79,6 @@
 
 struct rk_iommu_domain {
         struct list_head iommus;
-        struct platform_device *pdev;
         u32 *dt; /* page directory table */
         dma_addr_t dt_dma;
         spinlock_t iommus_lock; /* lock for iommus list */
@@ -105,12 +104,14 @@ struct rk_iommu {
         struct iommu_domain *domain; /* domain to which iommu is attached */
 };
 
+static struct device *dma_dev;
+
 static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
                                   unsigned int count)
 {
         size_t size = count * sizeof(u32); /* count of u32 entry */
 
-        dma_sync_single_for_device(&dom->pdev->dev, dma, size, DMA_TO_DEVICE);
+        dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
 }
 
 static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
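
The hunk above is the core of the change: every DMA API call in the driver now goes through one module-level dma_dev pointer instead of a throwaway per-domain platform device. The idiom is sketched below with hypothetical names (example_probe and example_flush are illustrations, not this driver's functions): the first instance to probe donates its struct device, and consumers must not touch the pointer before that happens.

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/platform_device.h>

    static struct device *dma_dev;  /* set once, by the first instance to probe */

    static int example_probe(struct platform_device *pdev)
    {
            /* The first real, firmware-described device wins. */
            if (!dma_dev)
                    dma_dev = &pdev->dev;
            return 0;
    }

    static void example_flush(dma_addr_t dma, size_t size)
    {
            /* Callers must guarantee at least one instance has probed. */
            dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
    }

One consequence: domain allocation can now fail until the first IOMMU probes, which the new if (!dma_dev) check in rk_iommu_domain_alloc() below makes explicit.
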
@@ -625,7 +626,6 @@ static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
 static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
                                   dma_addr_t iova)
 {
-        struct device *dev = &rk_domain->pdev->dev;
         u32 *page_table, *dte_addr;
         u32 dte_index, dte;
         phys_addr_t pt_phys;
@@ -643,9 +643,9 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
         if (!page_table)
                 return ERR_PTR(-ENOMEM);
 
-        pt_dma = dma_map_single(dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
-        if (dma_mapping_error(dev, pt_dma)) {
-                dev_err(dev, "DMA mapping error while allocating page table\n");
+        pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
+        if (dma_mapping_error(dma_dev, pt_dma)) {
+                dev_err(dma_dev, "DMA mapping error while allocating page table\n");
                 free_page((unsigned long)page_table);
                 return ERR_PTR(-ENOMEM);
         }
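
Here, as in the directory-table handling further down, tables are mapped DMA_TO_DEVICE: the CPU writes entries and the IOMMU hardware only reads them, so only CPU-to-device syncs are ever needed. The lifecycle condensed from this patch's hunks is shown below; the get_zeroed_page() call and its flags are an assumption, since only the map/sync/unmap/free calls appear in the diff.

    /* allocate one page-sized table the IOMMU can address */
    u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
    dma_addr_t pt_dma = dma_map_single(dma_dev, pt, SPAGE_SIZE, DMA_TO_DEVICE);

    /* ... CPU fills in entries ... */
    dma_sync_single_for_device(dma_dev, pt_dma, SPAGE_SIZE, DMA_TO_DEVICE);

    /* teardown */
    dma_unmap_single(dma_dev, pt_dma, SPAGE_SIZE, DMA_TO_DEVICE);
    free_page((unsigned long)pt);
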
@@ -911,29 +911,20 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
 static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
 {
         struct rk_iommu_domain *rk_domain;
-        struct platform_device *pdev;
-        struct device *iommu_dev;
 
         if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
                 return NULL;
 
-        /* Register a pdev per domain, so DMA API can base on this *dev
-         * even some virtual master doesn't have an iommu slave
-         */
-        pdev = platform_device_register_simple("rk_iommu_domain",
-                                               PLATFORM_DEVID_AUTO, NULL, 0);
-        if (IS_ERR(pdev))
+        if (!dma_dev)
                 return NULL;
 
-        rk_domain = devm_kzalloc(&pdev->dev, sizeof(*rk_domain), GFP_KERNEL);
+        rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
         if (!rk_domain)
-                goto err_unreg_pdev;
-
-        rk_domain->pdev = pdev;
+                return NULL;
 
         if (type == IOMMU_DOMAIN_DMA &&
             iommu_get_dma_cookie(&rk_domain->domain))
-                goto err_unreg_pdev;
+                return NULL;
 
         /*
          * rk32xx iommus use a 2 level pagetable.
@@ -944,11 +935,10 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
         if (!rk_domain->dt)
                 goto err_put_cookie;
 
-        iommu_dev = &pdev->dev;
-        rk_domain->dt_dma = dma_map_single(iommu_dev, rk_domain->dt,
+        rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
                                            SPAGE_SIZE, DMA_TO_DEVICE);
-        if (dma_mapping_error(iommu_dev, rk_domain->dt_dma)) {
-                dev_err(iommu_dev, "DMA map error for DT\n");
+        if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
+                dev_err(dma_dev, "DMA map error for DT\n");
                 goto err_free_dt;
         }
 
@@ -969,8 +959,6 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
 err_put_cookie:
         if (type == IOMMU_DOMAIN_DMA)
                 iommu_put_dma_cookie(&rk_domain->domain);
-err_unreg_pdev:
-        platform_device_unregister(pdev);
 
         return NULL;
 }
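
With the per-domain pdev gone there is nothing left to unregister, so the early failure paths in rk_iommu_domain_alloc() collapse into plain return NULL, and goto labels survive only for the cookie and directory-table cleanups. From the gotos visible in these hunks, the remaining unwind plausibly chains as follows (a sketch of the tail of the function, not a verbatim quote):

    err_free_dt:
            free_page((unsigned long)rk_domain->dt);
    err_put_cookie:
            if (type == IOMMU_DOMAIN_DMA)
                    iommu_put_dma_cookie(&rk_domain->domain);

            return NULL;
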
@@ -987,20 +975,18 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
                 if (rk_dte_is_pt_valid(dte)) {
                         phys_addr_t pt_phys = rk_dte_pt_address(dte);
                         u32 *page_table = phys_to_virt(pt_phys);
-                        dma_unmap_single(&rk_domain->pdev->dev, pt_phys,
+                        dma_unmap_single(dma_dev, pt_phys,
                                          SPAGE_SIZE, DMA_TO_DEVICE);
                         free_page((unsigned long)page_table);
                 }
         }
 
-        dma_unmap_single(&rk_domain->pdev->dev, rk_domain->dt_dma,
+        dma_unmap_single(dma_dev, rk_domain->dt_dma,
                          SPAGE_SIZE, DMA_TO_DEVICE);
         free_page((unsigned long)rk_domain->dt);
 
         if (domain->type == IOMMU_DOMAIN_DMA)
                 iommu_put_dma_cookie(&rk_domain->domain);
-
-        platform_device_unregister(rk_domain->pdev);
 }
 
 static bool rk_iommu_is_dev_iommu_master(struct device *dev)
@@ -1123,30 +1109,6 @@ static const struct iommu_ops rk_iommu_ops = {
         .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
 };
 
-static int rk_iommu_domain_probe(struct platform_device *pdev)
-{
-        struct device *dev = &pdev->dev;
-
-        dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
-        if (!dev->dma_parms)
-                return -ENOMEM;
-
-        /* Set dma_ops for dev, otherwise it would be dummy_dma_ops */
-        arch_setup_dma_ops(dev, 0, DMA_BIT_MASK(32), NULL, false);
-
-        dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
-        dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
-
-        return 0;
-}
-
-static struct platform_driver rk_iommu_domain_driver = {
-        .probe = rk_iommu_domain_probe,
-        .driver = {
-                   .name = "rk_iommu_domain",
-        },
-};
-
 static int rk_iommu_probe(struct platform_device *pdev)
 {
         struct device *dev = &pdev->dev;
@@ -1220,6 +1182,14 @@ static int rk_iommu_probe(struct platform_device *pdev)
         if (err)
                 goto err_remove_sysfs;
 
+        /*
+         * Use the first registered IOMMU device for domain to use with DMA
+         * API, since a domain might not physically correspond to a single
+         * IOMMU device.
+         */
+        if (!dma_dev)
+                dma_dev = &pdev->dev;
+
         return 0;
 err_remove_sysfs:
         iommu_device_sysfs_remove(&iommu->iommu);
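
This hunk is also why rk_iommu_domain_probe() above could be deleted: a platform device conjured with platform_device_register_simple() has no firmware node, so it inherits no dma_ops or DMA masks, and the old probe had to fake them with arch_setup_dma_ops() and mask coercion. A real IOMMU device described in the device tree gets all of that from firmware. A more defensive adoption, which this patch does not do, might look like the hypothetical guard below:

    if (!dma_dev) {
            /* Hypothetical: verify the device can express 32-bit bus addresses. */
            if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
                    dev_warn(&pdev->dev, "unsuitable as DMA mapping device\n");
            else
                    dma_dev = &pdev->dev;
    }
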
@@ -1276,14 +1246,7 @@ static int __init rk_iommu_init(void)
         if (ret)
                 return ret;
 
-        ret = platform_driver_register(&rk_iommu_domain_driver);
-        if (ret)
-                return ret;
-
-        ret = platform_driver_register(&rk_iommu_driver);
-        if (ret)
-                platform_driver_unregister(&rk_iommu_domain_driver);
-        return ret;
+        return platform_driver_register(&rk_iommu_driver);
 }
 
 subsys_initcall(rk_iommu_init);