@@ -11,6 +11,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
@@ -29,8 +30,6 @@
 #include <linux/regmap.h>
 #include <linux/mfd/syscon.h>
 
-#include <asm/cacheflush.h>
-
 #include <linux/platform_data/iommu-omap.h>
 
 #include "omap-iopgtable.h"
@@ -454,36 +453,35 @@ static void flush_iotlb_all(struct omap_iommu *obj)
 /*
  * H/W pagetable operations
  */
-static void flush_iopgd_range(u32 *first, u32 *last)
+static void flush_iopte_range(struct device *dev, dma_addr_t dma,
+                              unsigned long offset, int num_entries)
 {
-        /* FIXME: L2 cache should be taken care of if it exists */
-        do {
-                asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
-                    : : "r" (first));
-                first += L1_CACHE_BYTES / sizeof(*first);
-        } while (first <= last);
-}
+        size_t size = num_entries * sizeof(u32);
 
-static void flush_iopte_range(u32 *first, u32 *last)
-{
-        /* FIXME: L2 cache should be taken care of if it exists */
-        do {
-                asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
-                    : : "r" (first));
-                first += L1_CACHE_BYTES / sizeof(*first);
-        } while (first <= last);
+        dma_sync_single_range_for_device(dev, dma, offset, size, DMA_TO_DEVICE);
 }
 
-static void iopte_free(u32 *iopte)
+static void iopte_free(struct omap_iommu *obj, u32 *iopte, bool dma_valid)
 {
+        dma_addr_t pt_dma;
+
         /* Note: freed iopte's must be clean ready for re-use */
-        if (iopte)
+        if (iopte) {
+                if (dma_valid) {
+                        pt_dma = virt_to_phys(iopte);
+                        dma_unmap_single(obj->dev, pt_dma, IOPTE_TABLE_SIZE,
+                                         DMA_TO_DEVICE);
+                }
+
                 kmem_cache_free(iopte_cachep, iopte);
+        }
 }
 
-static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
+static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
+                        dma_addr_t *pt_dma, u32 da)
 {
         u32 *iopte;
+        unsigned long offset = iopgd_index(da) * sizeof(da);
 
         /* a table has already existed */
         if (*iopgd)
@@ -500,18 +498,38 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
                 if (!iopte)
                         return ERR_PTR(-ENOMEM);
 
+                *pt_dma = dma_map_single(obj->dev, iopte, IOPTE_TABLE_SIZE,
+                                         DMA_TO_DEVICE);
+                if (dma_mapping_error(obj->dev, *pt_dma)) {
+                        dev_err(obj->dev, "DMA map error for L2 table\n");
+                        iopte_free(obj, iopte, false);
+                        return ERR_PTR(-ENOMEM);
+                }
+
+                /*
+                 * we rely on dma address and the physical address to be
+                 * the same for mapping the L2 table
+                 */
+                if (WARN_ON(*pt_dma != virt_to_phys(iopte))) {
+                        dev_err(obj->dev, "DMA translation error for L2 table\n");
+                        dma_unmap_single(obj->dev, *pt_dma, IOPTE_TABLE_SIZE,
+                                         DMA_TO_DEVICE);
+                        iopte_free(obj, iopte, false);
+                        return ERR_PTR(-ENOMEM);
+                }
+
                 *iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
-                flush_iopgd_range(iopgd, iopgd);
 
+                flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
                 dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
         } else {
                 /* We raced, free the reduniovant table */
-                iopte_free(iopte);
+                iopte_free(obj, iopte, false);
         }
 
 pte_ready:
         iopte = iopte_offset(iopgd, da);
-
+        *pt_dma = virt_to_phys(iopte);
         dev_vdbg(obj->dev,
                  "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
                  __func__, da, iopgd, *iopgd, iopte, *iopte);
@@ -522,6 +540,7 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
 static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
 {
         u32 *iopgd = iopgd_offset(obj, da);
+        unsigned long offset = iopgd_index(da) * sizeof(da);
 
         if ((da | pa) & ~IOSECTION_MASK) {
                 dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
@@ -530,13 +549,14 @@ static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
         }
 
         *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
-        flush_iopgd_range(iopgd, iopgd);
+        flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
         return 0;
 }
 
 static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
 {
         u32 *iopgd = iopgd_offset(obj, da);
+        unsigned long offset = iopgd_index(da) * sizeof(da);
         int i;
 
         if ((da | pa) & ~IOSUPER_MASK) {
@@ -547,20 +567,22 @@ static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
 
         for (i = 0; i < 16; i++)
                 *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
-        flush_iopgd_range(iopgd, iopgd + 15);
+        flush_iopte_range(obj->dev, obj->pd_dma, offset, 16);
         return 0;
 }
 
 static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
 {
         u32 *iopgd = iopgd_offset(obj, da);
-        u32 *iopte = iopte_alloc(obj, iopgd, da);
+        dma_addr_t pt_dma;
+        u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
+        unsigned long offset = iopte_index(da) * sizeof(da);
 
         if (IS_ERR(iopte))
                 return PTR_ERR(iopte);
 
         *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
-        flush_iopte_range(iopte, iopte);
+        flush_iopte_range(obj->dev, pt_dma, offset, 1);
 
         dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
                  __func__, da, pa, iopte, *iopte);
@@ -571,7 +593,9 @@ static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
 static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
 {
         u32 *iopgd = iopgd_offset(obj, da);
-        u32 *iopte = iopte_alloc(obj, iopgd, da);
+        dma_addr_t pt_dma;
+        u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
+        unsigned long offset = iopte_index(da) * sizeof(da);
         int i;
 
         if ((da | pa) & ~IOLARGE_MASK) {
@@ -585,7 +609,7 @@ static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
 
         for (i = 0; i < 16; i++)
                 *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
-        flush_iopte_range(iopte, iopte + 15);
+        flush_iopte_range(obj->dev, pt_dma, offset, 16);
         return 0;
 }
 
@@ -674,6 +698,9 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
         size_t bytes;
         u32 *iopgd = iopgd_offset(obj, da);
         int nent = 1;
+        dma_addr_t pt_dma;
+        unsigned long pd_offset = iopgd_index(da) * sizeof(da);
+        unsigned long pt_offset = iopte_index(da) * sizeof(da);
 
         if (!*iopgd)
                 return 0;
@@ -690,7 +717,8 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
                 }
                 bytes *= nent;
                 memset(iopte, 0, nent * sizeof(*iopte));
-                flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));
+                pt_dma = virt_to_phys(iopte);
+                flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);
 
                 /*
                  * do table walk to check if this table is necessary or not
@@ -700,7 +728,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
                         if (iopte[i])
                                 goto out;
 
-                iopte_free(iopte);
+                iopte_free(obj, iopte, true);
                 nent = 1; /* for the next L1 entry */
         } else {
                 bytes = IOPGD_SIZE;
@@ -712,7 +740,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
                 bytes *= nent;
         }
         memset(iopgd, 0, nent * sizeof(*iopgd));
-        flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));
+        flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);
 out:
         return bytes;
 }
@@ -738,6 +766,7 @@ static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
 
 static void iopgtable_clear_entry_all(struct omap_iommu *obj)
 {
+        unsigned long offset;
         int i;
 
         spin_lock(&obj->page_table_lock);
@@ -748,15 +777,16 @@ static void iopgtable_clear_entry_all(struct omap_iommu *obj)
 
                 da = i << IOPGD_SHIFT;
                 iopgd = iopgd_offset(obj, da);
+                offset = iopgd_index(da) * sizeof(da);
 
                 if (!*iopgd)
                         continue;
 
                 if (iopgd_is_table(*iopgd))
-                        iopte_free(iopte_offset(iopgd, 0));
+                        iopte_free(obj, iopte_offset(iopgd, 0), true);
 
                 *iopgd = 0;
-                flush_iopgd_range(iopgd, iopgd);
+                flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
         }
 
         flush_iotlb_all(obj);
@@ -815,10 +845,18 @@ static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
 
         spin_lock(&obj->iommu_lock);
 
+        obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE,
+                                     DMA_TO_DEVICE);
+        if (dma_mapping_error(obj->dev, obj->pd_dma)) {
+                dev_err(obj->dev, "DMA map error for L1 table\n");
+                err = -ENOMEM;
+                goto out_err;
+        }
+
         obj->iopgd = iopgd;
         err = iommu_enable(obj);
         if (err)
-                goto err_enable;
+                goto out_err;
         flush_iotlb_all(obj);
 
         spin_unlock(&obj->iommu_lock);
@@ -827,7 +865,7 @@ static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
 
         return 0;
 
-err_enable:
+out_err:
         spin_unlock(&obj->iommu_lock);
 
         return err;
@@ -844,7 +882,10 @@ static void omap_iommu_detach(struct omap_iommu *obj)
 
         spin_lock(&obj->iommu_lock);
 
+        dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
+                         DMA_TO_DEVICE);
         iommu_disable(obj);
+        obj->pd_dma = 0;
         obj->iopgd = NULL;
 
         spin_unlock(&obj->iommu_lock);
@@ -1008,11 +1049,6 @@ static struct platform_driver omap_iommu_driver = {
         },
 };
 
-static void iopte_cachep_ctor(void *iopte)
-{
-        clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
-}
-
 static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
 {
         memset(e, 0, sizeof(*e));
@@ -1159,7 +1195,6 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
         if (WARN_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE)))
                 goto fail_align;
 
-        clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
         spin_lock_init(&omap_domain->lock);
 
         omap_domain->domain.geometry.aperture_start = 0;
@@ -1347,7 +1382,7 @@ static int __init omap_iommu_init(void)
         of_node_put(np);
 
         p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
-                              iopte_cachep_ctor);
+                              NULL);
         if (!p)
                 return -ENOMEM;
         iopte_cachep = p;
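
For reference, here is a minimal standalone sketch (not part of the patch; struct pt_ctx, its fields, and the helper names are illustrative placeholders) of the streaming DMA-API pattern the diff adopts for a CPU-written, device-read page table: map the table once with dma_map_single(), push each CPU update to the device with dma_sync_single_range_for_device(), and tear the mapping down with dma_unmap_single().

/*
 * Illustrative only: streaming-DMA maintenance of a table the CPU writes
 * and a device reads. All names below are placeholders, not driver API.
 */
#include <linux/dma-mapping.h>

struct pt_ctx {
        struct device *dev;     /* device that walks the table */
        u32 *table;             /* kernel virtual address of the table */
        dma_addr_t table_dma;   /* bus address from dma_map_single() */
        size_t size;            /* table size in bytes */
};

static int pt_ctx_map(struct pt_ctx *ctx)
{
        /* hand the whole table to the device, CPU-to-device direction */
        ctx->table_dma = dma_map_single(ctx->dev, ctx->table, ctx->size,
                                        DMA_TO_DEVICE);
        return dma_mapping_error(ctx->dev, ctx->table_dma) ? -ENOMEM : 0;
}

static void pt_ctx_set_entry(struct pt_ctx *ctx, unsigned int idx, u32 val)
{
        /* CPU updates one entry, then flushes just that range to the device */
        ctx->table[idx] = val;
        dma_sync_single_range_for_device(ctx->dev, ctx->table_dma,
                                         idx * sizeof(u32), sizeof(u32),
                                         DMA_TO_DEVICE);
}

static void pt_ctx_unmap(struct pt_ctx *ctx)
{
        dma_unmap_single(ctx->dev, ctx->table_dma, ctx->size, DMA_TO_DEVICE);
}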