@@ -425,7 +425,8 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
				   dma_addr_t tbl_dma_addr,
				   phys_addr_t orig_addr, size_t size,
-				   enum dma_data_direction dir)
+				   enum dma_data_direction dir,
+				   unsigned long attrs)
 {
	unsigned long flags;
	phys_addr_t tlb_addr;
@@ -526,7 +527,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index + i] = orig_addr + (i << IO_TLB_SHIFT);
-	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
 
	return tlb_addr;
@@ -539,18 +541,20 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
 
 static phys_addr_t
 map_single(struct device *hwdev, phys_addr_t phys, size_t size,
-	   enum dma_data_direction dir)
+	   enum dma_data_direction dir, unsigned long attrs)
 {
	dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
 
-	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
+	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
+				      dir, attrs);
 }
 
 /*
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
 void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
-			      size_t size, enum dma_data_direction dir)
+			      size_t size, enum dma_data_direction dir,
+			      unsigned long attrs)
 {
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -561,6 +565,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
	 * First, sync the memory before unmapping the entry
	 */
	if (orig_addr != INVALID_PHYS_ADDR &&
+	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
 
@@ -654,7 +659,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		 * GFP_DMA memory; fall back on map_single(), which
		 * will grab memory from the lowest available address range.
		 */
-		phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
+		phys_addr_t paddr = map_single(hwdev, 0, size,
+					       DMA_FROM_DEVICE, 0);
		if (paddr == SWIOTLB_MAP_ERROR)
			goto err_warn;
 
@@ -667,9 +673,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			       (unsigned long long)dma_mask,
			       (unsigned long long)dev_addr);
 
-			/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
+			/*
+			 * DMA_TO_DEVICE to avoid memcpy in unmap_single.
+			 * The DMA_ATTR_SKIP_CPU_SYNC is optional.
+			 */
			swiotlb_tbl_unmap_single(hwdev, paddr,
-						 size, DMA_TO_DEVICE);
+						 size, DMA_TO_DEVICE,
+						 DMA_ATTR_SKIP_CPU_SYNC);
			goto err_warn;
		}
	}
@@ -698,8 +708,12 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
	if (!is_swiotlb_buffer(paddr))
		free_pages((unsigned long)vaddr, get_order(size));
	else
-		/* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
-		swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE);
+		/*
+		 * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
+		 * DMA_ATTR_SKIP_CPU_SYNC is optional.
+		 */
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE,
+					 DMA_ATTR_SKIP_CPU_SYNC);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
 
@@ -714,8 +728,8 @@ swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
-	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
-	       "device %s\n", size, dev ? dev_name(dev) : "?");
+	dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
+			    size);
 
	if (size <= io_tlb_overflow || !do_panic)
		return;
@@ -755,7 +769,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 
	/* Oh well, have to allocate and map a bounce buffer. */
-	map = map_single(dev, phys, size, dir);
+	map = map_single(dev, phys, size, dir, attrs);
	if (map == SWIOTLB_MAP_ERROR) {
		swiotlb_full(dev, size, dir, 1);
		return phys_to_dma(dev, io_tlb_overflow_buffer);
@@ -764,12 +778,13 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
	dev_addr = phys_to_dma(dev, map);
 
	/* Ensure that the address returned is DMA'ble */
-	if (!dma_capable(dev, dev_addr, size)) {
-		swiotlb_tbl_unmap_single(dev, map, size, dir);
-		return phys_to_dma(dev, io_tlb_overflow_buffer);
-	}
+	if (dma_capable(dev, dev_addr, size))
+		return dev_addr;
 
-	return dev_addr;
+	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+
+	return phys_to_dma(dev, io_tlb_overflow_buffer);
 }
 EXPORT_SYMBOL_GPL(swiotlb_map_page);
 
@@ -782,14 +797,15 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
  * whatever the device wrote there.
  */
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-			 size_t size, enum dma_data_direction dir)
+			 size_t size, enum dma_data_direction dir,
+			 unsigned long attrs)
 {
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
	BUG_ON(dir == DMA_NONE);
 
	if (is_swiotlb_buffer(paddr)) {
-		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
		return;
	}
 
@@ -809,7 +825,7 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs)
 {
-	unmap_single(hwdev, dev_addr, size, dir);
+	unmap_single(hwdev, dev_addr, size, dir, attrs);
 }
 EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
 
@@ -891,11 +907,12 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		if (swiotlb_force ||
		    !dma_capable(hwdev, dev_addr, sg->length)) {
			phys_addr_t map = map_single(hwdev, sg_phys(sg),
-						     sg->length, dir);
+						     sg->length, dir, attrs);
			if (map == SWIOTLB_MAP_ERROR) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
+				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sg_dma_len(sgl) = 0;
@@ -910,14 +927,6 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 }
 EXPORT_SYMBOL(swiotlb_map_sg_attrs);
 
-int
-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-	       enum dma_data_direction dir)
-{
-	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, 0);
-}
-EXPORT_SYMBOL(swiotlb_map_sg);
-
 /*
  * Unmap a set of streaming mode DMA translations. Again, cpu read rules
  * concerning calls here are the same as for swiotlb_unmap_page() above.
@@ -933,19 +942,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
	BUG_ON(dir == DMA_NONE);
 
	for_each_sg(sgl, sg, nelems, i)
-		unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);
-
+		unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
+			     attrs);
 }
 EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
 
-void
-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		 enum dma_data_direction dir)
-{
-	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, 0);
-}
-EXPORT_SYMBOL(swiotlb_unmap_sg);
-
 /*
  * Make physical memory consistent for a set of streaming mode DMA translations
  * after a transfer.
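
The hunks above only plumb the attrs value through the swiotlb internals; the driver-visible effect is that a mapping made with DMA_ATTR_SKIP_CPU_SYNC is no longer bounced at map/unmap time, so the driver has to issue its own syncs around device ownership. Below is a minimal driver-side sketch of that pattern. It is not part of this patch: example_rx_map()/example_rx_unmap() and the buffer handling are hypothetical, and it assumes the generic dma_map_single_attrs()/dma_unmap_single_attrs() helpers plus explicit dma_sync_single_for_{cpu,device}() calls, as described in Documentation/DMA-attributes.txt.

#include <linux/dma-mapping.h>

/*
 * Hypothetical example, not part of this patch: a driver that manages its
 * own syncs maps with DMA_ATTR_SKIP_CPU_SYNC so no swiotlb bounce copy
 * happens at map/unmap time, then syncs only when ownership changes.
 */
static int example_rx_map(struct device *dev, void *buf, size_t size,
			  dma_addr_t *handle)
{
	dma_addr_t dma;

	/* With the attrs path above, no CPU<->bounce copy happens here. */
	dma = dma_map_single_attrs(dev, buf, size, DMA_FROM_DEVICE,
				   DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* Hand the buffer to the device explicitly when it is ready. */
	dma_sync_single_for_device(dev, dma, size, DMA_FROM_DEVICE);

	*handle = dma;
	return 0;
}

static void example_rx_unmap(struct device *dev, dma_addr_t dma, size_t size)
{
	/* Pull the received data back to the CPU before reading it. */
	dma_sync_single_for_cpu(dev, dma, size, DMA_FROM_DEVICE);

	/* Skip the redundant copy a plain dma_unmap_single() would do. */
	dma_unmap_single_attrs(dev, dma, size, DMA_FROM_DEVICE,
			       DMA_ATTR_SKIP_CPU_SYNC);
}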