Commit b5cab0d

Merge branch 'stable/for-linus-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb

Pull swiotlb updates from Konrad Rzeszutek Wilk:

 - minor fixes (rate limiting), remove certain functions

 - support for DMA_ATTR_SKIP_CPU_SYNC which is an optimization in the
   DMA API

* 'stable/for-linus-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb:
  swiotlb: Minor fix-ups for DMA_ATTR_SKIP_CPU_SYNC support
  swiotlb: Add support for DMA_ATTR_SKIP_CPU_SYNC
  swiotlb-xen: Enforce return of DMA_ERROR_CODE in mapping function
  swiotlb: Drop unused functions swiotlb_map_sg and swiotlb_unmap_sg
  swiotlb: Rate-limit printing when running out of SW-IOMMU space

2 parents: 93173b5 + d29fa0c
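For context, DMA_ATTR_SKIP_CPU_SYNC tells the DMA layer not to perform the CPU/device sync (for swiotlb, the bounce-buffer copy) at map and unmap time, so a driver can sync only the bytes a transfer actually touched. A minimal driver-side sketch of that pattern, assuming a driver-owned buffer buf of buf_size bytes into which the device writes len bytes (these names are illustrative, not taken from this commit):

	/* Map without the implicit sync; the driver syncs manually. */
	dma_addr_t handle = dma_map_single_attrs(dev, buf, buf_size,
						 DMA_FROM_DEVICE,
						 DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... device DMAs len bytes into the buffer (len <= buf_size) ... */

	/* Copy back only the bytes the device actually wrote. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

Avoiding the full-buffer copy on every map and unmap is what makes the attribute an optimization when swiotlb bounce buffering is in effect.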

File tree: 6 files changed (+57, -70 lines)


arch/arm/xen/mm.c

Lines changed: 0 additions & 1 deletion

@@ -186,7 +186,6 @@ struct dma_map_ops *xen_dma_ops;
 EXPORT_SYMBOL(xen_dma_ops);
 
 static struct dma_map_ops xen_swiotlb_dma_ops = {
-	.mapping_error = xen_swiotlb_dma_mapping_error,
 	.alloc = xen_swiotlb_alloc_coherent,
 	.free = xen_swiotlb_free_coherent,
 	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,

arch/x86/xen/pci-swiotlb-xen.c

Lines changed: 0 additions & 1 deletion

@@ -19,7 +19,6 @@
 int xen_swiotlb __read_mostly;
 
 static struct dma_map_ops xen_swiotlb_dma_ops = {
-	.mapping_error = xen_swiotlb_dma_mapping_error,
 	.alloc = xen_swiotlb_alloc_coherent,
 	.free = xen_swiotlb_free_coherent,
 	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,

drivers/xen/swiotlb-xen.c

Lines changed: 12 additions & 15 deletions

@@ -405,7 +405,8 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 */
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 
-	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
+	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
+				     attrs);
 	if (map == SWIOTLB_MAP_ERROR)
 		return DMA_ERROR_CODE;
 
@@ -416,11 +417,13 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (!dma_capable(dev, dev_addr, size)) {
-		swiotlb_tbl_unmap_single(dev, map, size, dir);
-		dev_addr = 0;
-	}
-	return dev_addr;
+	if (dma_capable(dev, dev_addr, size))
+		return dev_addr;
+
+	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+
+	return DMA_ERROR_CODE;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
 
@@ -444,7 +447,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 
 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(dev_addr)) {
-		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
 		return;
 	}
 
@@ -557,11 +560,12 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 						 start_dma_addr,
 						 sg_phys(sg),
 						 sg->length,
-						 dir);
+						 dir, attrs);
 			if (map == SWIOTLB_MAP_ERROR) {
 				dev_warn(hwdev, "swiotlb buffer is full\n");
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
+				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
 							   attrs);
 				sg_dma_len(sgl) = 0;
@@ -648,13 +652,6 @@ xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
 
-int
-xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
-	return !dma_addr;
-}
-EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);
-
 /*
  * Return whether the given device DMA address mask can be supported
  * properly. For example, if your device can only drive the low 24-bits
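With xen_swiotlb_dma_mapping_error removed and xen_swiotlb_map_page now guaranteed to return DMA_ERROR_CODE on failure, callers detect a failed mapping through the generic DMA API check. A sketch of caller-side handling (the surrounding code is illustrative, not from this commit):

	dma_addr_t dev_addr = dma_map_page(dev, page, offset, size,
					   DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dev_addr))
		return -EIO;	/* nothing to clean up */

Note that the failing map path above already released its bounce slot, ORing in DMA_ATTR_SKIP_CPU_SYNC first so the unmap does not copy never-used bounce memory back over the caller's buffer.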

include/linux/swiotlb.h

Lines changed: 4 additions & 10 deletions

@@ -44,11 +44,13 @@ enum dma_sync_target {
 extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 					  dma_addr_t tbl_dma_addr,
 					  phys_addr_t phys, size_t size,
-					  enum dma_data_direction dir);
+					  enum dma_data_direction dir,
+					  unsigned long attrs);
 
 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
 				     phys_addr_t tlb_addr,
-				     size_t size, enum dma_data_direction dir);
+				     size_t size, enum dma_data_direction dir,
+				     unsigned long attrs);
 
 extern void swiotlb_tbl_sync_single(struct device *hwdev,
 				    phys_addr_t tlb_addr,
@@ -72,14 +74,6 @@ extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 			       size_t size, enum dma_data_direction dir,
 			       unsigned long attrs);
 
-extern int
-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-	       enum dma_data_direction dir);
-
-extern void
-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-		 enum dma_data_direction dir);
-
 extern int
 swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 		     enum dma_data_direction dir,
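The added attrs argument threads the caller's DMA attributes down to the two places swiotlb decides whether to copy: map time (CPU to bounce buffer) and unmap time (bounce buffer back to CPU). A sketch of the updated calling convention, mirroring how the Xen and lib/swiotlb.c callers in this merge use it (the variables here are illustrative):

	phys_addr_t map;

	map = swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
				     DMA_TO_DEVICE, attrs);
	if (map == SWIOTLB_MAP_ERROR)
		return DMA_ERROR_CODE;

	/* On a later error path, OR in DMA_ATTR_SKIP_CPU_SYNC so the
	 * unmap does not copy untouched bounce memory back. */
	swiotlb_tbl_unmap_single(hwdev, map, size, DMA_TO_DEVICE,
				 attrs | DMA_ATTR_SKIP_CPU_SYNC);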

include/xen/swiotlb-xen.h

Lines changed: 0 additions & 3 deletions

@@ -50,9 +50,6 @@ extern void
 xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 			       int nelems, enum dma_data_direction dir);
 
-extern int
-xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
-
 extern int
 xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
 

lib/swiotlb.c

Lines changed: 41 additions & 40 deletions

@@ -425,7 +425,8 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 				   dma_addr_t tbl_dma_addr,
 				   phys_addr_t orig_addr, size_t size,
-				   enum dma_data_direction dir)
+				   enum dma_data_direction dir,
+				   unsigned long attrs)
 {
 	unsigned long flags;
 	phys_addr_t tlb_addr;
@@ -526,7 +527,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	 */
 	for (i = 0; i < nslots; i++)
 		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
-	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
 		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
 
 	return tlb_addr;
@@ -539,18 +541,20 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
 
 static phys_addr_t
 map_single(struct device *hwdev, phys_addr_t phys, size_t size,
-	   enum dma_data_direction dir)
+	   enum dma_data_direction dir, unsigned long attrs)
 {
 	dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
 
-	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
+	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
+				      dir, attrs);
 }
 
 /*
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
 void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
-			      size_t size, enum dma_data_direction dir)
+			      size_t size, enum dma_data_direction dir,
+			      unsigned long attrs)
 {
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -561,6 +565,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 	 * First, sync the memory before unmapping the entry
 	 */
 	if (orig_addr != INVALID_PHYS_ADDR &&
+	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
 		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
 
@@ -654,7 +659,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		 * GFP_DMA memory; fall back on map_single(), which
 		 * will grab memory from the lowest available address range.
 		 */
-		phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
+		phys_addr_t paddr = map_single(hwdev, 0, size,
+					       DMA_FROM_DEVICE, 0);
 		if (paddr == SWIOTLB_MAP_ERROR)
 			goto err_warn;
 
@@ -667,9 +673,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			(unsigned long long)dma_mask,
 			(unsigned long long)dev_addr);
 
-		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
+		/*
+		 * DMA_TO_DEVICE to avoid memcpy in unmap_single.
+		 * The DMA_ATTR_SKIP_CPU_SYNC is optional.
+		 */
 		swiotlb_tbl_unmap_single(hwdev, paddr,
-					 size, DMA_TO_DEVICE);
+					 size, DMA_TO_DEVICE,
+					 DMA_ATTR_SKIP_CPU_SYNC);
 		goto err_warn;
 	}
 }
@@ -698,8 +708,12 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	if (!is_swiotlb_buffer(paddr))
 		free_pages((unsigned long)vaddr, get_order(size));
 	else
-		/* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
-		swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE);
+		/*
+		 * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
+		 * DMA_ATTR_SKIP_CPU_SYNC is optional.
+		 */
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE,
+					 DMA_ATTR_SKIP_CPU_SYNC);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
 
@@ -714,8 +728,8 @@ swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
 	 * When the mapping is small enough return a static buffer to limit
 	 * the damage, or panic when the transfer is too big.
 	 */
-	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
-	       "device %s\n", size, dev ? dev_name(dev) : "?");
+	dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
+			    size);
 
 	if (size <= io_tlb_overflow || !do_panic)
 		return;
@@ -755,7 +769,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 
 	/* Oh well, have to allocate and map a bounce buffer. */
-	map = map_single(dev, phys, size, dir);
+	map = map_single(dev, phys, size, dir, attrs);
 	if (map == SWIOTLB_MAP_ERROR) {
 		swiotlb_full(dev, size, dir, 1);
 		return phys_to_dma(dev, io_tlb_overflow_buffer);
@@ -764,12 +778,13 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	dev_addr = phys_to_dma(dev, map);
 
 	/* Ensure that the address returned is DMA'ble */
-	if (!dma_capable(dev, dev_addr, size)) {
-		swiotlb_tbl_unmap_single(dev, map, size, dir);
-		return phys_to_dma(dev, io_tlb_overflow_buffer);
-	}
+	if (dma_capable(dev, dev_addr, size))
+		return dev_addr;
 
-	return dev_addr;
+	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+
+	return phys_to_dma(dev, io_tlb_overflow_buffer);
 }
 EXPORT_SYMBOL_GPL(swiotlb_map_page);
 
@@ -782,14 +797,15 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
 * whatever the device wrote there.
 */
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-			 size_t size, enum dma_data_direction dir)
+			 size_t size, enum dma_data_direction dir,
+			 unsigned long attrs)
 {
 	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
 	if (is_swiotlb_buffer(paddr)) {
-		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
 		return;
 	}
 
@@ -809,7 +825,7 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 			size_t size, enum dma_data_direction dir,
 			unsigned long attrs)
 {
-	unmap_single(hwdev, dev_addr, size, dir);
+	unmap_single(hwdev, dev_addr, size, dir, attrs);
 }
 EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
 
@@ -891,11 +907,12 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 		if (swiotlb_force ||
 		    !dma_capable(hwdev, dev_addr, sg->length)) {
 			phys_addr_t map = map_single(hwdev, sg_phys(sg),
-						     sg->length, dir);
+						     sg->length, dir, attrs);
 			if (map == SWIOTLB_MAP_ERROR) {
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
 				swiotlb_full(hwdev, sg->length, dir, 0);
+				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
 						       attrs);
 				sg_dma_len(sgl) = 0;
@@ -910,14 +927,6 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 }
 EXPORT_SYMBOL(swiotlb_map_sg_attrs);
 
-int
-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-	       enum dma_data_direction dir)
-{
-	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, 0);
-}
-EXPORT_SYMBOL(swiotlb_map_sg);
-
 /*
 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
@@ -933,19 +942,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i)
-		unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);
-
+		unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
+			     attrs);
 }
 EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
 
-void
-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		 enum dma_data_direction dir)
-{
-	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, 0);
-}
-EXPORT_SYMBOL(swiotlb_unmap_sg);
-
 /*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
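On the driver-visible unmap side, the same attribute suppresses the final bounce-back copy, which is safe once the driver has synced the bytes it cares about. Continuing the hypothetical mapping sketch from the commit description above (names illustrative):

	/* dma_sync_single_for_cpu() already copied back the bytes the
	 * device wrote; skip the redundant full-size copy at unmap. */
	dma_unmap_single_attrs(dev, handle, buf_size, DMA_FROM_DEVICE,
			       DMA_ATTR_SKIP_CPU_SYNC);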
