@@ -39,6 +39,13 @@
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 #endif
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/slab.h>
+#endif
 
 #include <asm/io.h>
 #include <asm/dma.h>
@@ -735,4 +742,73 @@ bool swiotlb_free(struct device *dev, struct page *page, size_t size)
 	return true;
 }
 
+static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
+				    struct device *dev)
+{
+	struct io_tlb_mem *mem = rmem->priv;
+	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;
+
+	/*
+	 * Since multiple devices can share the same pool, the private data,
+	 * io_tlb_mem struct, will be initialized by the first device attached
+	 * to it.
+	 */
+	if (!mem) {
+		mem = kzalloc(struct_size(mem, slots, nslabs), GFP_KERNEL);
+		if (!mem)
+			return -ENOMEM;
+
+		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
+				     rmem->size >> PAGE_SHIFT);
+		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
+		mem->force_bounce = true;
+		mem->for_alloc = true;
+
+		rmem->priv = mem;
+
+		if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+			mem->debugfs =
+				debugfs_create_dir(rmem->name, debugfs_dir);
+			swiotlb_create_debugfs_files(mem);
+		}
+	}
+
+	dev->dma_io_tlb_mem = mem;
+
+	return 0;
+}
+
+static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
+					struct device *dev)
+{
+	dev->dma_io_tlb_mem = io_tlb_default_mem;
+}
+
+static const struct reserved_mem_ops rmem_swiotlb_ops = {
+	.device_init = rmem_swiotlb_device_init,
+	.device_release = rmem_swiotlb_device_release,
+};
+
+static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
+{
+	unsigned long node = rmem->fdt_node;
+
+	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
+	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
+	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
+	    of_get_flat_dt_prop(node, "no-map", NULL))
+		return -EINVAL;
+
+	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
+		pr_err("Restricted DMA pool must be accessible within the linear mapping.");
+		return -EINVAL;
+	}
+
+	rmem->ops = &rmem_swiotlb_ops;
+	pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
+		&rmem->base, (unsigned long)rmem->size / SZ_1M);
+	return 0;
+}
+
+RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
 #endif /* CONFIG_DMA_RESTRICTED_POOL */
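Usage note (not part of the patch): the setup hook above is registered for reserved-memory nodes with compatible = "restricted-dma-pool", so a pool is declared in the device tree and referenced from a device node through a "memory-region" phandle. Below is a minimal sketch of how a platform driver could attach itself to such a pool by calling the existing of_reserved_mem_device_init() helper, which resolves the device's "memory-region" reference and invokes the rmem_swiotlb_device_init() callback registered here; the probe function name and the driver it would belong to are hypothetical, and in practice the core OF/DMA configuration code may perform this attachment for the device automatically.

/*
 * Hypothetical driver-side sketch, assuming the device node carries a
 * "memory-region" phandle to a "restricted-dma-pool" reserved-memory node.
 */
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	/*
	 * Resolves the device's "memory-region" reference and calls the
	 * reserved_mem_ops installed by rmem_swiotlb_setup(), i.e.
	 * rmem_swiotlb_device_init(), which points dev->dma_io_tlb_mem at
	 * the shared restricted pool.
	 */
	ret = of_reserved_mem_device_init(&pdev->dev);
	if (ret)
		return ret;

	/*
	 * From here on, streaming DMA for this device bounces through the
	 * pool (force_bounce) and coherent allocations come from it
	 * (for_alloc).
	 */
	return 0;
}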