|
34 | 34 | #include <linux/msi.h>
|
35 | 35 |
|
36 | 36 | #include "dma-iommu.h"
|
| 37 | +#include "iommu-priv.h" |
37 | 38 |
|
38 | 39 | #include "iommu-sva.h"
|
39 | 41 |
|
40 | 42 | static struct kset *iommu_group_kset;
|
41 | 43 | static DEFINE_IDA(iommu_group_ida);
|
@@ -287,6 +289,48 @@ void iommu_device_unregister(struct iommu_device *iommu)
|
287 | 289 | }
|
288 | 290 | EXPORT_SYMBOL_GPL(iommu_device_unregister);
|
289 | 291 |
|
| 292 | +#if IS_ENABLED(CONFIG_IOMMUFD_TEST) |
| 293 | +void iommu_device_unregister_bus(struct iommu_device *iommu, |
| 294 | + struct bus_type *bus, |
| 295 | + struct notifier_block *nb) |
| 296 | +{ |
| 297 | + bus_unregister_notifier(bus, nb); |
| 298 | + iommu_device_unregister(iommu); |
| 299 | +} |
| 300 | +EXPORT_SYMBOL_GPL(iommu_device_unregister_bus); |
| 301 | + |
| 302 | +/* |
| 303 | + * Register an iommu driver against a single bus. This is only used by iommufd |
| 304 | + * selftest to create a mock iommu driver. The caller must provide |
| 305 | + * some memory to hold a notifier_block. |
| 306 | + */ |
| 307 | +int iommu_device_register_bus(struct iommu_device *iommu, |
| 308 | + const struct iommu_ops *ops, struct bus_type *bus, |
| 309 | + struct notifier_block *nb) |
| 310 | +{ |
| 311 | + int err; |
| 312 | + |
| 313 | + iommu->ops = ops; |
| 314 | + nb->notifier_call = iommu_bus_notifier; |
| 315 | + err = bus_register_notifier(bus, nb); |
| 316 | + if (err) |
| 317 | + return err; |
| 318 | + |
| 319 | + spin_lock(&iommu_device_lock); |
| 320 | + list_add_tail(&iommu->list, &iommu_device_list); |
| 321 | + spin_unlock(&iommu_device_lock); |
| 322 | + |
| 323 | + bus->iommu_ops = ops; |
| 324 | + err = bus_iommu_probe(bus); |
| 325 | + if (err) { |
| 326 | + iommu_device_unregister_bus(iommu, bus, nb); |
| 327 | + return err; |
| 328 | + } |
| 329 | + return 0; |
| 330 | +} |
| 331 | +EXPORT_SYMBOL_GPL(iommu_device_register_bus); |
| 332 | +#endif |
| 333 | + |
290 | 334 | static struct dev_iommu *dev_iommu_get(struct device *dev)
|
291 | 335 | {
|
292 | 336 | struct dev_iommu *param = dev->iommu;
|
@@ -2114,6 +2158,32 @@ int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
|
2114 | 2158 | }
|
2115 | 2159 | EXPORT_SYMBOL_GPL(iommu_attach_group);
|
2116 | 2160 |
|
| 2161 | +/** |
| 2162 | + * iommu_group_replace_domain - replace the domain that a group is attached to |
| 2163 | + * @new_domain: new IOMMU domain to replace with |
| 2164 | + * @group: IOMMU group that will be attached to the new domain |
| 2165 | + * |
| 2166 | + * This API allows the group to switch domains without being forced to go to |
| 2167 | + * the blocking domain in-between. |
| 2168 | + * |
| 2169 | + * If the currently attached domain is a core domain (e.g. a default_domain), |
| 2170 | + * it will act just like the iommu_attach_group(). |
| 2171 | + */ |
| 2172 | +int iommu_group_replace_domain(struct iommu_group *group, |
| 2173 | + struct iommu_domain *new_domain) |
| 2174 | +{ |
| 2175 | + int ret; |
| 2176 | + |
| 2177 | + if (!new_domain) |
| 2178 | + return -EINVAL; |
| 2179 | + |
| 2180 | + mutex_lock(&group->mutex); |
| 2181 | + ret = __iommu_group_set_domain(group, new_domain); |
| 2182 | + mutex_unlock(&group->mutex); |
| 2183 | + return ret; |
| 2184 | +} |
| 2185 | +EXPORT_SYMBOL_NS_GPL(iommu_group_replace_domain, IOMMUFD_INTERNAL); |
| 2186 | + |
2117 | 2187 | static int __iommu_device_set_domain(struct iommu_group *group,
|
2118 | 2188 | struct device *dev,
|
2119 | 2189 | struct iommu_domain *new_domain,
|
@@ -2642,16 +2712,25 @@ int iommu_set_pgtable_quirks(struct iommu_domain *domain,
|
2642 | 2712 | }
|
2643 | 2713 | EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
|
2644 | 2714 |
|
| 2715 | +/** |
| 2716 | + * iommu_get_resv_regions - get reserved regions |
| 2717 | + * @dev: device for which to get reserved regions |
| 2718 | + * @list: reserved region list for device |
| 2719 | + * |
| 2720 | + * This returns a list of reserved IOVA regions specific to this device. |
| 2721 | + * A domain user should not map IOVA in these ranges. |
| 2722 | + */ |
2645 | 2723 | void iommu_get_resv_regions(struct device *dev, struct list_head *list)
|
2646 | 2724 | {
|
2647 | 2725 | const struct iommu_ops *ops = dev_iommu_ops(dev);
|
2648 | 2726 |
|
2649 | 2727 | if (ops->get_resv_regions)
|
2650 | 2728 | ops->get_resv_regions(dev, list);
|
2651 | 2729 | }
|
| 2730 | +EXPORT_SYMBOL_GPL(iommu_get_resv_regions); |
2652 | 2731 |
|
2653 | 2732 | /**
|
2654 |
| - * iommu_put_resv_regions - release resered regions |
| 2733 | + * iommu_put_resv_regions - release reserved regions |
2655 | 2734 | * @dev: device for which to free reserved regions
|
2656 | 2735 | * @list: reserved region list for device
|
2657 | 2736 | *
|
|
0 commit comments