
Commit f598a49

John Garry authored and joergroedel committed
iova: Add CPU hotplug handler to flush rcaches
Like the Intel IOMMU driver already does, flush the per-IOVA domain CPU rcache when a CPU goes offline - there's no point in keeping it.

Reviewed-by: Robin Murphy <[email protected]>
Signed-off-by: John Garry <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Joerg Roedel <[email protected]>
1 parent 371d795 commit f598a49
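
For orientation, here is a condensed sketch (kernel C, not part of the commit itself) of the CPU hotplug plumbing the diff below adds. The iova_cpuhp_dead() handler and the cpuhp_* calls mirror the patch; the example_* wrappers and comment wording are illustrative only.

#include <linux/cpuhotplug.h>
#include <linux/list.h>
#include <linux/iova.h>

/* Invoked on a surviving CPU once @cpu has gone offline: flush that CPU's
 * rcache for this domain, since the offline CPU can no longer use it. */
static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct iova_domain *iovad =
		hlist_entry_safe(node, struct iova_domain, cpuhp_dead);

	free_cpu_cached_iovas(cpu, iovad);	/* existing rcache flush helper */
	return 0;
}

/* Registered once, when the first iova_cache_get() user appears. */
static int example_register_cpuhp(void)
{
	return cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead",
				       NULL, iova_cpuhp_dead);
}

/* Each iova_domain then attaches itself as one instance of that state
 * (done in init_iova_domain() in the patch)... */
static void example_attach_domain(struct iova_domain *iovad)
{
	cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
					 &iovad->cpuhp_dead);
}

/* ...and detaches in put_iova_domain(); the state itself is torn down with
 * cpuhp_remove_multi_state() when the last iova cache user goes away. */
static void example_detach_domain(struct iova_domain *iovad)
{
	cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
					    &iovad->cpuhp_dead);
}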

File tree

3 files changed: 31 additions & 1 deletion

drivers/iommu/iova.c

Lines changed: 29 additions & 1 deletion
@@ -25,6 +25,17 @@ static void init_iova_rcaches(struct iova_domain *iovad);
 static void free_iova_rcaches(struct iova_domain *iovad);
 static void fq_destroy_all_entries(struct iova_domain *iovad);
 static void fq_flush_timeout(struct timer_list *t);
+
+static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
+{
+	struct iova_domain *iovad;
+
+	iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
+
+	free_cpu_cached_iovas(cpu, iovad);
+	return 0;
+}
+
 static void free_global_cached_iovas(struct iova_domain *iovad);
 
 static struct iova *to_iova(struct rb_node *node)
@@ -56,6 +67,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
 	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
 	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
+	cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
 	init_iova_rcaches(iovad);
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);
@@ -299,10 +311,21 @@ int iova_cache_get(void)
 {
 	mutex_lock(&iova_cache_mutex);
 	if (!iova_cache_users) {
+		int ret;
+
+		ret = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead", NULL,
+					      iova_cpuhp_dead);
+		if (ret) {
+			mutex_unlock(&iova_cache_mutex);
+			pr_err("Couldn't register cpuhp handler\n");
+			return ret;
+		}
+
 		iova_cache = kmem_cache_create(
 			"iommu_iova", sizeof(struct iova), 0,
 			SLAB_HWCACHE_ALIGN, NULL);
 		if (!iova_cache) {
+			cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
 			mutex_unlock(&iova_cache_mutex);
 			pr_err("Couldn't create iova cache\n");
 			return -ENOMEM;
@@ -324,8 +347,10 @@ void iova_cache_put(void)
 		return;
 	}
 	iova_cache_users--;
-	if (!iova_cache_users)
+	if (!iova_cache_users) {
+		cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
 		kmem_cache_destroy(iova_cache);
+	}
 	mutex_unlock(&iova_cache_mutex);
 }
 EXPORT_SYMBOL_GPL(iova_cache_put);
@@ -648,6 +673,9 @@ void put_iova_domain(struct iova_domain *iovad)
 {
 	struct iova *iova, *tmp;
 
+	cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
+					    &iovad->cpuhp_dead);
+
 	free_iova_flush_queue(iovad);
 	free_iova_rcaches(iovad);
 	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)

include/linux/cpuhotplug.h

Lines changed: 1 addition & 0 deletions
@@ -58,6 +58,7 @@ enum cpuhp_state {
 	CPUHP_NET_DEV_DEAD,
 	CPUHP_PCI_XGENE_DEAD,
 	CPUHP_IOMMU_INTEL_DEAD,
+	CPUHP_IOMMU_IOVA_DEAD,
 	CPUHP_LUSTRE_CFS_DEAD,
 	CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
 	CPUHP_PADATA_DEAD,

include/linux/iova.h

Lines changed: 1 addition & 0 deletions
@@ -95,6 +95,7 @@ struct iova_domain {
 						   flush-queues */
 	atomic_t fq_timer_on;			/* 1 when timer is active, 0
 						   when not */
+	struct hlist_node	cpuhp_dead;
 };
 
 static inline unsigned long iova_size(struct iova *iova)
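
The new cpuhp_dead member is the per-domain hook that the multi-instance hotplug state tracks; the hotplug core only ever hands the embedded hlist_node back to the callback, which recovers the owning domain from it. A minimal sketch of that recovery (the helper name is hypothetical; hlist_entry_safe() is container_of() plus a NULL check):

#include <linux/list.h>
#include <linux/iova.h>

/* Map the node passed in by the hotplug core back to the iova_domain that
 * embeds it; a NULL node yields a NULL domain rather than a bogus pointer. */
static struct iova_domain *cpuhp_node_to_domain(struct hlist_node *node)
{
	return hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
}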
