Skip to content

Commit f410770

Browse files
Fenghua YuKAGA-KOKO
authored and committed
x86/intel_rdt: Update percpu closid immediately on CPUs affected by change
If CPUs are moved to or removed from a rdtgroup, the percpu closid storage is updated. If tasks running on an affected CPU use the percpu closid then the PQR_ASSOC MSR is only updated when the task runs through a context switch. Up to the context switch the CPUs operate on the wrong closid. This state is potentially unbound. Make the change immediately effective by invoking a smp function call on the affected CPUs which stores the new closid in the percpu storage and calls the rdt_sched_in() function which updates the MSR, if the current task uses the percpu closid. [ tglx: Made it work and massaged changelog once more ] Signed-off-by: Fenghua Yu <[email protected]> Cc: "Ravi V Shankar" <[email protected]> Cc: "Tony Luck" <[email protected]> Cc: "Sai Prakhya" <[email protected]> Cc: "Vikas Shivappa" <[email protected]> Cc: "Ingo Molnar" <[email protected]> Cc: "H. Peter Anvin" <[email protected]> Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Thomas Gleixner <[email protected]>
1 parent c7cc0cc commit f410770

File tree

1 file changed

+36
-36
lines changed

1 file changed

+36
-36
lines changed

arch/x86/kernel/cpu/intel_rdt_rdtgroup.c

Lines changed: 36 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -191,12 +191,40 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
191191
return ret;
192192
}
193193

194+
/*
195+
* This is safe against intel_rdt_sched_in() called from __switch_to()
196+
* because __switch_to() is executed with interrupts disabled. A local call
197+
* from rdt_update_percpu_closid() is proteced against __switch_to() because
198+
* preemption is disabled.
199+
*/
200+
static void rdt_update_cpu_closid(void *v)
201+
{
202+
this_cpu_write(cpu_closid, *(int *)v);
203+
/*
204+
* We cannot unconditionally write the MSR because the current
205+
* executing task might have its own closid selected. Just reuse
206+
* the context switch code.
207+
*/
208+
intel_rdt_sched_in();
209+
}
210+
211+
/* Update the per cpu closid and eventually the PGR_ASSOC MSR */
212+
static void rdt_update_percpu_closid(const struct cpumask *cpu_mask, int closid)
213+
{
214+
int cpu = get_cpu();
215+
216+
if (cpumask_test_cpu(cpu, cpu_mask))
217+
rdt_update_cpu_closid(&closid);
218+
smp_call_function_many(cpu_mask, rdt_update_cpu_closid, &closid, 1);
219+
put_cpu();
220+
}
221+
194222
static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
195223
char *buf, size_t nbytes, loff_t off)
196224
{
197225
cpumask_var_t tmpmask, newmask;
198226
struct rdtgroup *rdtgrp, *r;
199-
int ret, cpu;
227+
int ret;
200228

201229
if (!buf)
202230
return -EINVAL;
@@ -236,8 +264,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
236264
/* Give any dropped cpus to rdtgroup_default */
237265
cpumask_or(&rdtgroup_default.cpu_mask,
238266
&rdtgroup_default.cpu_mask, tmpmask);
239-
for_each_cpu(cpu, tmpmask)
240-
per_cpu(cpu_closid, cpu) = 0;
267+
rdt_update_percpu_closid(tmpmask, rdtgroup_default.closid);
241268
}
242269

243270
/*
@@ -251,8 +278,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
251278
continue;
252279
cpumask_andnot(&r->cpu_mask, &r->cpu_mask, tmpmask);
253280
}
254-
for_each_cpu(cpu, tmpmask)
255-
per_cpu(cpu_closid, cpu) = rdtgrp->closid;
281+
rdt_update_percpu_closid(tmpmask, rdtgrp->closid);
256282
}
257283

258284
/* Done pushing/pulling - update this group with new mask */
@@ -780,41 +806,20 @@ static int reset_all_cbms(struct rdt_resource *r)
780806
return 0;
781807
}
782808

783-
/*
784-
* MSR_IA32_PQR_ASSOC is scoped per logical CPU, so all updates
785-
* are always in thread context.
786-
*/
787-
static void rdt_reset_pqr_assoc_closid(void *v)
788-
{
789-
struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
790-
791-
state->closid = 0;
792-
wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, 0);
793-
}
794-
795809
/*
796810
* Forcibly remove all of subdirectories under root.
797811
*/
798812
static void rmdir_all_sub(void)
799813
{
800814
struct rdtgroup *rdtgrp, *tmp;
801815
struct task_struct *p, *t;
802-
int cpu;
803816

804817
/* move all tasks to default resource group */
805818
read_lock(&tasklist_lock);
806819
for_each_process_thread(p, t)
807820
t->closid = 0;
808821
read_unlock(&tasklist_lock);
809822

810-
get_cpu();
811-
/* Reset PQR_ASSOC MSR on this cpu. */
812-
rdt_reset_pqr_assoc_closid(NULL);
813-
/* Reset PQR_ASSOC MSR on the rest of cpus. */
814-
smp_call_function_many(cpu_online_mask, rdt_reset_pqr_assoc_closid,
815-
NULL, 1);
816-
put_cpu();
817-
818823
list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
819824
/* Remove each rdtgroup other than root */
820825
if (rdtgrp == &rdtgroup_default)
@@ -828,15 +833,13 @@ static void rmdir_all_sub(void)
828833
cpumask_or(&rdtgroup_default.cpu_mask,
829834
&rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
830835

836+
rdt_update_percpu_closid(&rdtgrp->cpu_mask,
837+
rdtgroup_default.closid);
838+
831839
kernfs_remove(rdtgrp->kn);
832840
list_del(&rdtgrp->rdtgroup_list);
833841
kfree(rdtgrp);
834842
}
835-
836-
/* Reset all per cpu closids to the default value */
837-
for_each_cpu(cpu, &rdtgroup_default.cpu_mask)
838-
per_cpu(cpu_closid, cpu) = 0;
839-
840843
kernfs_remove(kn_info);
841844
}
842845

@@ -943,7 +946,6 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
943946
{
944947
struct task_struct *p, *t;
945948
struct rdtgroup *rdtgrp;
946-
int cpu, ret = 0;
947949

948950
rdtgrp = rdtgroup_kn_lock_live(kn);
949951
if (!rdtgrp) {
@@ -962,8 +964,7 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
962964
/* Give any CPUs back to the default group */
963965
cpumask_or(&rdtgroup_default.cpu_mask,
964966
&rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
965-
for_each_cpu(cpu, &rdtgrp->cpu_mask)
966-
per_cpu(cpu_closid, cpu) = 0;
967+
rdt_update_percpu_closid(&rdtgrp->cpu_mask, rdtgroup_default.closid);
967968

968969
rdtgrp->flags = RDT_DELETED;
969970
closid_free(rdtgrp->closid);
@@ -977,8 +978,7 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
977978
kernfs_remove(rdtgrp->kn);
978979

979980
rdtgroup_kn_unlock(kn);
980-
981-
return ret;
981+
return 0;
982982
}
983983

984984
static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {

0 commit comments

Comments
 (0)