@@ -194,12 +194,13 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
 /*
  * This is safe against intel_rdt_sched_in() called from __switch_to()
  * because __switch_to() is executed with interrupts disabled. A local call
- * from rdt_update_percpu_closid() is protected against __switch_to() because
+ * from rdt_update_closid() is protected against __switch_to() because
  * preemption is disabled.
  */
-static void rdt_update_cpu_closid(void *v)
+static void rdt_update_cpu_closid(void *closid)
 {
-	this_cpu_write(cpu_closid, *(int *)v);
+	if (closid)
+		this_cpu_write(cpu_closid, *(int *)closid);
 	/*
 	 * We cannot unconditionally write the MSR because the current
 	 * executing task might have its own closid selected. Just reuse
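With the NULL check in place, the same IPI callback serves two purposes. A condensed illustration (not part of the patch) of the two payload modes, using rdtgroup_default from this file:

	int closid = rdtgroup_default.closid;

	rdt_update_cpu_closid(&closid);	/* write this CPU's cpu_closid, then refresh the MSR */
	rdt_update_cpu_closid(NULL);	/* leave cpu_closid untouched, only refresh the MSR  */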
@@ -208,14 +209,23 @@ static void rdt_update_cpu_closid(void *v)
 	intel_rdt_sched_in();
 }
 
-/* Update the per cpu closid and eventually the PQR_ASSOC MSR */
-static void rdt_update_percpu_closid(const struct cpumask *cpu_mask, int closid)
+/*
+ * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
+ *
+ * Per task closids must have been set up before calling this function.
+ *
+ * The per cpu closids are updated with the smp function call, when @closid
+ * is not NULL. If @closid is NULL then all affected percpu closids must
+ * have been set up before calling this function.
+ */
+static void
+rdt_update_closid(const struct cpumask *cpu_mask, int *closid)
 {
 	int cpu = get_cpu();
 
 	if (cpumask_test_cpu(cpu, cpu_mask))
-		rdt_update_cpu_closid(&closid);
-	smp_call_function_many(cpu_mask, rdt_update_cpu_closid, &closid, 1);
+		rdt_update_cpu_closid(closid);
+	smp_call_function_many(cpu_mask, rdt_update_cpu_closid, closid, 1);
 	put_cpu();
 }
 
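smp_call_function_many() does not run the callback on the calling CPU, which is why rdt_update_closid() invokes rdt_update_cpu_closid() directly for the local CPU while get_cpu() keeps preemption disabled. For reference, the two ways the new helper is called later in this patch (tmpmask being the caller's scratch cpumask):

	/* CPUs changed group: also rewrite their per cpu closids */
	rdt_update_closid(tmpmask, &rdtgroup_default.closid);

	/* per cpu closids already written by the caller: only refresh PQR_ASSOC */
	rdt_update_closid(tmpmask, NULL);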
@@ -264,7 +274,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
 		/* Give any dropped cpus to rdtgroup_default */
 		cpumask_or(&rdtgroup_default.cpu_mask,
 			   &rdtgroup_default.cpu_mask, tmpmask);
-		rdt_update_percpu_closid(tmpmask, rdtgroup_default.closid);
+		rdt_update_closid(tmpmask, &rdtgroup_default.closid);
 	}
 
 	/*
@@ -278,7 +288,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
 				continue;
 			cpumask_andnot(&r->cpu_mask, &r->cpu_mask, tmpmask);
 		}
-		rdt_update_percpu_closid(tmpmask, rdtgrp->closid);
+		rdt_update_closid(tmpmask, &rdtgrp->closid);
 	}
 
 	/* Done pushing/pulling - update this group with new mask */
@@ -807,18 +817,49 @@ static int reset_all_cbms(struct rdt_resource *r)
 }
 
 /*
- * Forcibly remove all of the subdirectories under root.
+ * Move tasks from one group to the other. If @from is NULL, then all tasks
+ * in the system are moved unconditionally (used for teardown).
+ *
+ * If @mask is not NULL the cpus on which moved tasks are running are set
+ * in that mask so the update smp function call is restricted to affected
+ * cpus.
  */
-static void rmdir_all_sub(void)
+static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
+				 struct cpumask *mask)
 {
-	struct rdtgroup *rdtgrp, *tmp;
 	struct task_struct *p, *t;
 
-	/* move all tasks to default resource group */
 	read_lock(&tasklist_lock);
-	for_each_process_thread(p, t)
-		t->closid = 0;
+	for_each_process_thread(p, t) {
+		if (!from || t->closid == from->closid) {
+			t->closid = to->closid;
+#ifdef CONFIG_SMP
+			/*
+			 * This is safe on x86 w/o barriers as the ordering
+			 * of writing to task_cpu() and t->on_cpu is
+			 * reverse to the reading here. The detection is
+			 * inaccurate as tasks might move or schedule
+			 * before the smp function call takes place. In
+			 * such a case the function call is pointless, but
+			 * there is no other side effect.
+			 */
+			if (mask && t->on_cpu)
+				cpumask_set_cpu(task_cpu(t), mask);
+#endif
+		}
+	}
 	read_unlock(&tasklist_lock);
+}
+
+/*
+ * Forcibly remove all of the subdirectories under root.
+ */
+static void rmdir_all_sub(void)
+{
+	struct rdtgroup *rdtgrp, *tmp;
+
+	/* Move all tasks to the default resource group */
+	rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);
 
 	list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
 		/* Remove each rdtgroup other than root */
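For reference, the two callers of rdt_move_group_tasks() introduced by this patch, condensed; @mask is only passed when the caller wants the follow-up IPI restricted to CPUs that were running moved tasks:

	/* Filesystem teardown: move every task, no IPI mask collected */
	rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);

	/* rmdir of one group: move only its tasks, note their CPUs in tmpmask */
	rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);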
@@ -833,13 +874,15 @@ static void rmdir_all_sub(void)
 		cpumask_or(&rdtgroup_default.cpu_mask,
 			   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
 
-		rdt_update_percpu_closid(&rdtgrp->cpu_mask,
-					 rdtgroup_default.closid);
-
 		kernfs_remove(rdtgrp->kn);
 		list_del(&rdtgrp->rdtgroup_list);
 		kfree(rdtgrp);
 	}
+	/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
+	get_online_cpus();
+	rdt_update_closid(cpu_online_mask, &rdtgroup_default.closid);
+	put_online_cpus();
+
 	kernfs_remove(kn_info);
 }
 
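The teardown ordering matters: task closids are rewritten first, the groups are freed and their CPUs folded into rdtgroup_default, and only then does a single rdt_update_closid() over cpu_online_mask refresh the per cpu closids and the PQR_ASSOC MSR, with get_online_cpus()/put_online_cpus() keeping the online mask stable against hotplug. Condensed from the hunks above:

	rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);		/* 1: tasks */
	/* ... free each group, folding its CPUs into rdtgroup_default ... */
	get_online_cpus();
	rdt_update_closid(cpu_online_mask, &rdtgroup_default.closid);	/* 2: CPUs  */
	put_online_cpus();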
@@ -944,27 +987,35 @@ static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
 
 static int rdtgroup_rmdir(struct kernfs_node *kn)
 {
-	struct task_struct *p, *t;
+	int ret, cpu, closid = rdtgroup_default.closid;
 	struct rdtgroup *rdtgrp;
+	cpumask_var_t tmpmask;
+
+	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+		return -ENOMEM;
 
 	rdtgrp = rdtgroup_kn_lock_live(kn);
 	if (!rdtgrp) {
-		rdtgroup_kn_unlock(kn);
-		return -EPERM;
+		ret = -EPERM;
+		goto out;
 	}
 
 	/* Give any tasks back to the default group */
-	read_lock(&tasklist_lock);
-	for_each_process_thread(p, t) {
-		if (t->closid == rdtgrp->closid)
-			t->closid = 0;
-	}
-	read_unlock(&tasklist_lock);
+	rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);
 
 	/* Give any CPUs back to the default group */
 	cpumask_or(&rdtgroup_default.cpu_mask,
 		   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
-	rdt_update_percpu_closid(&rdtgrp->cpu_mask, rdtgroup_default.closid);
+
+	/* Update per cpu closid of the moved CPUs first */
+	for_each_cpu(cpu, &rdtgrp->cpu_mask)
+		per_cpu(cpu_closid, cpu) = closid;
+	/*
+	 * Update the MSR on moved CPUs and CPUs which have a moved
+	 * task running on them.
+	 */
+	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
+	rdt_update_closid(tmpmask, NULL);
 
 	rdtgrp->flags = RDT_DELETED;
 	closid_free(rdtgrp->closid);
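The same ordering is used for a single group: tasks are moved first (collecting their CPUs in tmpmask), then the per cpu closids of the group's own CPUs are rewritten, and one IPI over the union of both masks refreshes PQR_ASSOC. Condensed from the hunk above:

	rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);	/* tasks     */
	for_each_cpu(cpu, &rdtgrp->cpu_mask)
		per_cpu(cpu_closid, cpu) = closid;			/* cpu state */
	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
	rdt_update_closid(tmpmask, NULL);				/* one IPI   */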
@@ -976,9 +1027,11 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
 	 */
 	kernfs_get(kn);
 	kernfs_remove(rdtgrp->kn);
-
+	ret = 0;
+out:
 	rdtgroup_kn_unlock(kn);
-	return 0;
+	free_cpumask_var(tmpmask);
+	return ret;
 }
 
 static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {