@@ -1276,76 +1276,86 @@ struct scx_task_iter {
 };
 
 /**
- * scx_task_iter_init - Initialize a task iterator
+ * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration
  * @iter: iterator to init
  *
- * Initialize @iter. Must be called with scx_tasks_lock held. Once initialized,
- * @iter must eventually be exited with scx_task_iter_exit().
+ * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter
+ * must eventually be stopped with scx_task_iter_stop().
  *
- * scx_tasks_lock may be released between this and the first next() call or
- * between any two next() calls. If scx_tasks_lock is released between two
- * next() calls, the caller is responsible for ensuring that the task being
- * iterated remains accessible either through RCU read lock or obtaining a
- * reference count.
+ * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock()
+ * between this and the first next() call or between any two next() calls. If
+ * the locks are released between two next() calls, the caller is responsible
+ * for ensuring that the task being iterated remains accessible either through
+ * RCU read lock or obtaining a reference count.
  *
  * All tasks which existed when the iteration started are guaranteed to be
  * visited as long as they still exist.
  */
-static void scx_task_iter_init(struct scx_task_iter *iter)
+static void scx_task_iter_start(struct scx_task_iter *iter)
 {
-	lockdep_assert_held(&scx_tasks_lock);
-
 	BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
 		     ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
 
+	spin_lock_irq(&scx_tasks_lock);
+
 	iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
 	list_add(&iter->cursor.tasks_node, &scx_tasks);
 	iter->locked = NULL;
 }
 
+static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
+{
+	if (iter->locked) {
+		task_rq_unlock(iter->rq, iter->locked, &iter->rf);
+		iter->locked = NULL;
+	}
+}
+
 /**
- * scx_task_iter_rq_unlock - Unlock rq locked by a task iterator
- * @iter: iterator to unlock rq for
+ * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator
+ * @iter: iterator to unlock
  *
  * If @iter is in the middle of a locked iteration, it may be locking the rq of
- * the task currently being visited. Unlock the rq if so. This function can be
- * safely called anytime during an iteration.
+ * the task currently being visited in addition to scx_tasks_lock. Unlock both.
+ * This function can be safely called anytime during an iteration.
+ */
+static void scx_task_iter_unlock(struct scx_task_iter *iter)
+{
+	__scx_task_iter_rq_unlock(iter);
+	spin_unlock_irq(&scx_tasks_lock);
+}
+
+/**
+ * scx_task_iter_relock - Lock scx_tasks_lock released by scx_task_iter_unlock()
+ * @iter: iterator to re-lock
  *
- * Returns %true if the rq @iter was locking is unlocked. %false if @iter was
- * not locking an rq.
+ * Re-lock scx_tasks_lock unlocked by scx_task_iter_unlock(). Note that it
+ * doesn't re-lock the rq lock. Must be called before other iterator operations.
  */
-static bool scx_task_iter_rq_unlock(struct scx_task_iter *iter)
+static void scx_task_iter_relock(struct scx_task_iter *iter)
 {
-	if (iter->locked) {
-		task_rq_unlock(iter->rq, iter->locked, &iter->rf);
-		iter->locked = NULL;
-		return true;
-	} else {
-		return false;
-	}
+	spin_lock_irq(&scx_tasks_lock);
 }
 
 /**
- * scx_task_iter_exit - Exit a task iterator
+ * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
  * @iter: iterator to exit
  *
- * Exit a previously initialized @iter. Must be called with scx_tasks_lock held.
- * If the iterator holds a task's rq lock, that rq lock is released. See
- * scx_task_iter_init() for details.
+ * Exit a previously initialized @iter. Must be called with scx_tasks_lock held
+ * which is released on return. If the iterator holds a task's rq lock, that rq
+ * lock is also released. See scx_task_iter_start() for details.
  */
-static void scx_task_iter_exit(struct scx_task_iter *iter)
+static void scx_task_iter_stop(struct scx_task_iter *iter)
 {
-	lockdep_assert_held(&scx_tasks_lock);
-
-	scx_task_iter_rq_unlock(iter);
 	list_del_init(&iter->cursor.tasks_node);
+	scx_task_iter_unlock(iter);
 }
 
 /**
  * scx_task_iter_next - Next task
  * @iter: iterator to walk
  *
- * Visit the next task. See scx_task_iter_init() for details.
+ * Visit the next task. See scx_task_iter_start() for details.
  */
 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
 {
@@ -1373,14 +1383,14 @@ static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
  * @include_dead: Whether we should include dead tasks in the iteration
  *
  * Visit the non-idle task with its rq lock held. Allows callers to specify
- * whether they would like to filter out dead tasks. See scx_task_iter_init()
+ * whether they would like to filter out dead tasks. See scx_task_iter_start()
  * for details.
  */
 static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
 {
 	struct task_struct *p;
 
-	scx_task_iter_rq_unlock(iter);
+	__scx_task_iter_rq_unlock(iter);
 
 	while ((p = scx_task_iter_next(iter))) {
 		/*
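
For reference, a minimal usage sketch of the reworked iterator API. The scx_task_iter_* and task refcounting calls are the ones introduced or used by this patch; the loop bodies are hypothetical placeholders, not code from the patch:

	struct scx_task_iter sti;
	struct task_struct *p;

	scx_task_iter_start(&sti);			/* acquires scx_tasks_lock */
	while ((p = scx_task_iter_next_locked(&sti))) {
		/* @p's rq lock is held here; do non-sleeping per-task work */
	}
	scx_task_iter_stop(&sti);			/* releases scx_tasks_lock */

	/* if the per-task work may sleep, drop and retake the locks around it */
	scx_task_iter_start(&sti);
	while ((p = scx_task_iter_next_locked(&sti))) {
		if (!tryget_task_struct(p))
			continue;
		scx_task_iter_unlock(&sti);		/* drops rq lock and scx_tasks_lock */
		/* ... sleepable work on @p ... */
		put_task_struct(p);
		scx_task_iter_relock(&sti);		/* retake scx_tasks_lock before the next next() */
	}
	scx_task_iter_stop(&sti);

This mirrors the pattern used in scx_ops_enable() below, where scx_task_iter_unlock()/scx_task_iter_relock() bracket the sleepable ops.init_task() call.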
@@ -4462,8 +4472,7 @@ static void scx_ops_disable_workfn(struct kthread_work *work)
 
 	scx_ops_init_task_enabled = false;
 
-	spin_lock_irq(&scx_tasks_lock);
-	scx_task_iter_init(&sti);
+	scx_task_iter_start(&sti);
 	while ((p = scx_task_iter_next_locked(&sti))) {
 		const struct sched_class *old_class = p->sched_class;
 		struct sched_enq_and_set_ctx ctx;
@@ -4478,8 +4487,7 @@ static void scx_ops_disable_workfn(struct kthread_work *work)
 		check_class_changed(task_rq(p), p, old_class, p->prio);
 		scx_ops_exit_task(p);
 	}
-	scx_task_iter_exit(&sti);
-	spin_unlock_irq(&scx_tasks_lock);
+	scx_task_iter_stop(&sti);
 	percpu_up_write(&scx_fork_rwsem);
 
 	/* no task is on scx, turn off all the switches and flush in-progress calls */
@@ -5130,8 +5138,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 	if (ret)
 		goto err_disable_unlock_all;
 
-	spin_lock_irq(&scx_tasks_lock);
-	scx_task_iter_init(&sti);
+	scx_task_iter_start(&sti);
 	while ((p = scx_task_iter_next_locked(&sti))) {
 		/*
 		 * @p may already be dead, have lost all its usages counts and
@@ -5141,15 +5148,13 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 		if (!tryget_task_struct(p))
 			continue;
 
-		scx_task_iter_rq_unlock(&sti);
-		spin_unlock_irq(&scx_tasks_lock);
+		scx_task_iter_unlock(&sti);
 
 		ret = scx_ops_init_task(p, task_group(p), false);
 		if (ret) {
 			put_task_struct(p);
-			spin_lock_irq(&scx_tasks_lock);
-			scx_task_iter_exit(&sti);
-			spin_unlock_irq(&scx_tasks_lock);
+			scx_task_iter_relock(&sti);
+			scx_task_iter_stop(&sti);
 			scx_ops_error("ops.init_task() failed (%d) for %s[%d]",
 				      ret, p->comm, p->pid);
 			goto err_disable_unlock_all;
@@ -5158,10 +5163,9 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 		scx_set_task_state(p, SCX_TASK_READY);
 
 		put_task_struct(p);
-		spin_lock_irq(&scx_tasks_lock);
+		scx_task_iter_relock(&sti);
 	}
-	scx_task_iter_exit(&sti);
-	spin_unlock_irq(&scx_tasks_lock);
+	scx_task_iter_stop(&sti);
 	scx_cgroup_unlock();
 	percpu_up_write(&scx_fork_rwsem);
 
@@ -5178,8 +5182,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 	 * scx_tasks_lock.
 	 */
 	percpu_down_write(&scx_fork_rwsem);
-	spin_lock_irq(&scx_tasks_lock);
-	scx_task_iter_init(&sti);
+	scx_task_iter_start(&sti);
 	while ((p = scx_task_iter_next_locked(&sti))) {
 		const struct sched_class *old_class = p->sched_class;
 		struct sched_enq_and_set_ctx ctx;
@@ -5194,8 +5197,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 
 		check_class_changed(task_rq(p), p, old_class, p->prio);
 	}
-	scx_task_iter_exit(&sti);
-	spin_unlock_irq(&scx_tasks_lock);
+	scx_task_iter_stop(&sti);
 	percpu_up_write(&scx_fork_rwsem);
 
 	scx_ops_bypass(false);