@@ -18,6 +18,12 @@ enum scx_consts {
 	SCX_EXIT_DUMP_DFL_LEN		= 32768,
 
 	SCX_CPUPERF_ONE			= SCHED_CAPACITY_SCALE,
+
+	/*
+	 * Iterating all tasks may take a while. Periodically drop
+	 * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls.
+	 */
+	SCX_OPS_TASK_ITER_BATCH		= 32,
 };
 
 enum scx_exit_kind {
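The new constant caps how many tasks are visited while scx_tasks_lock is held in one stretch. As a rough illustration of the same batched-unlock pattern, here is a minimal userspace C sketch; the names ITER_BATCH, list_lock, and walk_list are made up for the example, and it assumes no node is removed concurrently, whereas the kernel code below avoids that problem by keeping the iterator's cursor element linked into the list so the walk can resume safely after the lock is dropped.

#include <pthread.h>
#include <sched.h>
#include <stddef.h>

#define ITER_BATCH 32	/* mirrors SCX_OPS_TASK_ITER_BATCH */

struct node {
	struct node *next;
	int data;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *list_head;

/* Walk the list, but never hold list_lock for more than ITER_BATCH nodes. */
static void walk_list(void (*visit)(struct node *))
{
	unsigned int cnt = 0;
	struct node *pos;

	pthread_mutex_lock(&list_lock);
	for (pos = list_head; pos; pos = pos->next) {
		if (!(++cnt % ITER_BATCH)) {
			/* Let lock waiters (and the scheduler) make progress. */
			pthread_mutex_unlock(&list_lock);
			sched_yield();
			pthread_mutex_lock(&list_lock);
		}
		visit(pos);
	}
	pthread_mutex_unlock(&list_lock);
}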
@@ -1273,6 +1279,7 @@ struct scx_task_iter {
 	struct task_struct		*locked;
 	struct rq			*rq;
 	struct rq_flags			rf;
+	u32				cnt;
 };
 
 /**
@@ -1301,6 +1308,7 @@ static void scx_task_iter_start(struct scx_task_iter *iter)
 	iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
 	list_add(&iter->cursor.tasks_node, &scx_tasks);
 	iter->locked = NULL;
+	iter->cnt = 0;
 }
 
 static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
@@ -1355,14 +1363,21 @@ static void scx_task_iter_stop(struct scx_task_iter *iter)
  * scx_task_iter_next - Next task
  * @iter: iterator to walk
  *
- * Visit the next task. See scx_task_iter_start() for details.
+ * Visit the next task. See scx_task_iter_start() for details. Locks are dropped
+ * and re-acquired every %SCX_OPS_TASK_ITER_BATCH iterations to avoid causing
+ * stalls by holding scx_tasks_lock for too long.
  */
 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
 {
 	struct list_head *cursor = &iter->cursor.tasks_node;
 	struct sched_ext_entity *pos;
 
-	lockdep_assert_held(&scx_tasks_lock);
+	if (!(++iter->cnt % SCX_OPS_TASK_ITER_BATCH)) {
+		scx_task_iter_unlock(iter);
+		cpu_relax();
+		cond_resched();
+		scx_task_iter_relock(iter);
+	}
 
 	list_for_each_entry(pos, cursor, tasks_node) {
 		if (&pos->tasks_node == &scx_tasks)
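For context, a hedged sketch of what a caller-side walk over all sched_ext tasks might look like with these helpers. The wrapper name scx_for_each_task_example is hypothetical, and the assumption that scx_task_iter_start()/scx_task_iter_stop() acquire and release scx_tasks_lock internally is not shown by the hunks above. The point is that the batched unlock/relock lives entirely inside scx_task_iter_next(), so callers do not change; they only need to tolerate the lock being dropped and re-acquired between the tasks they are handed.

/* Hypothetical caller; real users may go through an rq-locked variant. */
static void scx_for_each_task_example(void (*fn)(struct task_struct *p))
{
	struct scx_task_iter sti;
	struct task_struct *p;

	scx_task_iter_start(&sti);	/* links the cursor node, takes scx_tasks_lock (assumed) */
	while ((p = scx_task_iter_next(&sti)))
		fn(p);			/* scx_tasks_lock may have been dropped and re-taken */
	scx_task_iter_stop(&sti);	/* unlinks the cursor, releases the lock (assumed) */
}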