@@ -869,36 +869,6 @@ static bool too_many_workers(struct worker_pool *pool)
 	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
 }
 
-/*
- * Wake up functions.
- */
-
-/* Return the first idle worker. Called with pool->lock held. */
-static struct worker *first_idle_worker(struct worker_pool *pool)
-{
-	if (unlikely(list_empty(&pool->idle_list)))
-		return NULL;
-
-	return list_first_entry(&pool->idle_list, struct worker, entry);
-}
-
-/**
- * wake_up_worker - wake up an idle worker
- * @pool: worker pool to wake worker from
- *
- * Wake up the first idle worker of @pool.
- *
- * CONTEXT:
- * raw_spin_lock_irq(pool->lock).
- */
-static void wake_up_worker(struct worker_pool *pool)
-{
-	struct worker *worker = first_idle_worker(pool);
-
-	if (likely(worker))
-		wake_up_process(worker->task);
-}
-
 /**
  * worker_set_flags - set worker flags and adjust nr_running accordingly
  * @worker: self
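The too_many_workers() check kept as context above amounts to "tolerate roughly one idle worker per MAX_IDLE_WORKERS_RATIO busy ones, on top of a floor of two". Below is a minimal user-space sketch of just that arithmetic, assuming the ratio is 4 as it usually is in workqueue.c; the helper is a hypothetical stand-in, not the kernel function, and ignores the locking and pool state the real check relies on.

#include <stdbool.h>
#include <stdio.h>

#define MAX_IDLE_WORKERS_RATIO	4	/* assumed value: 1/4 of busy can be idle */

/* Stand-in for the real check: no pool, no locking, just the ratio test. */
static bool pool_has_too_many_idle(int nr_workers, int nr_idle)
{
	int nr_busy = nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}

int main(void)
{
	/* With 30 busy workers, 9 idle is fine; the 10th would arm the idle timer. */
	printf("%d\n", pool_has_too_many_idle(39, 9));	/* 0: (9 - 2) * 4 = 28 < 30 */
	printf("%d\n", pool_has_too_many_idle(40, 10));	/* 1: (10 - 2) * 4 = 32 >= 30 */
	return 0;
}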
@@ -947,6 +917,174 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 		pool->nr_running++;
 }
 
+/* Return the first idle worker. Called with pool->lock held. */
+static struct worker *first_idle_worker(struct worker_pool *pool)
+{
+	if (unlikely(list_empty(&pool->idle_list)))
+		return NULL;
+
+	return list_first_entry(&pool->idle_list, struct worker, entry);
+}
+
+/**
+ * worker_enter_idle - enter idle state
+ * @worker: worker which is entering idle state
+ *
+ * @worker is entering idle state. Update stats and idle timer if
+ * necessary.
+ *
+ * LOCKING:
+ * raw_spin_lock_irq(pool->lock).
+ */
+static void worker_enter_idle(struct worker *worker)
+{
+	struct worker_pool *pool = worker->pool;
+
+	if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
+	    WARN_ON_ONCE(!list_empty(&worker->entry) &&
+			 (worker->hentry.next || worker->hentry.pprev)))
+		return;
+
+	/* can't use worker_set_flags(), also called from create_worker() */
+	worker->flags |= WORKER_IDLE;
+	pool->nr_idle++;
+	worker->last_active = jiffies;
+
+	/* idle_list is LIFO */
+	list_add(&worker->entry, &pool->idle_list);
+
+	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
+		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
+
+	/* Sanity check nr_running. */
+	WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
+}
+
+/**
+ * worker_leave_idle - leave idle state
+ * @worker: worker which is leaving idle state
+ *
+ * @worker is leaving idle state. Update stats.
+ *
+ * LOCKING:
+ * raw_spin_lock_irq(pool->lock).
+ */
+static void worker_leave_idle(struct worker *worker)
+{
+	struct worker_pool *pool = worker->pool;
+
+	if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
+		return;
+	worker_clr_flags(worker, WORKER_IDLE);
+	pool->nr_idle--;
+	list_del_init(&worker->entry);
+}
+
+/**
+ * find_worker_executing_work - find worker which is executing a work
+ * @pool: pool of interest
+ * @work: work to find worker for
+ *
+ * Find a worker which is executing @work on @pool by searching
+ * @pool->busy_hash which is keyed by the address of @work. For a worker
+ * to match, its current execution should match the address of @work and
+ * its work function. This is to avoid unwanted dependency between
+ * unrelated work executions through a work item being recycled while still
+ * being executed.
+ *
+ * This is a bit tricky. A work item may be freed once its execution
+ * starts and nothing prevents the freed area from being recycled for
+ * another work item. If the same work item address ends up being reused
+ * before the original execution finishes, workqueue will identify the
+ * recycled work item as currently executing and make it wait until the
+ * current execution finishes, introducing an unwanted dependency.
+ *
+ * This function checks the work item address and work function to avoid
+ * false positives. Note that this isn't complete as one may construct a
+ * work function which can introduce dependency onto itself through a
+ * recycled work item. Well, if somebody wants to shoot oneself in the
+ * foot that badly, there's only so much we can do, and if such deadlock
+ * actually occurs, it should be easy to locate the culprit work function.
+ *
+ * CONTEXT:
+ * raw_spin_lock_irq(pool->lock).
+ *
+ * Return:
+ * Pointer to worker which is executing @work if found, %NULL
+ * otherwise.
+ */
+static struct worker *find_worker_executing_work(struct worker_pool *pool,
+						 struct work_struct *work)
+{
+	struct worker *worker;
+
+	hash_for_each_possible(pool->busy_hash, worker, hentry,
+			       (unsigned long)work)
+		if (worker->current_work == work &&
+		    worker->current_func == work->func)
+			return worker;
+
+	return NULL;
+}
+
+/**
+ * move_linked_works - move linked works to a list
+ * @work: start of series of works to be scheduled
+ * @head: target list to append @work to
+ * @nextp: out parameter for nested worklist walking
+ *
+ * Schedule linked works starting from @work to @head. Work series to
+ * be scheduled starts at @work and includes any consecutive work with
+ * WORK_STRUCT_LINKED set in its predecessor.
+ *
+ * If @nextp is not NULL, it's updated to point to the next work of
+ * the last scheduled work. This allows move_linked_works() to be
+ * nested inside outer list_for_each_entry_safe().
+ *
+ * CONTEXT:
+ * raw_spin_lock_irq(pool->lock).
+ */
+static void move_linked_works(struct work_struct *work, struct list_head *head,
+			      struct work_struct **nextp)
+{
+	struct work_struct *n;
+
+	/*
+	 * Linked worklist will always end before the end of the list,
+	 * use NULL for list head.
+	 */
+	list_for_each_entry_safe_from(work, n, NULL, entry) {
+		list_move_tail(&work->entry, head);
+		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
+			break;
+	}
+
+	/*
+	 * If we're already inside safe list traversal and have moved
+	 * multiple works to the scheduled queue, the next position
+	 * needs to be updated.
+	 */
+	if (nextp)
+		*nextp = n;
+}
+
+/**
+ * wake_up_worker - wake up an idle worker
+ * @pool: worker pool to wake worker from
+ *
+ * Wake up the first idle worker of @pool.
+ *
+ * CONTEXT:
+ * raw_spin_lock_irq(pool->lock).
+ */
+static void wake_up_worker(struct worker_pool *pool)
+{
+	struct worker *worker = first_idle_worker(pool);
+
+	if (likely(worker))
+		wake_up_process(worker->task);
+}
+
 #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
 
 /*
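The kernel-doc for find_worker_executing_work() above is the subtle part of this relocated block: a busy-hash hit on the work address alone could belong to a recycled work item, so the lookup also compares the work function. Here is a self-contained sketch of that double check, with a flat array standing in for pool->busy_hash; all names below are hypothetical, not kernel API.

#include <stddef.h>
#include <stdio.h>

typedef void (*work_func_t)(void *);

struct busy_slot {
	void		*current_work;	/* address the worker is executing */
	work_func_t	current_func;	/* function it was queued with */
};

/* Match both the address and the function, as the real lookup does. */
static struct busy_slot *find_busy(struct busy_slot *slots, size_t n,
				   void *work, work_func_t func)
{
	for (size_t i = 0; i < n; i++)
		if (slots[i].current_work == work &&
		    slots[i].current_func == func)
			return &slots[i];
	return NULL;
}

static void func_a(void *p) { (void)p; }
static void func_b(void *p) { (void)p; }

int main(void)
{
	char buf[16];	/* same address reused by two different "works" */
	struct busy_slot busy[1] = { { buf, func_a } };

	/*
	 * A recycled item at the same address but with a different function
	 * must not be mistaken for the one still executing.
	 */
	printf("%d\n", find_busy(busy, 1, buf, func_a) != NULL);	/* 1 */
	printf("%d\n", find_busy(busy, 1, buf, func_b) != NULL);	/* 0 */
	return 0;
}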
@@ -1202,94 +1340,6 @@ work_func_t wq_worker_last_func(struct task_struct *task)
 	return worker->last_func;
 }
 
-/**
- * find_worker_executing_work - find worker which is executing a work
- * @pool: pool of interest
- * @work: work to find worker for
- *
- * Find a worker which is executing @work on @pool by searching
- * @pool->busy_hash which is keyed by the address of @work. For a worker
- * to match, its current execution should match the address of @work and
- * its work function. This is to avoid unwanted dependency between
- * unrelated work executions through a work item being recycled while still
- * being executed.
- *
- * This is a bit tricky. A work item may be freed once its execution
- * starts and nothing prevents the freed area from being recycled for
- * another work item. If the same work item address ends up being reused
- * before the original execution finishes, workqueue will identify the
- * recycled work item as currently executing and make it wait until the
- * current execution finishes, introducing an unwanted dependency.
- *
- * This function checks the work item address and work function to avoid
- * false positives. Note that this isn't complete as one may construct a
- * work function which can introduce dependency onto itself through a
- * recycled work item. Well, if somebody wants to shoot oneself in the
- * foot that badly, there's only so much we can do, and if such deadlock
- * actually occurs, it should be easy to locate the culprit work function.
- *
- * CONTEXT:
- * raw_spin_lock_irq(pool->lock).
- *
- * Return:
- * Pointer to worker which is executing @work if found, %NULL
- * otherwise.
- */
-static struct worker *find_worker_executing_work(struct worker_pool *pool,
-						 struct work_struct *work)
-{
-	struct worker *worker;
-
-	hash_for_each_possible(pool->busy_hash, worker, hentry,
-			       (unsigned long)work)
-		if (worker->current_work == work &&
-		    worker->current_func == work->func)
-			return worker;
-
-	return NULL;
-}
-
-/**
- * move_linked_works - move linked works to a list
- * @work: start of series of works to be scheduled
- * @head: target list to append @work to
- * @nextp: out parameter for nested worklist walking
- *
- * Schedule linked works starting from @work to @head. Work series to
- * be scheduled starts at @work and includes any consecutive work with
- * WORK_STRUCT_LINKED set in its predecessor.
- *
- * If @nextp is not NULL, it's updated to point to the next work of
- * the last scheduled work. This allows move_linked_works() to be
- * nested inside outer list_for_each_entry_safe().
- *
- * CONTEXT:
- * raw_spin_lock_irq(pool->lock).
- */
-static void move_linked_works(struct work_struct *work, struct list_head *head,
-			      struct work_struct **nextp)
-{
-	struct work_struct *n;
-
-	/*
-	 * Linked worklist will always end before the end of the list,
-	 * use NULL for list head.
-	 */
-	list_for_each_entry_safe_from(work, n, NULL, entry) {
-		list_move_tail(&work->entry, head);
-		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
-			break;
-	}
-
-	/*
-	 * If we're already inside safe list traversal and have moved
-	 * multiple works to the scheduled queue, the next position
-	 * needs to be updated.
-	 */
-	if (nextp)
-		*nextp = n;
-}
-
 /**
  * get_pwq - get an extra reference on the specified pool_workqueue
  * @pwq: pool_workqueue to get
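For move_linked_works(), removed above and re-added earlier in this patch, the series ends with the first work whose WORK_STRUCT_LINKED bit is clear; everything up to and including that work moves in one go, and *nextp lets an outer safe traversal resume past the series. Below is a flattened, array-based analogue of that behavior, illustrative only and not the kernel list API.

#include <stdbool.h>
#include <stdio.h>

struct item {
	int	id;
	bool	linked;	/* "the next item belongs to the same series" */
};

/*
 * Move items from src[from..] into dst until the first item that does
 * not have its linked flag set has been moved, mirroring the semantics
 * of move_linked_works(). Returns the index just past the moved series
 * (the "*nextp" analogue).
 */
static int move_series(const struct item *src, int n, int from,
		       struct item *dst, int *ndst)
{
	int i;

	for (i = from; i < n; i++) {
		dst[(*ndst)++] = src[i];
		if (!src[i].linked)
			break;
	}
	return i + 1;
}

int main(void)
{
	struct item src[] = {
		{ 1, true }, { 2, true }, { 3, false },	/* one linked series */
		{ 4, false },				/* unrelated item */
	};
	struct item dst[4];
	int ndst = 0;

	int next = move_series(src, 4, 0, dst, &ndst);

	printf("moved %d items, next index %d\n", ndst, next);	/* moved 3 items, next index 3 */
	return 0;
}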
@@ -1974,60 +2024,6 @@ bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
 }
 EXPORT_SYMBOL(queue_rcu_work);
 
-/**
- * worker_enter_idle - enter idle state
- * @worker: worker which is entering idle state
- *
- * @worker is entering idle state. Update stats and idle timer if
- * necessary.
- *
- * LOCKING:
- * raw_spin_lock_irq(pool->lock).
- */
-static void worker_enter_idle(struct worker *worker)
-{
-	struct worker_pool *pool = worker->pool;
-
-	if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
-	    WARN_ON_ONCE(!list_empty(&worker->entry) &&
-			 (worker->hentry.next || worker->hentry.pprev)))
-		return;
-
-	/* can't use worker_set_flags(), also called from create_worker() */
-	worker->flags |= WORKER_IDLE;
-	pool->nr_idle++;
-	worker->last_active = jiffies;
-
-	/* idle_list is LIFO */
-	list_add(&worker->entry, &pool->idle_list);
-
-	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
-		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
-
-	/* Sanity check nr_running. */
-	WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
-}
-
-/**
- * worker_leave_idle - leave idle state
- * @worker: worker which is leaving idle state
- *
- * @worker is leaving idle state. Update stats.
- *
- * LOCKING:
- * raw_spin_lock_irq(pool->lock).
- */
-static void worker_leave_idle(struct worker *worker)
-{
-	struct worker_pool *pool = worker->pool;
-
-	if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
-		return;
-	worker_clr_flags(worker, WORKER_IDLE);
-	pool->nr_idle--;
-	list_del_init(&worker->entry);
-}
-
 static struct worker *alloc_worker(int node)
 {
 	struct worker *worker;
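The "idle_list is LIFO" comment in the relocated worker_enter_idle() is what makes first_idle_worker() and wake_up_worker() prefer the most recently idled worker, leaving the longest-idle workers at the tail for the idle timer to reap. A toy stand-in for that ordering follows (a plain stack; names and layout are hypothetical, not the kernel's list_head machinery).

#include <stdio.h>

#define MAX_IDLE	16

/* A LIFO stack stands in for pool->idle_list here. */
static int idle_stack[MAX_IDLE];
static int nr_idle;

static void enter_idle(int worker_id)
{
	idle_stack[nr_idle++] = worker_id;	/* list_add(): push at the head */
}

/* first_idle_worker() analogue: the most recently idled worker. */
static int first_idle(void)
{
	return nr_idle ? idle_stack[nr_idle - 1] : -1;
}

int main(void)
{
	enter_idle(7);
	enter_idle(3);	/* idled last, so woken first (likely still cache-warm) */

	printf("wake worker %d\n", first_idle());	/* wake worker 3 */
	return 0;
}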