#pragma mark static data for management subsystem

-/*
- * The number of runnable worker threads as observed by
- * the scheduling management subsystem.
- */
-static volatile int32_t _dispatch_workq_runnable_workers;
-
-/*
- * The desired number of runnable worker threads
- * for a workqueue (assuming sufficient work).
- */
-static int32_t _dispatch_workq_target_runnable_workers;
-
#if DISPATCH_ENABLE_PWQ_KEXT
/* Are we using user-level or kext-based management? */
static bool _dispatch_workq_kext_active;
#endif

/*
- * State for the user-level pool management.
+ * State for the user-level monitoring of a workqueue.
 */
typedef struct dispatch_workq_manager_s {
+	/* The observed number of runnable registered workers */
+	int32_t runnable_workers;
+
+	/* The desired number of runnable registered workers */
+	int32_t target_runnable_workers;
+
	/*
-	 * Tracking of registered workers; all updates and reads
-	 * are performed while holding the lock.
-	 */
+	 * Tracking of registered workers; all accesses must hold the lock.
+	 * Invariant: registered_workers[0]...registered_workers[num_registered_workers-1]
+	 * contain the pids of the workers that we are managing.
+	 */
	dispatch_unfair_lock_s registered_worker_lock;
-	int num_registered_workers;
	pid_t *registered_workers;
-
+	int num_registered_workers;
} dispatch_workq_manager_s, *dispatch_workq_manager_t;

static dispatch_workq_manager_s _dispatch_workq_manager;
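Both the unregister path and the stale-worker cleanup in the hunks below maintain this invariant with the same swap-with-last removal. As a standalone illustration (an editor's sketch, not code from this diff; the helper name is hypothetical, and the caller must hold registered_worker_lock):

	// Remove the entry at index i by overwriting it with the last entry,
	// keeping registered_workers[0..num_registered_workers-1] densely packed.
	static void
	example_remove_registered_worker(dispatch_workq_manager_t mgr, int i)
	{
		int last = mgr->num_registered_workers - 1;
		mgr->registered_workers[i] = mgr->registered_workers[last];
		mgr->registered_workers[last] = 0;
		mgr->num_registered_workers--;
	}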

-#pragma mark backdoors into queue.c internals
+#pragma mark inline helper functions
+
+DISPATCH_INLINE
+dispatch_workq_manager_t
+_dispatch_workq_get_default_manager(void)
+{
+	return &_dispatch_workq_manager;
+}

DISPATCH_INLINE
dispatch_queue_t
@@ -114,17 +114,18 @@ dispatch_workq_worker_register(dispatch_queue_t root_q)
		return true;
	}
#endif
+	dispatch_workq_manager_t mgr = _dispatch_workq_get_default_manager();
	bool rc;
	int tid = syscall(SYS_gettid);
-	_dispatch_unfair_lock_lock(&_dispatch_workq_manager.registered_worker_lock);
-	if (_dispatch_workq_manager.num_registered_workers < WORKQ_MAX_TRACKED_WORKERS - 1) {
-		int worker_id = _dispatch_workq_manager.num_registered_workers++;
-		_dispatch_workq_manager.registered_workers[worker_id] = tid;
+	_dispatch_unfair_lock_lock(&mgr->registered_worker_lock);
+	if (mgr->num_registered_workers < WORKQ_MAX_TRACKED_WORKERS - 1) {
+		int worker_id = mgr->num_registered_workers++;
+		mgr->registered_workers[worker_id] = tid;
		rc = true;
	} else {
		rc = false;
	}
-	_dispatch_unfair_lock_unlock(&_dispatch_workq_manager.registered_worker_lock);
+	_dispatch_unfair_lock_unlock(&mgr->registered_worker_lock);

	return rc;
}
@@ -143,18 +144,19 @@ dispatch_workq_worker_unregister(dispatch_queue_t root_q)
		return;
	}
#endif
+	dispatch_workq_manager_t mgr = _dispatch_workq_get_default_manager();
	int tid = syscall(SYS_gettid);
-	_dispatch_unfair_lock_lock(&_dispatch_workq_manager.registered_worker_lock);
-	for (int i = 0; i < _dispatch_workq_manager.num_registered_workers; i++) {
-		if (_dispatch_workq_manager.registered_workers[i] == tid) {
-			int last = _dispatch_workq_manager.num_registered_workers - 1;
-			_dispatch_workq_manager.registered_workers[i] = _dispatch_workq_manager.registered_workers[last];
-			_dispatch_workq_manager.registered_workers[last] = 0;
-			_dispatch_workq_manager.num_registered_workers--;
+	_dispatch_unfair_lock_lock(&mgr->registered_worker_lock);
+	for (int i = 0; i < mgr->num_registered_workers; i++) {
+		if (mgr->registered_workers[i] == tid) {
+			int last = mgr->num_registered_workers - 1;
+			mgr->registered_workers[i] = mgr->registered_workers[last];
+			mgr->registered_workers[last] = 0;
+			mgr->num_registered_workers--;
			break;
		}
	}
-	_dispatch_unfair_lock_unlock(&_dispatch_workq_manager.registered_worker_lock);
+	_dispatch_unfair_lock_unlock(&mgr->registered_worker_lock);
}

@@ -163,19 +165,19 @@ dispatch_workq_worker_unregister(dispatch_queue_t root_q)
 * to get a count of the number of them that are actually runnable.
 * See the proc(5) man page for the format of the contents of /proc/[pid]/stat.
 */
-static int
-_dispatch_workq_count_runnable_workers(void)
+static void
+_dispatch_workq_count_runnable_workers(dispatch_workq_manager_t mgr)
{
	char path[128];
	char buf[4096];
	int running_count = 0;

	memset(buf, 0, sizeof(buf));

-	_dispatch_unfair_lock_lock(&_dispatch_workq_manager.registered_worker_lock);
+	_dispatch_unfair_lock_lock(&mgr->registered_worker_lock);

-	for (int i = 0; i < _dispatch_workq_manager.num_registered_workers; i++) {
-		pid_t worker_pid = _dispatch_workq_manager.registered_workers[i];
+	for (int i = 0; i < mgr->num_registered_workers; i++) {
+		pid_t worker_pid = mgr->registered_workers[i];
		int fd;
		ssize_t bytes_read = -1;
@@ -188,10 +190,10 @@ _dispatch_workq_count_runnable_workers(void)
			// Must mean the worker exited uncleanly (without executing dispatch_workq_worker_unregister())
			// Clean up by removing its pid and decrementing the number of registered workers
			_dispatch_debug("workq: Unable to open /proc/%d/stat; removing worker from monitoring list", worker_pid);
-			int last = _dispatch_workq_manager.num_registered_workers - 1;
-			_dispatch_workq_manager.registered_workers[i] = _dispatch_workq_manager.registered_workers[last];
-			_dispatch_workq_manager.registered_workers[last] = 0;
-			_dispatch_workq_manager.num_registered_workers--;
+			int last = mgr->num_registered_workers - 1;
+			mgr->registered_workers[i] = mgr->registered_workers[last];
+			mgr->registered_workers[last] = 0;
+			mgr->num_registered_workers--;
		} else {
			bytes_read = read(fd, buf, sizeof(buf));
			(void)close(fd);
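The parsing of the stat line itself is elided between these hunks. Per proc(5), the state is the third field and follows the parenthesized command name, which can itself contain spaces and parentheses, so the robust approach scans back from the last ')'. A minimal sketch of such a check (an editor's illustration with a hypothetical helper name, not the elided code):

	#include <stdbool.h>
	#include <string.h>

	// Return true if a /proc/[pid]/stat line reports state 'R'
	// (running/runnable). The state field is the first non-space
	// character after the ')' that closes the comm field.
	static bool
	example_stat_line_is_runnable(const char *buf)
	{
		const char *p = strrchr(buf, ')');
		if (p == NULL) return false;
		p++;
		while (*p == ' ') p++;
		return *p == 'R';
	}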
@@ -213,31 +215,33 @@ _dispatch_workq_count_runnable_workers(void)
		}
	}

-	_dispatch_unfair_lock_unlock(&_dispatch_workq_manager.registered_worker_lock);
+	mgr->runnable_workers = running_count;

-	return running_count;
+	_dispatch_unfair_lock_unlock(&mgr->registered_worker_lock);
}

static void
_dispatch_workq_monitor_thread_pool(void *context DISPATCH_UNUSED)
{
+	dispatch_workq_manager_t mgr = _dispatch_workq_get_default_manager();
	dispatch_queue_t dq = _dispatch_workq_get_default_root_queue();
	bool work_available = _dispatch_workq_root_queue_has_work(dq);
	if (work_available) {
-		_dispatch_workq_runnable_workers = _dispatch_workq_count_runnable_workers();
+		_dispatch_workq_count_runnable_workers(mgr);

		_dispatch_debug("workq: %s is non-empty and has %d runnable workers\n",
-				dq->dq_label, _dispatch_workq_runnable_workers);
+				dq->dq_label, mgr->runnable_workers);

-		if (_dispatch_workq_runnable_workers < _dispatch_workq_target_runnable_workers) {
-			int32_t count = _dispatch_pthread_root_queue_size(dq);
-			int32_t allowed_over = WORKQ_OVERSUBSCRIBE_FACTOR * _dispatch_workq_target_runnable_workers;
+		if (mgr->runnable_workers < mgr->target_runnable_workers) {
+			int32_t count = _dispatch_pthread_root_queue_thread_pool_size(dq);
+			int32_t allowed_over = WORKQ_OVERSUBSCRIBE_FACTOR * mgr->target_runnable_workers;
+			allowed_over = MIN(allowed_over, WORKQ_MAX_TRACKED_WORKERS - mgr->target_runnable_workers);
			if (count + allowed_over > 0) {
				_dispatch_debug("workq: %s has count %d; requesting 1 additional worker",
						dq->dq_label, count);
				_dispatch_pthread_root_queue_oversubscribe(dq, 1);
			} else {
-				_dispatch_debug("workq: %s is already oversubscribed by %d; taking no action",
+				_dispatch_debug("workq: %s already oversubscribed by %d; taking no action",
						dq->dq_label, -count);
			}
		}
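To make the oversubscription guard concrete with illustrative numbers (WORKQ_OVERSUBSCRIBE_FACTOR's value is not shown in this diff; assume 2, and target_runnable_workers = 4): allowed_over = min(2 * 4, WORKQ_MAX_TRACKED_WORKERS - 4), i.e. 8 for any reasonable tracking limit. Since the remaining pool size count goes negative as workers are oversubscribed, the monitor keeps requesting one extra worker per tick until count reaches -8, at which point count + allowed_over == 0 and it takes no further action.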
@@ -257,10 +261,9 @@ _dispatch_workq_init_once(void *context DISPATCH_UNUSED)
		return;
	}
#endif
-	_dispatch_workq_manager.registered_workers =
-			_dispatch_calloc(WORKQ_MAX_TRACKED_WORKERS, sizeof(pid_t));
-
-	_dispatch_workq_target_runnable_workers = dispatch_hw_config(active_cpus);
+	dispatch_workq_manager_t mgr = _dispatch_workq_get_default_manager();
+	mgr->registered_workers = _dispatch_calloc(WORKQ_MAX_TRACKED_WORKERS, sizeof(pid_t));
+	mgr->target_runnable_workers = dispatch_hw_config(active_cpus);

	// Create monitoring timer that will periodically run on dispatch_mgr_q
	dispatch_source_t ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, &_dispatch_mgr_q);
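The diff ends before ds is configured. For orientation, a timer source like this is typically armed with the public dispatch API along these lines (an assumed continuation; the 100ms period and the handler wiring are illustrative, not taken from this commit):

	dispatch_source_set_timer(ds, dispatch_time(DISPATCH_TIME_NOW, 0),
			100 * NSEC_PER_MSEC, NSEC_PER_MSEC);
	dispatch_source_set_event_handler_f(ds, _dispatch_workq_monitor_thread_pool);
	dispatch_resume(ds);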