@@ -135,12 +135,12 @@ typedef struct dispatch_workq_item_s {
 	void *func_arg;
 } dispatch_workq_item_s, *dispatch_workq_item_t;
 
-typedef struct dispatch_workq_workqueue_s {
+typedef struct dispatch_workq_context_s {
 	STAILQ_HEAD(,dispatch_workq_item_s) item_listhead;
 	dispatch_unfair_lock_s lock;
 	int priority;
 	bool overcommit;
-} dispatch_workq_workqueue_s, *dispatch_workq_workqueue_t;
+} dispatch_workq_context_s, *dispatch_workq_context_t;
 
 /*
  * The overcommit pool uses a simple coarse locking policy:
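
For readers unfamiliar with sys/queue.h: the item_listhead above is a BSD singly-linked tail queue (STAILQ); the hunks below enqueue with STAILQ_INSERT_TAIL and drain with STAILQ_FIRST/STAILQ_REMOVE_HEAD. A minimal standalone sketch of that pattern; the names my_item and my_list_head are hypothetical, only the STAILQ macros and the linkage-field idea come from this patch.

/*
 * Standalone STAILQ sketch: tail insertion, head removal (FIFO),
 * mirroring how the per-priority item lists are used in this file.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct my_item {
	int value;
	STAILQ_ENTRY(my_item) entries;   /* linkage field, like item_entry */
};

STAILQ_HEAD(my_list_head, my_item);

int
main(void)
{
	struct my_list_head head;
	STAILQ_INIT(&head);              /* like STAILQ_INIT(&ctx->item_listhead) */

	for (int i = 0; i < 3; i++) {    /* producer side: append at the tail */
		struct my_item *it = calloc(1, sizeof(*it));
		it->value = i;
		STAILQ_INSERT_TAIL(&head, it, entries);
	}

	while (!STAILQ_EMPTY(&head)) {   /* consumer side: pop from the head */
		struct my_item *it = STAILQ_FIRST(&head);
		STAILQ_REMOVE_HEAD(&head, entries);
		printf("%d\n", it->value);
		free(it);
	}
	return 0;
}

Tail insertion plus head removal is what gives each per-priority context FIFO ordering of its work items.
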
@@ -154,7 +154,7 @@ typedef struct dispatch_workq_workqueue_s {
 typedef struct dispatch_workq_overcommit_pool_s {
 	uint32_t mask;
 	int num_spares;
-	dispatch_workq_workqueue_t wqs[WORKQ_NUM_PRIORITIES];
+	dispatch_workq_context_t contexts[WORKQ_NUM_PRIORITIES];
 	pthread_mutex_t mutex;
 	pthread_cond_t spare_workers;
 } dispatch_workq_overcommit_pool_s, *dispatch_workq_overcommit_pool_t;
@@ -171,7 +171,7 @@ static dispatch_workq_overcommit_pool_s _dispatch_workq_overcommit_pool;
  */
 typedef struct dispatch_workq_pool_s {
 	volatile uint32_t mask;
-	dispatch_workq_workqueue_t wqs[WORKQ_NUM_PRIORITIES];
+	dispatch_workq_context_t contexts[WORKQ_NUM_PRIORITIES];
 } dispatch_workq_pool_s, *dispatch_workq_pool_t;
 
 static dispatch_workq_pool_s _dispatch_workq_normal_pool;
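
Both pools pair the contexts[WORKQ_NUM_PRIORITIES] array with a mask holding one bit per priority; the worker-main hunks further down use __builtin_ffs on that mask to pick the lowest-indexed (highest-priority) context that may have pending work. A small self-contained sketch of the bit/index mapping; the value chosen for WORKQ_NUM_PRIORITIES here is an assumption for illustration only.

/*
 * Sketch: how a per-priority bitmask maps onto contexts[].
 * Bit i set means contexts[i] may have pending items; __builtin_ffs
 * returns the 1-based index of the lowest set bit, i.e. the
 * highest-priority non-empty context.
 */
#include <stdio.h>

#define WORKQ_NUM_PRIORITIES 6   /* assumed value, for illustration */

int
main(void)
{
	unsigned int mask = 0;

	/* pretend priorities 2 and 4 have queued items */
	mask |= (0x1u << 2);
	mask |= (0x1u << 4);

	int idx = __builtin_ffs((int)mask);     /* -> 3 (1-based) */
	if (idx > 0) {
		printf("service contexts[%d] first\n", idx - 1);   /* -> 2 */
	}

	/* once contexts[2] drains, clear its bit and rescan */
	mask &= ~(0x1u << 2);
	idx = __builtin_ffs((int)mask);         /* -> 5, i.e. contexts[4] */
	printf("then contexts[%d]\n", idx - 1);
	return 0;
}
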
@@ -209,18 +209,18 @@ _dispatch_workq_dealloc_item(dispatch_workq_item_t wi)
 
 
 static void
-_dispatch_workq_allocate_workqueues(dispatch_workq_workqueue_t *queues,
+_dispatch_workq_allocate_contexts(dispatch_workq_context_t *contexts,
 		int num_queues, bool overcommit)
 {
 	for (int i = 0; i < num_queues; i++) {
-		dispatch_workq_workqueue_t wq;
-		wq = _dispatch_calloc(1, ROUND_UP_TO_CACHELINE_SIZE(sizeof(*wq)));
+		dispatch_workq_context_t ctx;
+		ctx = _dispatch_calloc(1, ROUND_UP_TO_CACHELINE_SIZE(sizeof(*ctx)));
 
-		wq->priority = i;
-		wq->overcommit = overcommit;
-		STAILQ_INIT(&wq->item_listhead);
+		ctx->priority = i;
+		ctx->overcommit = overcommit;
+		STAILQ_INIT(&ctx->item_listhead);
 
-		queues[i] = wq;
+		contexts[i] = ctx;
 	}
 }
 
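
Rounding each context's allocation up to a cache-line multiple, as ROUND_UP_TO_CACHELINE_SIZE does above, keeps contexts that are touched by different worker threads out of the same cache line. A sketch of the usual rounding arithmetic; the 64-byte size and the ROUND_UP macro below are assumptions for illustration, not libdispatch's own definitions.

/*
 * Sketch: round an allocation size up to the next cache-line multiple
 * so adjacent heap objects don't share a line (false sharing).
 */
#include <stdio.h>
#include <stdlib.h>

#define CACHELINE_SIZE ((size_t)64)                 /* assumed typical size */
#define ROUND_UP(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

int
main(void)
{
	size_t raw = 40;                                /* e.g. sizeof(*ctx) */
	size_t padded = ROUND_UP(raw, CACHELINE_SIZE);  /* -> 64 */
	printf("%zu -> %zu\n", raw, padded);

	/* calloc(1, padded) then gives each context its own cache line(s) */
	void *ctx = calloc(1, padded);
	free(ctx);
	return 0;
}
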
@@ -272,7 +272,7 @@ _dispatch_workq_init_once(void *context DISPATCH_UNUSED)
 	_dispatch_workq_management_init();
 
 	// overcommit pool
-	_dispatch_workq_allocate_workqueues(_dispatch_workq_overcommit_pool.wqs,
+	_dispatch_workq_allocate_contexts(_dispatch_workq_overcommit_pool.contexts,
 			WORKQ_NUM_PRIORITIES, true);
 	r = pthread_mutex_init(&_dispatch_workq_overcommit_pool.mutex, NULL);
 	(void)dispatch_assume_zero(r);
@@ -281,7 +281,7 @@ _dispatch_workq_init_once(void *context DISPATCH_UNUSED)
 	(void)dispatch_assume_zero(r);
 
 	// normal pool
-	_dispatch_workq_allocate_workqueues(_dispatch_workq_normal_pool.wqs,
+	_dispatch_workq_allocate_contexts(_dispatch_workq_normal_pool.contexts,
 			WORKQ_NUM_PRIORITIES, false);
 
 	// create initial set of normal workers
@@ -292,33 +292,33 @@ _dispatch_workq_init_once(void *context DISPATCH_UNUSED)
 
 
 int
-dispatch_workq_get_wq(dispatch_workq_workqueue_t *workqp,
+dispatch_workq_get_wq(dispatch_workq_context_t *ctxp,
 		int priority, int overcommit)
 {
 	dispatch_once_f(&_dispatch_workq_init_once_pred, NULL,
 			&_dispatch_workq_init_once);
 
 	dispatch_assert(priority >= 0 && priority < WORKQ_NUM_PRIORITIES);
 	if (overcommit) {
-		*workqp = _dispatch_workq_overcommit_pool.wqs[priority];
+		*ctxp = _dispatch_workq_overcommit_pool.contexts[priority];
 	} else {
-		*workqp = _dispatch_workq_normal_pool.wqs[priority];
+		*ctxp = _dispatch_workq_normal_pool.contexts[priority];
 	}
 
 	return 0;
 }
 
 int
-dispatch_workq_additem_np(dispatch_workq_workqueue_t workq,
+dispatch_workq_additem_np(dispatch_workq_context_t ctx,
 		void (*item_func)(void *), void *item_arg)
 {
 	dispatch_workq_item_t wi = _dispatch_workq_alloc_item(item_func, item_arg);
-	unsigned int wq_index_bit = (0x1 << workq->priority);
+	unsigned int wq_index_bit = (0x1 << ctx->priority);
 
-	if (unlikely(workq->overcommit)) {
+	if (unlikely(ctx->overcommit)) {
 		// overcommit pool uses trivial concurrency control: all operations done holding pool mutex
 		pthread_mutex_lock(&_dispatch_workq_overcommit_pool.mutex);
-		STAILQ_INSERT_TAIL(&workq->item_listhead, wi, item_entry);
+		STAILQ_INSERT_TAIL(&ctx->item_listhead, wi, item_entry);
 		_dispatch_workq_overcommit_pool.mask |= wq_index_bit;
 		if (_dispatch_workq_overcommit_pool.num_spares > 0) {
 			pthread_cond_signal(&_dispatch_workq_overcommit_pool.spare_workers);
@@ -329,12 +329,12 @@ dispatch_workq_additem_np(dispatch_workq_workqueue_t workq,
 		pthread_mutex_unlock(&_dispatch_workq_overcommit_pool.mutex);
 	} else {
 		// normal pool uses finer-grained wq locking and atomic memory ops.
-		_dispatch_unfair_lock_lock(&workq->lock);
-		if (STAILQ_EMPTY(&workq->item_listhead)) {
+		_dispatch_unfair_lock_lock(&ctx->lock);
+		if (STAILQ_EMPTY(&ctx->item_listhead)) {
 			os_atomic_or(&_dispatch_workq_normal_pool.mask, wq_index_bit, relaxed);
 		}
-		STAILQ_INSERT_TAIL(&workq->item_listhead, wi, item_entry);
-		_dispatch_unfair_lock_unlock(&workq->lock);
+		STAILQ_INSERT_TAIL(&ctx->item_listhead, wi, item_entry);
+		_dispatch_unfair_lock_unlock(&ctx->lock);
 
 		if (unlikely(_dispatch_workq_runnable_workers < _dispatch_workq_max_runnable_workers)) {
 			_dispatch_workq_work_added();
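
Taken together, the two renamed entry points are used as a pair: a caller looks up the per-priority context once with dispatch_workq_get_wq() and then submits items to it with dispatch_workq_additem_np(). A hypothetical caller-side sketch based only on the signatures in this diff; the priority value and the my_work/submit_example functions are made up for illustration and are not part of the patch.

/*
 * Hypothetical caller, illustrating the renamed API only.
 * Prototypes are transcribed from the hunks above; everything else
 * is invented for this sketch and would link against the patched file.
 */
#include <stdio.h>

typedef struct dispatch_workq_context_s *dispatch_workq_context_t;
int dispatch_workq_get_wq(dispatch_workq_context_t *ctxp,
		int priority, int overcommit);
int dispatch_workq_additem_np(dispatch_workq_context_t ctx,
		void (*item_func)(void *), void *item_arg);

static void
my_work(void *arg)
{
	printf("ran with %p\n", arg);
}

static void
submit_example(void)
{
	dispatch_workq_context_t ctx;

	/* priority 0, non-overcommit: resolves to the normal pool's contexts[0] */
	if (dispatch_workq_get_wq(&ctx, 0, 0) == 0) {
		dispatch_workq_additem_np(ctx, my_work, NULL);
	}
}
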
@@ -349,15 +349,15 @@ static void
 _dispatch_workq_add_control_item(void *op_code)
 {
 	dispatch_workq_item_t wi = _dispatch_workq_alloc_item(NULL, op_code);
-	dispatch_workq_workqueue_t workq = _dispatch_workq_normal_pool.wqs[0];
+	dispatch_workq_context_t ctx = _dispatch_workq_normal_pool.contexts[0];
 	unsigned int wq_index_bit = 1; // highest priority queue
 
-	_dispatch_unfair_lock_lock(&workq->lock);
-	if (STAILQ_EMPTY(&workq->item_listhead)) {
+	_dispatch_unfair_lock_lock(&ctx->lock);
+	if (STAILQ_EMPTY(&ctx->item_listhead)) {
 		os_atomic_or(&_dispatch_workq_normal_pool.mask, wq_index_bit, relaxed);
 	}
-	STAILQ_INSERT_TAIL(&workq->item_listhead, wi, item_entry);
-	_dispatch_unfair_lock_unlock(&workq->lock);
+	STAILQ_INSERT_TAIL(&ctx->item_listhead, wi, item_entry);
+	_dispatch_unfair_lock_unlock(&ctx->lock);
 
 	if (unlikely(_dispatch_workq_runnable_workers < _dispatch_workq_max_runnable_workers)) {
 		_dispatch_workq_work_added();
@@ -404,13 +404,13 @@ _dispatch_workq_overcommit_worker_main(void *context DISPATCH_UNUSED)
 	for (;;) {
 		unsigned int idx = __builtin_ffs(oc_pool->mask);
 		if (idx > 0) {
-			dispatch_workq_workqueue_t wq = oc_pool->wqs[idx - 1];
-			dispatch_workq_item_t work = STAILQ_FIRST(&wq->item_listhead);
+			dispatch_workq_context_t ctx = oc_pool->contexts[idx - 1];
+			dispatch_workq_item_t work = STAILQ_FIRST(&ctx->item_listhead);
 			if (work != NULL) {
 				/* Remove the first work item */
-				STAILQ_REMOVE_HEAD(&wq->item_listhead, item_entry);
-				if (STAILQ_EMPTY(&wq->item_listhead)) {
-					oc_pool->mask &= ~(0x1 << wq->priority);
+				STAILQ_REMOVE_HEAD(&ctx->item_listhead, item_entry);
+				if (STAILQ_EMPTY(&ctx->item_listhead)) {
+					oc_pool->mask &= ~(0x1 << ctx->priority);
 				}
 
 				/* Release pool mutex */
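
The overcommit pool's coarse policy, every queue and mask operation under mutex and idle workers parked on the spare_workers condition variable, is the standard mutex-plus-condvar handoff. A self-contained sketch of that pattern; the work_available flag below stands in for the pool's mask and is not the patch's code.

/*
 * Sketch of the mutex + condition variable handoff implied by the
 * overcommit pool: producers publish work and signal under the lock,
 * spare workers park on the condvar and re-check after waking.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t spare_workers = PTHREAD_COND_INITIALIZER;
static int num_spares;
static bool work_available;      /* stands in for the pool's mask */

static void
producer_publish(void)
{
	pthread_mutex_lock(&pool_mutex);
	work_available = true;                       /* publish under the lock */
	if (num_spares > 0) {
		pthread_cond_signal(&spare_workers);     /* wake one parked worker */
	}
	pthread_mutex_unlock(&pool_mutex);
}

static void
worker_wait_for_work(void)
{
	pthread_mutex_lock(&pool_mutex);
	while (!work_available) {                    /* re-check after wakeup */
		num_spares++;
		pthread_cond_wait(&spare_workers, &pool_mutex);
		num_spares--;
	}
	work_available = false;                      /* claim the work */
	pthread_mutex_unlock(&pool_mutex);
}
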
@@ -460,20 +460,20 @@ _dispatch_workq_normal_worker_main(void *context DISPATCH_UNUSED)
 	for (;;) {
 		int idx = __builtin_ffs(os_atomic_load(&pool->mask, relaxed));
 		if (idx > 0) {
-			dispatch_workq_workqueue_t wq = pool->wqs[idx - 1];
-			_dispatch_unfair_lock_lock(&wq->lock);
-			dispatch_workq_item_t work = STAILQ_FIRST(&wq->item_listhead);
+			dispatch_workq_context_t ctx = pool->contexts[idx - 1];
+			_dispatch_unfair_lock_lock(&ctx->lock);
+			dispatch_workq_item_t work = STAILQ_FIRST(&ctx->item_listhead);
 			if (unlikely(work == NULL)) {
 				// wasn't actually work due to racy read of mask
-				_dispatch_unfair_lock_unlock(&wq->lock);
+				_dispatch_unfair_lock_unlock(&ctx->lock);
 				continue;
 			} else {
 				// found work: remove it.
-				STAILQ_REMOVE_HEAD(&wq->item_listhead, item_entry);
-				if (STAILQ_EMPTY(&wq->item_listhead)) {
-					os_atomic_and(&pool->mask, ~(0x1 << wq->priority), relaxed);
+				STAILQ_REMOVE_HEAD(&ctx->item_listhead, item_entry);
+				if (STAILQ_EMPTY(&ctx->item_listhead)) {
+					os_atomic_and(&pool->mask, ~(0x1 << ctx->priority), relaxed);
 				}
-				_dispatch_unfair_lock_unlock(&wq->lock);
+				_dispatch_unfair_lock_unlock(&ctx->lock);
 
 				/* Execute the work item */
 				void (*func)(void *) = work->func;