
Commit 99b8625

dispatch_workq_workqueue ==> dispatch_workq_context
1 parent: a275568

2 files changed (+43 −43 lines)


src/workqueue/workqueue.c (+42 −42)
@@ -135,12 +135,12 @@ typedef struct dispatch_workq_item_s {
 	void *func_arg;
 } dispatch_workq_item_s, *dispatch_workq_item_t;
 
-typedef struct dispatch_workq_workqueue_s {
+typedef struct dispatch_workq_context_s {
 	STAILQ_HEAD(,dispatch_workq_item_s) item_listhead;
 	dispatch_unfair_lock_s lock;
 	int priority;
 	bool overcommit;
-} dispatch_workq_workqueue_s, *dispatch_workq_workqueue_t;
+} dispatch_workq_context_s, *dispatch_workq_context_t;
 
 /*
  * The overcommit pool uses a simple coarse locking policy:
@@ -154,7 +154,7 @@ typedef struct dispatch_workq_workqueue_s {
 typedef struct dispatch_workq_overcommit_pool_s {
 	uint32_t mask;
 	int num_spares;
-	dispatch_workq_workqueue_t wqs[WORKQ_NUM_PRIORITIES];
+	dispatch_workq_context_t contexts[WORKQ_NUM_PRIORITIES];
 	pthread_mutex_t mutex;
 	pthread_cond_t spare_workers;
 } dispatch_workq_overcommit_pool_s, *dispatch_workq_overcommit_pool_t;
@@ -171,7 +171,7 @@ static dispatch_workq_overcommit_pool_s _dispatch_workq_overcommit_pool;
  */
 typedef struct dispatch_workq_pool_s {
 	volatile uint32_t mask;
-	dispatch_workq_workqueue_t wqs[WORKQ_NUM_PRIORITIES];
+	dispatch_workq_context_t contexts[WORKQ_NUM_PRIORITIES];
 } dispatch_workq_pool_s, *dispatch_workq_pool_t;
 
 static dispatch_workq_pool_s _dispatch_workq_normal_pool;
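
An aside on the mask field both pool structs carry: bit i is set while contexts[i] has pending items, and because priority 0 is the highest priority, a find-first-set over the mask yields the most urgent nonempty queue. A minimal standalone sketch of that convention (the names below are illustrative, not part of the commit):

/* Sketch: per-priority "nonempty" bits in a pool mask. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 0;

	/* Work arrives at priorities 2 and 0: set the matching bits. */
	mask |= (0x1 << 2);
	mask |= (0x1 << 0);

	/* __builtin_ffs returns the 1-based index of the lowest set bit
	 * (0 if none). Priority 0 is highest, so the lowest set bit is
	 * the most urgent nonempty queue. */
	int idx = __builtin_ffs(mask);
	printf("serve priority %d first\n", idx - 1); /* prints 0 */

	/* Draining a queue clears its bit, mirroring
	 * oc_pool->mask &= ~(0x1 << ctx->priority) in the diff. */
	mask &= ~(0x1 << 0);
	printf("then priority %d\n", __builtin_ffs(mask) - 1); /* prints 2 */
	return 0;
}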
@@ -209,18 +209,18 @@ _dispatch_workq_dealloc_item(dispatch_workq_item_t wi)
 
 
 static void
-_dispatch_workq_allocate_workqueues(dispatch_workq_workqueue_t *queues,
+_dispatch_workq_allocate_contexts(dispatch_workq_context_t *contexts,
 	int num_queues, bool overcommit)
 {
 	for (int i=0; i<num_queues; i++) {
-		dispatch_workq_workqueue_t wq;
-		wq = _dispatch_calloc(1, ROUND_UP_TO_CACHELINE_SIZE(sizeof(*wq)));
+		dispatch_workq_context_t ctx;
+		ctx = _dispatch_calloc(1, ROUND_UP_TO_CACHELINE_SIZE(sizeof(*ctx)));
 
-		wq->priority = i;
-		wq->overcommit = overcommit;
-		STAILQ_INIT(&wq->item_listhead);
+		ctx->priority = i;
+		ctx->overcommit = overcommit;
+		STAILQ_INIT(&ctx->item_listhead);
 
-		queues[i] = wq;
+		contexts[i] = ctx;
 	}
 }
 
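Note that _dispatch_workq_allocate_contexts rounds each allocation up to a cache-line multiple, so neighboring contexts, each holding its own lock, never share a line. A hedged sketch of what such rounding looks like, assuming 64-byte lines and a hypothetical macro name (the real ROUND_UP_TO_CACHELINE_SIZE and _dispatch_calloc live elsewhere in libdispatch):

#include <stdlib.h>

#define CACHELINE 64 /* assumption; the real size is platform-defined */
#define ROUND_UP_TO_CACHELINE(x) \
	(((x) + (CACHELINE - 1)) & ~(size_t)(CACHELINE - 1))

/* Pad each context to a full line so its lock cannot false-share
 * with the context allocated next to it. */
static void *alloc_padded(size_t sz)
{
	return calloc(1, ROUND_UP_TO_CACHELINE(sz));
}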
@@ -272,7 +272,7 @@ _dispatch_workq_init_once(void *context DISPATCH_UNUSED)
 	_dispatch_workq_management_init();
 
 	// overcommit pool
-	_dispatch_workq_allocate_workqueues(_dispatch_workq_overcommit_pool.wqs,
+	_dispatch_workq_allocate_contexts(_dispatch_workq_overcommit_pool.contexts,
 	    WORKQ_NUM_PRIORITIES, true);
 	r = pthread_mutex_init(&_dispatch_workq_overcommit_pool.mutex, NULL);
 	(void)dispatch_assume_zero(r);
@@ -281,7 +281,7 @@ _dispatch_workq_init_once(void *context DISPATCH_UNUSED)
 	(void)dispatch_assume_zero(r);
 
 	// normal pool
-	_dispatch_workq_allocate_workqueues(_dispatch_workq_normal_pool.wqs,
+	_dispatch_workq_allocate_contexts(_dispatch_workq_normal_pool.contexts,
 	    WORKQ_NUM_PRIORITIES, false);
 
 	// create initial set of normal workers
@@ -292,33 +292,33 @@ _dispatch_workq_init_once(void *context DISPATCH_UNUSED)
 
 
 int
-dispatch_workq_get_wq(dispatch_workq_workqueue_t *workqp,
+dispatch_workq_get_wq(dispatch_workq_context_t *ctxp,
 	int priority, int overcommit)
 {
 	dispatch_once_f(&_dispatch_workq_init_once_pred, NULL,
 	    &_dispatch_workq_init_once);
 
 	dispatch_assert(priority >= 0 && priority < WORKQ_NUM_PRIORITIES);
 	if (overcommit) {
-		*workqp = _dispatch_workq_overcommit_pool.wqs[priority];
+		*ctxp = _dispatch_workq_overcommit_pool.contexts[priority];
 	} else {
-		*workqp = _dispatch_workq_normal_pool.wqs[priority];
+		*ctxp = _dispatch_workq_normal_pool.contexts[priority];
 	}
 
 	return 0;
 }
 
 int
-dispatch_workq_additem_np(dispatch_workq_workqueue_t workq,
+dispatch_workq_additem_np(dispatch_workq_context_t ctx,
 	void (*item_func)(void *), void *item_arg)
 {
 	dispatch_workq_item_t wi = _dispatch_workq_alloc_item(item_func, item_arg);
-	unsigned int wq_index_bit = (0x1 << workq->priority);
+	unsigned int wq_index_bit = (0x1 << ctx->priority);
 
-	if (unlikely(workq->overcommit)) {
+	if (unlikely(ctx->overcommit)) {
 		// overcommit pool uses trival concurrency control: all operations done holding pool mutex
 		pthread_mutex_lock(&_dispatch_workq_overcommit_pool.mutex);
-		STAILQ_INSERT_TAIL(&workq->item_listhead, wi, item_entry);
+		STAILQ_INSERT_TAIL(&ctx->item_listhead, wi, item_entry);
 		_dispatch_workq_overcommit_pool.mask |= wq_index_bit;
 		if (_dispatch_workq_overcommit_pool.num_spares > 0) {
 			pthread_cond_signal(&_dispatch_workq_overcommit_pool.spare_workers);
@@ -329,12 +329,12 @@ dispatch_workq_additem_np(dispatch_workq_workqueue_t workq,
 		pthread_mutex_unlock(&_dispatch_workq_overcommit_pool.mutex);
 	} else {
 		// normal pool uses finer-grained wq locking and atomic memory ops.
-		_dispatch_unfair_lock_lock(&workq->lock);
-		if (STAILQ_EMPTY(&workq->item_listhead)) {
+		_dispatch_unfair_lock_lock(&ctx->lock);
+		if (STAILQ_EMPTY(&ctx->item_listhead)) {
 			os_atomic_or(&_dispatch_workq_normal_pool.mask, wq_index_bit, relaxed);
 		}
-		STAILQ_INSERT_TAIL(&workq->item_listhead, wi, item_entry);
-		_dispatch_unfair_lock_unlock(&workq->lock);
+		STAILQ_INSERT_TAIL(&ctx->item_listhead, wi, item_entry);
+		_dispatch_unfair_lock_unlock(&ctx->lock);
 
 		if (unlikely(_dispatch_workq_runnable_workers < _dispatch_workq_max_runnable_workers)) {
 			_dispatch_workq_work_added();
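
The normal-pool branch above publishes the pool-mask bit only on the empty-to-nonempty transition, under the per-context lock, so steady-state enqueues skip the atomic RMW. A rough standalone rendering of that discipline, using a pthread mutex and C11 atomics as stand-ins for libdispatch's unfair lock and os_atomic_or (a sketch of the idea, not the library's code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <sys/queue.h>

struct item { STAILQ_ENTRY(item) entries; };

struct context { /* stand-in for dispatch_workq_context_s */
	STAILQ_HEAD(, item) head;
	pthread_mutex_t lock;
	int priority;
};

static _Atomic uint32_t pool_mask; /* stand-in for the pool's mask */

static void enqueue(struct context *ctx, struct item *wi)
{
	pthread_mutex_lock(&ctx->lock);
	/* Only the empty->nonempty transition must set the bit; later
	 * enqueues find it already published. */
	if (STAILQ_EMPTY(&ctx->head)) {
		atomic_fetch_or_explicit(&pool_mask, 0x1u << ctx->priority,
		    memory_order_relaxed);
	}
	STAILQ_INSERT_TAIL(&ctx->head, wi, entries);
	pthread_mutex_unlock(&ctx->lock);
}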
@@ -349,15 +349,15 @@ static void
 _dispatch_workq_add_control_item(void *op_code)
 {
 	dispatch_workq_item_t wi = _dispatch_workq_alloc_item(NULL, op_code);
-	dispatch_workq_workqueue_t workq = _dispatch_workq_normal_pool.wqs[0];
+	dispatch_workq_context_t ctx = _dispatch_workq_normal_pool.contexts[0];
 	unsigned int wq_index_bit = 1; // highest priority queue
 
-	_dispatch_unfair_lock_lock(&workq->lock);
-	if (STAILQ_EMPTY(&workq->item_listhead)) {
+	_dispatch_unfair_lock_lock(&ctx->lock);
+	if (STAILQ_EMPTY(&ctx->item_listhead)) {
 		os_atomic_or(&_dispatch_workq_normal_pool.mask, wq_index_bit, relaxed);
 	}
-	STAILQ_INSERT_TAIL(&workq->item_listhead, wi, item_entry);
-	_dispatch_unfair_lock_unlock(&workq->lock);
+	STAILQ_INSERT_TAIL(&ctx->item_listhead, wi, item_entry);
+	_dispatch_unfair_lock_unlock(&ctx->lock);
 
 	if (unlikely(_dispatch_workq_runnable_workers < _dispatch_workq_max_runnable_workers)) {
 		_dispatch_workq_work_added();
@@ -404,13 +404,13 @@ _dispatch_workq_overcommit_worker_main(void *context DISPATCH_UNUSED)
 	for (;;) {
 		unsigned int idx = __builtin_ffs(oc_pool->mask);
 		if (idx > 0) {
-			dispatch_workq_workqueue_t wq = oc_pool->wqs[idx - 1];
-			dispatch_workq_item_t work = STAILQ_FIRST(&wq->item_listhead);
+			dispatch_workq_context_t ctx = oc_pool->contexts[idx - 1];
+			dispatch_workq_item_t work = STAILQ_FIRST(&ctx->item_listhead);
 			if (work != NULL) {
 				/* Remove the first work item */
-				STAILQ_REMOVE_HEAD(&wq->item_listhead, item_entry);
-				if (STAILQ_EMPTY(&wq->item_listhead)) {
-					oc_pool->mask &= ~(0x1 << wq->priority);
+				STAILQ_REMOVE_HEAD(&ctx->item_listhead, item_entry);
+				if (STAILQ_EMPTY(&ctx->item_listhead)) {
+					oc_pool->mask &= ~(0x1 << ctx->priority);
 				}
 
 				/* Release pool mutex */
@@ -460,20 +460,20 @@ _dispatch_workq_normal_worker_main(void *context DISPATCH_UNUSED)
 	for (;;) {
 		int idx = __builtin_ffs(os_atomic_load(&pool->mask, relaxed));
 		if (idx > 0) {
-			dispatch_workq_workqueue_t wq = pool->wqs[idx - 1];
-			_dispatch_unfair_lock_lock(&wq->lock);
-			dispatch_workq_item_t work = STAILQ_FIRST(&wq->item_listhead);
+			dispatch_workq_context_t ctx = pool->contexts[idx - 1];
+			_dispatch_unfair_lock_lock(&ctx->lock);
+			dispatch_workq_item_t work = STAILQ_FIRST(&ctx->item_listhead);
 			if (unlikely(work == NULL)) {
 				// wasn't actually work due to racy read of mask
-				_dispatch_unfair_lock_unlock(&wq->lock);
+				_dispatch_unfair_lock_unlock(&ctx->lock);
 				continue;
 			} else {
 				// found work: remove it.
-				STAILQ_REMOVE_HEAD(&wq->item_listhead, item_entry);
-				if (STAILQ_EMPTY(&wq->item_listhead)) {
-					os_atomic_and(&pool->mask, ~(0x1 << wq->priority), relaxed);
+				STAILQ_REMOVE_HEAD(&ctx->item_listhead, item_entry);
+				if (STAILQ_EMPTY(&ctx->item_listhead)) {
+					os_atomic_and(&pool->mask, ~(0x1 << ctx->priority), relaxed);
 				}
-				_dispatch_unfair_lock_unlock(&wq->lock);
+				_dispatch_unfair_lock_unlock(&ctx->lock);
 
 				/* Execute the work item */
 				void (*func)(void *) = work->func;
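
A note on the worker loop in this last workqueue.c hunk: the mask is read without holding any context lock, so a bit can be stale by the time the lock is taken, and an empty list there simply means retry. Continuing the stand-in sketch from the enqueue example above (it reuses that sketch's struct context, struct item, and pool_mask; illustrative, not the library's code):

static struct item *dequeue_one(struct context **contexts)
{
	for (;;) {
		int idx = __builtin_ffs(atomic_load_explicit(&pool_mask,
		    memory_order_relaxed));
		if (idx == 0) return NULL; /* every queue looked empty */

		struct context *ctx = contexts[idx - 1];
		pthread_mutex_lock(&ctx->lock);
		struct item *wi = STAILQ_FIRST(&ctx->head);
		if (wi == NULL) { /* racy read: bit was stale, retry */
			pthread_mutex_unlock(&ctx->lock);
			continue;
		}
		STAILQ_REMOVE_HEAD(&ctx->head, entries);
		if (STAILQ_EMPTY(&ctx->head)) { /* nonempty->empty: clear bit */
			atomic_fetch_and_explicit(&pool_mask,
			    ~(0x1u << ctx->priority), memory_order_relaxed);
		}
		pthread_mutex_unlock(&ctx->lock);
		return wi;
	}
}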

src/workqueue/workqueue_internal.h (+1 −1)
@@ -27,7 +27,7 @@
 #ifndef __DISPATCH_WORKQUEUE_INTERNAL__
 #define __DISPATCH_WORKQUEUE_INTERNAL__
 
-typedef struct dispatch_workq_workqueue_s *pthread_workqueue_t;
+typedef struct dispatch_workq_context_s *pthread_workqueue_t;
 
 /* Work queue priority attributes. */
 #define WORKQ_HIGH_PRIOQUEUE	0
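
Since pthread_workqueue_t remains an opaque pointer typedef, only the struct tag changes; callers that hold the handle without dereferencing it recompile unchanged. For example, hypothetical client code like this is unaffected by the rename (the include path, fn, and arg are placeholders; dispatch_workq_get_wq and dispatch_workq_additem_np are the functions from the diff above):

#include "workqueue_internal.h" /* hypothetical include for this sketch */

static void submit_high(void (*fn)(void *), void *arg)
{
	pthread_workqueue_t wq;
	if (dispatch_workq_get_wq(&wq, WORKQ_HIGH_PRIOQUEUE, 0) == 0) {
		dispatch_workq_additem_np(wq, fn, arg);
	}
}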
