@@ -39,7 +39,7 @@ sched_enqueue(struct sched *sched, struct exec_context *ctx)
 {
         xlog_trace("%s: enqueueing ctx %p", __func__, (void *)ctx);
         assert(sched == ctx->sched);
-        LIST_INSERT_TAIL(&sched->runq, ctx, rq);
+        SLIST_INSERT_TAIL(&sched->runq, ctx, rq);
 }
 
 #define RR_INTERVAL_MS 100
@@ -50,9 +50,9 @@ sched_run(struct sched *sched, struct exec_context *caller)
         struct runq *q = &sched->runq;
         struct exec_context *ctx;
 
-        while ((ctx = LIST_FIRST(q)) != NULL) {
+        while ((ctx = SLIST_FIRST(q)) != NULL) {
                 int ret;
-                LIST_REMOVE(q, ctx, rq);
+                SLIST_REMOVE_HEAD(q, ctx, rq);
                 xlog_trace("%s: running ctx %p", __func__, (void *)ctx);
                 ret = abstime_from_reltime_ms(
                         CLOCK_MONOTONIC, &sched->next_resched, RR_INTERVAL_MS);
@@ -71,7 +71,7 @@ sched_run(struct sched *sched, struct exec_context *caller)
                 if (IS_RESTARTABLE(ret) && ret != ETOYWASMUSERINTERRUPT) {
                         xlog_trace("%s: re-enqueueing ctx %p", __func__,
                                    (void *)ctx);
-                        LIST_INSERT_TAIL(q, ctx, rq);
+                        SLIST_INSERT_TAIL(q, ctx, rq);
                         continue;
                 }
                 xlog_trace("%s: finishing ctx %p", __func__, (void *)ctx);
@@ -87,7 +87,7 @@ sched_run(struct sched *sched, struct exec_context *caller)
 void
 sched_init(struct sched *sched)
 {
-        LIST_HEAD_INIT(&sched->runq);
+        SLIST_HEAD_INIT(&sched->runq);
 }
 
 void
@@ -102,7 +102,7 @@ sched_need_resched(struct sched *sched)
         int ret;
 
         /* if we are the only thread, no point to resched. */
-        if (LIST_FIRST(&sched->runq) == NULL) {
+        if (SLIST_FIRST(&sched->runq) == NULL) {
                 return false;
         }
 
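What the change amounts to: the scheduler's run queue only ever appends at the tail (sched_enqueue, and the re-enqueue of a restartable ctx) and pops at the head (sched_run), so the doubly-linked LIST_* macros are swapped for singly-linked SLIST_* ones, which presumably need only a forward link per exec_context. Below is a minimal, self-contained sketch of macros with the names and argument order the diff uses (SLIST_HEAD_INIT, SLIST_FIRST, SLIST_INSERT_TAIL, SLIST_REMOVE_HEAD); the SLIST_HEAD/SLIST_ENTRY helpers and the member names (sh_first, sh_tailnextp, se_next) are illustrative assumptions, not toywasm's actual slist.h.

/*
 * Sketch of singly-linked tail-queue macros, assumed shape only.
 * The head keeps a pointer to the first element and a pointer to the
 * tail's "next" slot, so tail insertion stays O(1) while each element
 * carries a single forward link.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define SLIST_HEAD(TYPE)                                                  \
        struct {                                                          \
                struct TYPE *sh_first;                                    \
                struct TYPE **sh_tailnextp;                               \
        }

#define SLIST_ENTRY(TYPE)                                                 \
        struct {                                                          \
                struct TYPE *se_next;                                     \
        }

#define SLIST_HEAD_INIT(HEAD)                                             \
        do {                                                              \
                (HEAD)->sh_first = NULL;                                  \
                (HEAD)->sh_tailnextp = &(HEAD)->sh_first;                 \
        } while (0)

#define SLIST_FIRST(HEAD) ((HEAD)->sh_first)

#define SLIST_INSERT_TAIL(HEAD, ELEM, FIELD)                              \
        do {                                                              \
                (ELEM)->FIELD.se_next = NULL;                             \
                *(HEAD)->sh_tailnextp = (ELEM);                           \
                (HEAD)->sh_tailnextp = &(ELEM)->FIELD.se_next;            \
        } while (0)

#define SLIST_REMOVE_HEAD(HEAD, ELEM, FIELD)                              \
        do {                                                              \
                assert((HEAD)->sh_first == (ELEM));                       \
                (HEAD)->sh_first = (ELEM)->FIELD.se_next;                 \
                if ((HEAD)->sh_first == NULL) {                           \
                        /* list became empty; reset the tail pointer */   \
                        (HEAD)->sh_tailnextp = &(HEAD)->sh_first;         \
                }                                                         \
        } while (0)

/* tiny usage example mirroring the runq pattern in the diff */
struct exec_context {
        int id;
        SLIST_ENTRY(exec_context) rq;
};

int
main(void)
{
        SLIST_HEAD(exec_context) runq;
        struct exec_context a = {.id = 1};
        struct exec_context b = {.id = 2};
        struct exec_context *ctx;

        SLIST_HEAD_INIT(&runq);
        SLIST_INSERT_TAIL(&runq, &a, rq);
        SLIST_INSERT_TAIL(&runq, &b, rq);
        while ((ctx = SLIST_FIRST(&runq)) != NULL) {
                SLIST_REMOVE_HEAD(&runq, ctx, rq);
                printf("ctx %d\n", ctx->id);
        }
        return 0;
}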