Skip to content

Commit b4d0dc8

Browse files
committed
Add #prep_timeout
1 parent 17a22ed commit b4d0dc8

11 files changed

+481
-38
lines changed

CHANGELOG.md

+2
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
- Add `#prep_timeout` and `AsyncOp`
2+
13
# 2024-11-14 Version 0.5
24

35
- Add `#waitpid`

ext/um/um.c

+5-4
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,7 @@ static inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe)
7575
if (unlikely((cqe->res == -ECANCELED) && (op->flags & OP_F_IGNORE_CANCELED))) return;
7676

7777
op->flags |= OP_F_COMPLETED;
78-
if (unlikely(op->flags & OP_F_TRANSIENT))
78+
if (op->flags & OP_F_TRANSIENT)
7979
um_op_transient_remove(machine, op);
8080

8181
if (op->flags & OP_F_MULTISHOT) {
@@ -88,7 +88,8 @@ static inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe)
8888
op->result.flags = cqe->flags;
8989
}
9090

91-
um_runqueue_push(machine, op);
91+
if (!(op->flags & OP_F_ASYNC))
92+
um_runqueue_push(machine, op);
9293
}
9394

9495
// copied from liburing/queue.c
@@ -180,7 +181,7 @@ inline VALUE um_fiber_switch(struct um *machine) {
180181
}
181182
}
182183

183-
static inline void um_submit_cancel_op(struct um *machine, struct um_op *op) {
184+
void um_submit_cancel_op(struct um *machine, struct um_op *op) {
184185
struct io_uring_sqe *sqe = um_get_sqe(machine, NULL);
185186
io_uring_prep_cancel64(sqe, (long long)op, 0);
186187
}
@@ -260,7 +261,7 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
260261
static ID ID_new = 0;
261262
if (!ID_new) ID_new = rb_intern("new");
262263

263-
struct um_op *op = malloc(sizeof(struct um_op));
264+
struct um_op *op = um_op_alloc(machine);
264265
um_prep_op(machine, op, OP_TIMEOUT);
265266
op->ts = um_double_to_timespec(NUM2DBL(interval));
266267
RB_OBJ_WRITE(machine->self, &op->fiber, rb_fiber_current());

ext/um/um.h

+20-2
Original file line numberDiff line numberDiff line change
@@ -48,8 +48,9 @@ enum op_kind {
4848

4949
#define OP_F_COMPLETED (1U << 0)
5050
#define OP_F_TRANSIENT (1U << 1)
51-
#define OP_F_IGNORE_CANCELED (1U << 2)
52-
#define OP_F_MULTISHOT (1U << 3)
51+
#define OP_F_ASYNC (1U << 2)
52+
#define OP_F_IGNORE_CANCELED (1U << 3)
53+
#define OP_F_MULTISHOT (1U << 4)
5354

5455
struct um_op_result {
5556
__s32 res;
@@ -66,6 +67,7 @@ struct um_op {
6667

6768
VALUE fiber;
6869
VALUE value;
70+
VALUE async_op;
6971

7072
struct um_op_result result;
7173
struct um_op_result *multishot_result_tail;
@@ -136,10 +138,19 @@ struct um_queue {
136138
uint32_t count;
137139
};
138140

141+
struct um_async_op {
142+
VALUE self;
143+
144+
struct um *machine;
145+
struct um_op *op;
146+
};
147+
139148
extern VALUE cUM;
140149
extern VALUE cMutex;
141150
extern VALUE cQueue;
151+
extern VALUE cAsyncOp;
142152

153+
struct um *um_get_machine(VALUE self);
143154
void um_setup(VALUE self, struct um *machine);
144155
void um_teardown(struct um *machine);
145156

@@ -178,6 +189,7 @@ struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op);
178189

179190
VALUE um_fiber_switch(struct um *machine);
180191
VALUE um_await(struct um *machine);
192+
void um_submit_cancel_op(struct um *machine, struct um_op *op);
181193
void um_cancel_and_wait(struct um *machine, struct um_op *op);
182194
int um_check_completion(struct um *machine, struct um_op *op);
183195

@@ -206,6 +218,12 @@ VALUE um_listen(struct um *machine, int fd, int backlog);
206218
VALUE um_getsockopt(struct um *machine, int fd, int level, int opt);
207219
VALUE um_setsockopt(struct um *machine, int fd, int level, int opt, int value);
208220

221+
void um_async_op_set(VALUE self, struct um *machine, struct um_op *op);
222+
VALUE um_async_op_await(struct um_async_op *async_op);
223+
void um_async_op_cancel(struct um_async_op *async_op);
224+
225+
VALUE um_prep_timeout(struct um *machine, double interval);
226+
209227
struct um_mutex *Mutex_data(VALUE self);
210228
struct um_queue *Queue_data(VALUE self);
211229

ext/um/um_async_op.c

+40
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
#include "um.h"
2+
#include <stdlib.h>
3+
4+
VALUE um_prep_timeout(struct um *machine, double interval) {
5+
static ID ID_new = 0;
6+
if (!ID_new) ID_new = rb_intern("new");
7+
8+
struct um_op *op = malloc(sizeof(struct um_op));
9+
um_prep_op(machine, op, OP_TIMEOUT);
10+
op->ts = um_double_to_timespec(interval);
11+
op->flags = OP_F_TRANSIENT | OP_F_ASYNC;
12+
13+
VALUE obj = rb_funcall(cAsyncOp, rb_intern_const("new"), 0);
14+
um_async_op_set(obj, machine, op);
15+
16+
RB_OBJ_WRITE(machine->self, &op->async_op, obj);
17+
18+
struct io_uring_sqe *sqe = um_get_sqe(machine, op);
19+
io_uring_prep_timeout(sqe, &op->ts, 0, 0);
20+
21+
um_op_transient_add(machine, op);
22+
23+
return obj;
24+
}
25+
26+
VALUE um_async_op_await(struct um_async_op *async_op) {
27+
RB_OBJ_WRITE(async_op->machine->self, &async_op->op->fiber, rb_fiber_current());
28+
async_op->op->flags &= ~OP_F_ASYNC;
29+
30+
VALUE ret = um_fiber_switch(async_op->machine);
31+
if (!um_op_completed_p(async_op->op))
32+
um_cancel_and_wait(async_op->machine, async_op->op);
33+
34+
raise_if_exception(ret);
35+
return INT2NUM(async_op->op->result.res);
36+
}
37+
38+
// Submits a cancellation SQE for the op underlying the given async op.
// The cancellation outcome is delivered asynchronously via its CQE.
void um_async_op_cancel(struct um_async_op *async_op) {
  struct um *machine = async_op->machine;
  um_submit_cancel_op(machine, async_op->op);
}

ext/um/um_async_op_class.c

+136
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,136 @@
1+
#include "um.h"
2+
#include <stdlib.h>
3+
4+
VALUE cAsyncOp;
5+
6+
VALUE SYM_timeout;
7+
8+
static void AsyncOp_mark(void *ptr) {
9+
struct um_async_op *async_op = ptr;
10+
rb_gc_mark_movable(async_op->self);
11+
rb_gc_mark_movable(async_op->machine->self);
12+
}
13+
14+
static void AsyncOp_compact(void *ptr) {
15+
struct um_async_op *async_op = ptr;
16+
async_op->self = rb_gc_location(async_op->self);
17+
}
18+
19+
static size_t AsyncOp_size(const void *ptr) {
20+
return sizeof(struct um_async_op);
21+
}
22+
23+
static void AsyncOp_free(void *ptr) {
24+
struct um_async_op *async_op = ptr;
25+
um_op_free(async_op->machine, async_op->op);
26+
free(ptr);
27+
}
28+
29+
// Typed-data descriptor for UringMachine::AsyncOp.
// WB_PROTECTED: all VALUE reference updates go through RB_OBJ_WRITE;
// FREE_IMMEDIATELY: AsyncOp_free runs as soon as the object is swept.
static const rb_data_type_t AsyncOp_type = {
  "UringMachine::AsyncOp",
  {AsyncOp_mark, AsyncOp_free, AsyncOp_size, AsyncOp_compact},
  0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
};
34+
35+
// Allocation function for UringMachine::AsyncOp. Uses calloc so the GC
// callbacks (mark/compact/free) observe NULL/zero fields even if they run
// before #initialize / um_async_op_set — malloc'd garbage here would be
// dereferenced by AsyncOp_mark and AsyncOp_free.
static VALUE AsyncOp_allocate(VALUE klass) {
  struct um_async_op *async_op = calloc(1, sizeof(struct um_async_op));
  if (!async_op)
    rb_raise(rb_eNoMemError, "Failed to allocate AsyncOp");
  return TypedData_Wrap_Struct(klass, &AsyncOp_type, async_op);
}
39+
40+
// Returns the um_async_op struct wrapped by the given Ruby object.
// NOTE(review): non-static `inline` in a .c file has fragile C99 linkage
// unless an extern declaration exists in a header — TODO confirm um.h
// declares this, or make it `static inline`.
inline struct um_async_op *AsyncOp_data(VALUE self) {
  struct um_async_op *data = RTYPEDDATA_DATA(self);
  return data;
}
43+
44+
// Ruby-level #initialize: reset all fields to zero and record the
// wrapping Ruby object. machine/op are attached later via
// um_async_op_set().
VALUE AsyncOp_initialize(VALUE self) {
  struct um_async_op *async_op = AsyncOp_data(self);
  *async_op = (struct um_async_op){0}; // equivalent to memset(..., 0, sizeof ...)
  async_op->self = self;
  return self;
}
50+
51+
// Binds a machine and a prepared op to the given AsyncOp object. Called
// from C right after the op is submitted; not exposed to Ruby.
void um_async_op_set(VALUE self, struct um *machine, struct um_op *op) {
  struct um_async_op *async_op = AsyncOp_data(self);
  async_op->op = op;
  async_op->machine = machine;
}
56+
57+
inline void raise_on_missing_op(struct um_async_op *async_op) {
58+
if (!async_op->op)
59+
rb_raise(rb_eRuntimeError, "Missing op");
60+
}
61+
62+
inline int async_op_is_done(struct um_async_op *async_op) {
63+
return (async_op->op->flags & OP_F_COMPLETED);
64+
}
65+
66+
// UringMachine::AsyncOp#kind: returns a symbol identifying the kind of
// the underlying op. Currently only :timeout is supported; anything else
// raises RuntimeError.
VALUE AsyncOp_kind(VALUE self) {
  struct um_async_op *async_op = AsyncOp_data(self);
  raise_on_missing_op(async_op);

  if (async_op->op->kind == OP_TIMEOUT)
    return SYM_timeout;

  rb_raise(rb_eRuntimeError, "Invalid op kind");
}
77+
78+
// UringMachine::AsyncOp#done?: whether the underlying op has completed.
VALUE AsyncOp_done_p(VALUE self) {
  struct um_async_op *async_op = AsyncOp_data(self);
  raise_on_missing_op(async_op);

  if (async_op_is_done(async_op)) return Qtrue;
  return Qfalse;
}
84+
85+
// UringMachine::AsyncOp#result: the op's raw result code as an Integer,
// or nil while the op is still pending.
VALUE AsyncOp_result(VALUE self) {
  struct um_async_op *async_op = AsyncOp_data(self);
  raise_on_missing_op(async_op);

  if (!async_op_is_done(async_op)) return Qnil;
  return INT2NUM(async_op->op->result.res);
}
91+
92+
// UringMachine::AsyncOp#cancelled?: nil while pending; otherwise whether
// the op completed with -ECANCELED.
VALUE AsyncOp_cancelled_p(VALUE self) {
  struct um_async_op *async_op = AsyncOp_data(self);
  raise_on_missing_op(async_op);

  if (!async_op_is_done(async_op)) return Qnil;

  int cancelled = (async_op->op->result.res == -ECANCELED);
  return cancelled ? Qtrue : Qfalse;
}
100+
101+
// UringMachine::AsyncOp#await (aliased as #join): returns the result code
// immediately if the op has already completed; otherwise blocks the
// current fiber until it does.
VALUE AsyncOp_await(VALUE self) {
  struct um_async_op *async_op = AsyncOp_data(self);
  raise_on_missing_op(async_op);

  return async_op_is_done(async_op)
    ? INT2NUM(async_op->op->result.res)
    : um_async_op_await(async_op);
}
110+
111+
// UringMachine::AsyncOp#cancel: requests cancellation of the underlying
// op unless it has already completed. Returns self.
VALUE AsyncOp_cancel(VALUE self) {
  struct um_async_op *async_op = AsyncOp_data(self);
  raise_on_missing_op(async_op);

  int done = async_op_is_done(async_op);
  if (!done) um_async_op_cancel(async_op);
  return self;
}
120+
121+
// Defines the UringMachine::AsyncOp class, its allocator and its methods.
void Init_AsyncOp(void) {
  cAsyncOp = rb_define_class_under(cUM, "AsyncOp", rb_cObject);
  rb_define_alloc_func(cAsyncOp, AsyncOp_allocate);

  rb_define_method(cAsyncOp, "initialize", AsyncOp_initialize, 0);

  // inspection
  rb_define_method(cAsyncOp, "kind", AsyncOp_kind, 0);
  rb_define_method(cAsyncOp, "done?", AsyncOp_done_p, 0);
  rb_define_method(cAsyncOp, "result", AsyncOp_result, 0);
  rb_define_method(cAsyncOp, "cancelled?", AsyncOp_cancelled_p, 0);

  // blocking / control
  rb_define_method(cAsyncOp, "await", AsyncOp_await, 0);
  rb_define_method(cAsyncOp, "join", AsyncOp_await, 0); // alias of #await
  rb_define_method(cAsyncOp, "cancel", AsyncOp_cancel, 0);

  // symbols interned via rb_intern are immortal, so no GC registration
  // is needed for SYM_timeout
  SYM_timeout = ID2SYM(rb_intern("timeout"));
}

0 commit comments

Comments
 (0)