@@ -258,6 +258,28 @@ VALUE UM_queue_shift(VALUE self, VALUE queue) {
258
258
259
259
#endif
260
260
261
/* Context passed (cast to VALUE) through rb_ensure from UM_open to
 * UM_open_ensure, so the ensure callback knows which fd to close and
 * which machine object to close it on. Lives on UM_open's stack; valid
 * only for the duration of the rb_ensure call. */
struct um_open_ctx {
  VALUE self; // the UM (machine) Ruby object the fd was opened on
  VALUE fd;   // the file descriptor, as the Ruby VALUE returned by um_open
};
265
+
266
+ VALUE UM_open_ensure (VALUE arg ) {
267
+ struct um_open_ctx * ctx = (struct um_open_ctx * )arg ;
268
+ UM_close (ctx -> self , ctx -> fd );
269
+ return ctx -> self ;
270
+ }
271
+
272
+ VALUE UM_open (VALUE self , VALUE pathname , VALUE flags ) {
273
+ struct um * machine = get_machine (self );
274
+ VALUE ret = um_open (machine , pathname , NUM2INT (flags ), S_IRWXU );
275
+ if (rb_block_given_p ()) {
276
+ struct um_open_ctx ctx = { self , ret };
277
+ return rb_ensure (rb_yield , ret , UM_open_ensure , (VALUE )& ctx );
278
+ }
279
+ else
280
+ return ret ;
281
+ }
282
+
261
283
VALUE UM_kernel_version (VALUE self ) {
262
284
return INT2NUM (UM_KERNEL_VERSION );
263
285
}
@@ -269,41 +291,42 @@ void Init_UM(void) {
269
291
rb_define_alloc_func (cUM , UM_allocate );
270
292
271
293
rb_define_method (cUM , "initialize" , UM_initialize , 0 );
272
- rb_define_method (cUM , "setup_buffer_ring" , UM_setup_buffer_ring , 2 );
273
294
rb_define_method (cUM , "pending_count" , UM_pending_count , 0 );
295
+ rb_define_method (cUM , "setup_buffer_ring" , UM_setup_buffer_ring , 2 );
296
+ rb_define_singleton_method (cUM , "kernel_version" , UM_kernel_version , 0 );
297
+
274
298
275
- rb_define_method (cUM , "snooze" , UM_snooze , 0 );
276
- rb_define_method (cUM , "yield" , UM_yield , 0 );
277
299
rb_define_method (cUM , "schedule" , UM_schedule , 2 );
300
+ rb_define_method (cUM , "snooze" , UM_snooze , 0 );
278
301
rb_define_method (cUM , "timeout" , UM_timeout , 2 );
302
+ rb_define_method (cUM , "yield" , UM_yield , 0 );
279
303
280
- rb_define_method (cUM , "sleep" , UM_sleep , 1 );
304
+ rb_define_method (cUM , "close" , UM_close , 1 );
305
+ rb_define_method (cUM , "open" , UM_open , 2 );
281
306
rb_define_method (cUM , "read" , UM_read , -1 );
282
307
rb_define_method (cUM , "read_each" , UM_read_each , 2 );
308
+ rb_define_method (cUM , "sleep" , UM_sleep , 1 );
283
309
rb_define_method (cUM , "write" , UM_write , -1 );
284
- rb_define_method (cUM , "close" , UM_close , 1 );
285
310
286
311
rb_define_method (cUM , "accept" , UM_accept , 1 );
287
312
rb_define_method (cUM , "accept_each" , UM_accept_each , 1 );
288
- rb_define_method (cUM , "socket " , UM_socket , 4 );
313
+ rb_define_method (cUM , "bind " , UM_bind , 3 );
289
314
rb_define_method (cUM , "connect" , UM_connect , 3 );
290
- rb_define_method (cUM , "send" , UM_send , 4 );
315
+ rb_define_method (cUM , "getsockopt" , UM_getsockopt , 3 );
316
+ rb_define_method (cUM , "listen" , UM_listen , 2 );
291
317
rb_define_method (cUM , "recv" , UM_recv , 4 );
292
318
rb_define_method (cUM , "recv_each" , UM_recv_each , 3 );
293
- rb_define_method (cUM , "bind" , UM_bind , 3 );
294
- rb_define_method (cUM , "listen" , UM_listen , 2 );
295
- rb_define_method (cUM , "getsockopt" , UM_getsockopt , 3 );
319
+ rb_define_method (cUM , "send" , UM_send , 4 );
296
320
rb_define_method (cUM , "setsockopt" , UM_setsockopt , 4 );
321
+ rb_define_method (cUM , "socket" , UM_socket , 4 );
297
322
298
323
#ifdef HAVE_IO_URING_PREP_FUTEX
299
- rb_define_method (cUM , "synchronize" , UM_mutex_synchronize , 1 );
300
- rb_define_method (cUM , "push" , UM_queue_push , 2 );
301
324
rb_define_method (cUM , "pop" , UM_queue_pop , 1 );
302
- rb_define_method (cUM , "unshift " , UM_queue_unshift , 2 );
325
+ rb_define_method (cUM , "push " , UM_queue_push , 2 );
303
326
rb_define_method (cUM , "shift" , UM_queue_shift , 1 );
327
+ rb_define_method (cUM , "synchronize" , UM_mutex_synchronize , 1 );
328
+ rb_define_method (cUM , "unshift" , UM_queue_unshift , 2 );
304
329
#endif
305
330
306
- rb_define_singleton_method (cUM , "kernel_version" , UM_kernel_version , 0 );
307
-
308
331
um_define_net_constants (cUM );
309
332
}
0 commit comments