
Commit 407f3c1

Author: Michael Agun
Update ringbuffer API.
1 parent 938d642 · commit 407f3c1

File tree

1 file changed: +153 -17 lines

docs/RingBuffer.md

@@ -19,7 +19,30 @@ Mapped memory consumer:
3. Directly read records from the producer pages (and update consumer offset as we read).
4. Call `WaitForSingleObject`/`WaitForMultipleObjects` as needed to wait for new data to be available.

### Differences from Linux API

Linux has only polling ring buffer consumers, even when callbacks are used.
On Linux, user code can call `ring_buffer__consume()` to invoke the callback on all available data,
and `ring_buffer__poll()` to wait for data if needed and then consume the available data.
Linux consumers can also read directly from the mapped memory by using `mmap()` to map the data
into user space and `ring_buffer__epoll_fd()` to get an epoll wait handle.

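For contrast, a minimal Linux-side libbpf consumer might look like the sketch below (illustrative only; `handle_event` and `map_fd` are placeholder names, not part of this document's API):

```c
// Linux (libbpf) pattern: the callback only runs when the consumer polls or consumes.
static int handle_event(void *ctx, void *data, size_t size)
{
    // … process one record …
    return 0;
}

struct ring_buffer *rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
if (rb == NULL)
    return 1;
while (ring_buffer__poll(rb, 100 /* timeout in ms */) >= 0) {
    // Records are passed to handle_event() during the poll call.
}
ring_buffer__free(rb);
```
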
On Windows, asynchronous events are supported by default,
so nothing extra needs to be done for callbacks to be invoked.

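For example, a minimal sketch of this default (automatic callback) mode, assuming `sample_cb` and `map_fd` are defined as in the examples later in this document:

```c
// Default flags: the callback is invoked automatically as records arrive,
// so no poll loop is needed.
struct ring_buffer *rb = ring_buffer__new(map_fd, sample_cb, NULL, NULL);
if (rb == NULL)
    return 1;
// … do other work; sample_cb runs as data becomes available …
ring_buffer__free(rb);
```
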
If the `RINGBUF_FLAG_NO_AUTO_CALLBACK` flag is set, callbacks will not be invoked automatically; instead, `ring_buffer__poll()`
should be called to poll for available data and invoke the callback. On Windows, a timeout of zero can be passed to
`ring_buffer__poll()` to get the same behaviour as `ring_buffer__consume()`.

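For example (sketch only, assuming `rb` was created with `RINGBUF_FLAG_NO_AUTO_CALLBACK` set):

```c
// Non-blocking: invoke the callback only on records that are already available
// (the same behaviour as ring_buffer__consume() on Linux).
int consumed = ring_buffer__poll(rb, 0);

// Blocking: wait until data is ready, then invoke the callback.
consumed = ring_buffer__poll(rb, -1);
```
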
For direct memory mapped consumers on Windows, use `ebpf_ring_buffer_get_buffer()` to get pointers to the producer and consumer
pages mapped into user space, and `ebpf_ring_buffer_get_wait_handle()` to get the SynchronizationEvent (auto-reset) KEVENT
to use with `WaitForSingleObject`/`WaitForMultipleObjects`.

Similar to the Linux memory layout, the first page of the producer and consumer memory is the "producer page" and "consumer page",
which contain the 64-bit producer and consumer offsets as their first 8 bytes.
Only the producer may update the producer offset, and only the consumer may update the consumer offset.

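As an illustrative sketch (not part of the API), the offsets can be read directly from the pages returned by `ebpf_ring_buffer_get_buffer()`:

```c
// The first 8 bytes of each page hold the 64-bit offset.
volatile const uint64_t *producer_offset = (volatile const uint64_t *)producer; // read-only for the consumer
volatile uint64_t *consumer_offset = (volatile uint64_t *)consumer;             // advanced by the consumer
uint64_t outstanding = *producer_offset - *consumer_offset; // bytes of allocated (not yet consumed) records
```
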
## ebpf-for-windows API Changes

### Changes to ebpf helper functions

@@ -78,22 +101,27 @@ ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx,
    const struct ring_buffer_opts *opts);

/**
 * @brief Poll ringbuf for new data.
 *
 * Poll for available data and consume records, if any are available.
 * Returns the number of records consumed (or INT_MAX, whichever is less), or a
 * negative number if any of the registered callbacks returned an error.
 *
 * If timeout_ms is zero, poll will not wait, but will only invoke the callback on records that are already available.
 * If timeout_ms is -1, poll will wait until data is ready (no timeout).
 *
 * This function is only supported when the RINGBUF_FLAG_NO_AUTO_CALLBACK flag is set.
 *
 * @param[in] rb Pointer to ring buffer manager.
 * @param[in] timeout_ms Maximum time to wait (in milliseconds).
 *
 * @returns Number of records consumed (or INT_MAX, whichever is less), or a negative number on error.
 */
int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms);

/**
 * @brief Frees a ring buffer manager.
 *
 * @param[in] rb Pointer to ring buffer manager to be freed.
 */
void ring_buffer__free(struct ring_buffer *rb);
```
@@ -104,24 +132,99 @@ void ring_buffer__free(struct ring_buffer *rb);
/**
 * Get pointers to the mapped producer and consumer pages.
 *
 * @param[in] map_fd File descriptor to ring buffer map.
 * @param[out] producer Pointer to start of read-only mapped producer pages.
 * @param[out] consumer Pointer to start of read-write mapped consumer page.
 *
 * @returns EBPF_SUCCESS on success, or an error code.
 */
ebpf_result_t ebpf_ring_buffer_get_buffer(fd_t map_fd, void **producer, void **consumer);

/**
 * Get the wait handle to use with WaitForSingleObject/WaitForMultipleObjects.
 *
 * @param[in] map_fd File descriptor to ring buffer map.
 *
 * @returns Wait handle.
 */
HANDLE ebpf_ring_buffer_get_wait_handle(fd_t map_fd);
```
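A minimal usage sketch for these two functions (error handling elided; `map_fd` is assumed to be an open ring buffer map fd):

```c
void *producer = NULL;
void *consumer = NULL;
ebpf_result_t result = ebpf_ring_buffer_get_buffer(map_fd, &producer, &consumer);
if (result != EBPF_SUCCESS) {
    // … handle error …
}

HANDLE wait_handle = ebpf_ring_buffer_get_wait_handle(map_fd);
if (wait_handle == NULL) {
    // … handle error …
}

// Wait (with a timeout) for the producer to signal new data,
// then read records starting at the consumer offset.
DWORD wait_status = WaitForSingleObject(wait_handle, 1000);
if (wait_status == WAIT_OBJECT_0) {
    // … read records from the producer pages and advance the consumer offset …
}
```
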
### New user-space helpers for memory mapped consumer

```c
/**
 * The below helpers simplify memory-mapped consumer logic
 * by abstracting operations on the producer and consumer offsets.
 */

/**
 * Get pointer to the consumer offset from the consumer page.
 *
 * @param[in] cons Pointer to start of read-write mapped consumer page.
 *
 * @returns Pointer to consumer offset.
 */
uint64_t* rb__consumer_offset(void *cons);

/**
 * Get pointer to the producer offset from the producer page.
 *
 * @param[in] prod Pointer to start of read-only mapped producer pages.
 *
 * @returns Pointer to producer offset.
 */
volatile const uint64_t* rb__producer_offset(volatile const void *prod);

/**
 * Check whether the consumer offset == producer offset.
 *
 * Note that not empty doesn't mean data is ready, just that records have been allocated.
 * You still need to check the locked and discarded bits of the record header to determine whether a record is ready.
 *
 * @param[in] prod Pointer to start of read-only mapped producer pages.
 * @param[in] cons Pointer to start of read-write mapped consumer page.
 *
 * @returns 0 if ring buffer is empty, 1 otherwise.
 */
int rb__empty(volatile const void *prod, const void *cons);

/**
 * Clear the ring buffer by flushing all completed and in-progress records.
 *
 * This helper just sets the consumer offset to the producer offset.
 *
 * @param[in] prod Pointer to start of read-only mapped producer pages.
 * @param[in,out] cons Pointer to start of read-write mapped consumer page.
 */
void rb__flush(volatile const void *prod, void *cons);

/**
 * Advance the consumer offset to the next record (if any).
 *
 * @param[in] prod Pointer to start of read-only mapped producer pages.
 * @param[in,out] cons Pointer to start of read-write mapped consumer page.
 */
void rb__next_record(volatile const void *prod, void *cons);

/**
 * Get the record at the current ring buffer offset.
 *
 * @param[in] prod Pointer to start of read-only mapped producer pages.
 * @param[in] cons Pointer to start of read-write mapped consumer page.
 * @param[out] record Pointer to the current record (if any).
 *
 * @returns E_SUCCESS (0) if a record is ready, E_LOCKED if the record is still locked, E_EMPTY if the consumer has caught up.
 */
int rb__get_record(volatile const void *prod, const void *cons, volatile const void** record);
```

## Ringbuffer consumer

### Mapped memory consumer example

This consumer accesses records directly from the producer memory and updates the consumer offset itself, to show the logic. Normally user code should use the ring buffer helpers
(see the second example below) to simplify the logic.

```c++

@@ -244,16 +347,19 @@ Exit:

### Simplified polling ringbuf consumer

This consumer uses the newly added helpers to consume the ring buffer.

```c
// Initialize wait handle for map.
HANDLE wait_handle = ebpf_ring_buffer_get_wait_handle(map_fd);
if (!wait_handle) {
    // … log error …
    goto Exit;
}

uint32_t wait_err = 0;

// Consumer loop.
for (;;) {
    for (; !(err = rb__get_record(prod, cons, &record)); rb__next_record(prod, cons)) {
        // Data is now in record->data[0 ... record->length-1].
@@ -267,12 +373,42 @@ for(;;) {
        // … log error …
        break;
    }
    DWORD wait_status = WaitForSingleObject(wait_handle, INFINITE);

    if (wait_status != WAIT_OBJECT_0) { // No notification.
        wait_err = GetLastError();
        if (wait_err == /* terminal error */) {
            // … log error …
            break;
        }
    }
}

```

### Polling ring buffer consumer (Linux-style)

```c
// Sample callback (matches the ring_buffer_sample_fn signature).
int sample_cb(void *ctx, void *data, size_t size) {
    // … business logic to handle record …
    return 0;
}

// Consumer code.
struct ring_buffer_opts opts;
opts.sz = sizeof(opts);
opts.flags = RINGBUF_FLAG_NO_AUTO_CALLBACK; // No automatic callbacks.

fd_t map_fd = bpf_obj_get(rb_map_name.c_str());
if (map_fd == ebpf_fd_invalid) return 1;

struct ring_buffer *rb = ring_buffer__new(map_fd, sample_cb, NULL, &opts);
if (rb == NULL) return 1;

// Now loop as long as there isn't an error.
while (ring_buffer__poll(rb, -1) >= 0) {
    // Data is processed by the sample_cb callback.
}

ring_buffer__free(rb);
```
