@@ -16,14 +16,14 @@ extern "C" {
     typedef void * ggml_backend_buffer_type_context_t;
 
     struct ggml_backend_buffer_type_i {
-        const char *          (*get_name)        (ggml_backend_buffer_type_t buft);
-        ggml_backend_buffer_t (*alloc_buffer)    (ggml_backend_buffer_type_t buft, size_t size);
-        size_t                (*get_alignment)   (ggml_backend_buffer_type_t buft); // tensor alignment
-        size_t                (*get_alloc_size)  (ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding
-        bool                  (*supports_backend)(ggml_backend_buffer_type_t buft, ggml_backend_t backend); // check if the buffer type is usable by the backend
+        const char *          (*GGML_CALL get_name)        (ggml_backend_buffer_type_t buft);
+        ggml_backend_buffer_t (*GGML_CALL alloc_buffer)    (ggml_backend_buffer_type_t buft, size_t size);
+        size_t                (*GGML_CALL get_alignment)   (ggml_backend_buffer_type_t buft); // tensor alignment
+        size_t                (*GGML_CALL get_alloc_size)  (ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding
+        bool                  (*GGML_CALL supports_backend)(ggml_backend_buffer_type_t buft, ggml_backend_t backend); // check if the buffer type is usable by the backend
         // check if tensor data is in host memory
         // should be equivalent to supports_backend(buft, ggml_backend_cpu_init())
-        bool                  (*is_host)          (ggml_backend_buffer_type_t buft);
+        bool                  (*GGML_CALL is_host)         (ggml_backend_buffer_type_t buft);
     };
 
     struct ggml_backend_buffer_type {
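Note on the qualifier itself (not part of this diff): GGML_CALL is plausibly a calling-convention macro that pins every function pointer crossing the backend boundary to a single ABI, so that code built by a different compiler (e.g. nvcc for the CUDA backend) still matches the core library. A sketch of the kind of definition this implies; the real macro lives in ggml.h and may differ:

    // Assumed definition: on multiplatform builds, pin non-Windows code to the
    // MS ABI so function pointers shared across compilation units always agree.
    #ifdef GGML_MULTIPLATFORM
    #    if defined(_WIN32)
    #        define GGML_CALL
    #    else
    #        define GGML_CALL __attribute__((__ms_abi__))
    #    endif
    #else
    #    define GGML_CALL
    #endif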
@@ -35,15 +35,15 @@ extern "C" {
     typedef void * ggml_backend_buffer_context_t;
 
     struct ggml_backend_buffer_i {
-        const char * (*get_name)   (ggml_backend_buffer_t buffer);
-        void         (*free_buffer)(ggml_backend_buffer_t buffer);
-        void *       (*get_base)   (ggml_backend_buffer_t buffer);
-        void         (*init_tensor)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
-        void         (*set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
-        void         (*get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
-        bool         (*cpy_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst); // dst is in the buffer, src may be in any buffer
-        void         (*clear)      (ggml_backend_buffer_t buffer, uint8_t value);
-        void         (*reset)      (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
+        const char * (*GGML_CALL get_name)   (ggml_backend_buffer_t buffer);
+        void         (*GGML_CALL free_buffer)(ggml_backend_buffer_t buffer);
+        void *       (*GGML_CALL get_base)   (ggml_backend_buffer_t buffer);
+        void         (*GGML_CALL init_tensor)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+        void         (*GGML_CALL set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+        void         (*GGML_CALL get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+        bool         (*GGML_CALL cpy_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst); // dst is in the buffer, src may be in any buffer
+        void         (*GGML_CALL clear)      (ggml_backend_buffer_t buffer, uint8_t value);
+        void         (*GGML_CALL reset)      (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
     };
 
     struct ggml_backend_buffer {
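A backend's buffer callbacks must carry the same qualifier as these pointer types, or the assignments into ggml_backend_buffer_i stop matching. A minimal sketch with hypothetical names, assuming buffer->context points at the raw allocation and buffer->size gives its length (both are fields of struct ggml_backend_buffer):

    #include "ggml-backend-impl.h"
    #include <string.h>

    // Hypothetical callbacks; GGML_CALL on the definition must match the
    // qualifier on the corresponding ggml_backend_buffer_i member.
    GGML_CALL static const char * example_buffer_get_name(ggml_backend_buffer_t buffer) {
        (void) buffer;
        return "Example";
    }

    GGML_CALL static void example_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
        memset(buffer->context, value, buffer->size);
    }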
@@ -54,7 +54,7 @@ extern "C" {
         enum ggml_backend_buffer_usage usage;
     };
 
-    ggml_backend_buffer_t ggml_backend_buffer_init(
+    GGML_CALL ggml_backend_buffer_t ggml_backend_buffer_init(
                ggml_backend_buffer_type_t    buft,
         struct ggml_backend_buffer_i         iface,
                ggml_backend_buffer_context_t context,
@@ -70,31 +70,31 @@ extern "C" {
     typedef void * ggml_backend_context_t;
 
     struct ggml_backend_i {
-        const char * (*get_name)(ggml_backend_t backend);
+        const char * (*GGML_CALL get_name)(ggml_backend_t backend);
 
-        void (*free)(ggml_backend_t backend);
+        void (*GGML_CALL free)(ggml_backend_t backend);
 
         // buffer allocation
-        ggml_backend_buffer_type_t (*get_default_buffer_type)(ggml_backend_t backend);
+        ggml_backend_buffer_type_t (*GGML_CALL get_default_buffer_type)(ggml_backend_t backend);
 
         // (optional) asynchronous tensor data access
-        void (*set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
-        void (*get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
-        bool (*cpy_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * src, struct ggml_tensor * dst);
+        void (*GGML_CALL set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+        void (*GGML_CALL get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+        bool (*GGML_CALL cpy_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * src, struct ggml_tensor * dst);
 
         // (optional) complete all pending operations
-        void (*synchronize)(ggml_backend_t backend);
+        void (*GGML_CALL synchronize)(ggml_backend_t backend);
 
         // compute graph with a plan
-        ggml_backend_graph_plan_t (*graph_plan_create) (ggml_backend_t backend, const struct ggml_cgraph * cgraph);
-        void                      (*graph_plan_free)   (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
-        void                      (*graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
+        ggml_backend_graph_plan_t (*GGML_CALL graph_plan_create) (ggml_backend_t backend, const struct ggml_cgraph * cgraph);
+        void                      (*GGML_CALL graph_plan_free)   (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
+        void                      (*GGML_CALL graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
 
         // compute graph without a plan (async)
-        bool (*graph_compute)(ggml_backend_t backend, struct ggml_cgraph * cgraph);
+        bool (*GGML_CALL graph_compute)(ggml_backend_t backend, struct ggml_cgraph * cgraph);
 
         // check if the backend supports an operation
-        bool (*supports_op)(ggml_backend_t backend, const struct ggml_tensor * op);
+        bool (*GGML_CALL supports_op)(ggml_backend_t backend, const struct ggml_tensor * op);
     };
 
     struct ggml_backend {
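The backend interface works the same way: each implementation of these members must be declared with the qualifier. For instance, a supports_op callback, sketched here with hypothetical names:

    #include "ggml-backend-impl.h"

    // Hypothetical backend that only claims support for a couple of ops.
    GGML_CALL static bool example_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
        (void) backend;
        switch (op->op) {
            case GGML_OP_ADD:
            case GGML_OP_MUL_MAT:
                return true;
            default:
                return false;
        }
    }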
@@ -107,9 +107,9 @@ extern "C" {
     // Backend registry
     //
 
-    typedef ggml_backend_t (*ggml_backend_init_fn)(const char * params, void * user_data);
+    typedef ggml_backend_t (*GGML_CALL ggml_backend_init_fn)(const char * params, void * user_data);
 
-    void ggml_backend_register(const char * name, ggml_backend_init_fn init_fn, ggml_backend_buffer_type_t default_buffer_type, void * user_data);
+    GGML_CALL void ggml_backend_register(const char * name, ggml_backend_init_fn init_fn, ggml_backend_buffer_type_t default_buffer_type, void * user_data);
 
 #ifdef __cplusplus
 }
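Registration follows from this last hunk: an init function has to match the GGML_CALL-qualified ggml_backend_init_fn typedef before it can be passed to ggml_backend_register. A sketch with hypothetical names; ggml_backend_cpu_buffer_type() stands in for a real default buffer type:

    #include "ggml-backend.h"

    // Hypothetical init function matching the ggml_backend_init_fn typedef.
    GGML_CALL static ggml_backend_t example_backend_init(const char * params, void * user_data) {
        (void) params;
        (void) user_data;
        return NULL; // a real backend would allocate and return its struct ggml_backend
    }

    static void example_register(void) {
        ggml_backend_register("Example", example_backend_init,
                              ggml_backend_cpu_buffer_type(), /*user_data=*/NULL);
    }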