@@ -8,12 +8,11 @@

namespace DevProxy.Abstractions.LanguageModel;

-public class LMStudioLanguageModelClient(LanguageModelConfiguration? configuration, ILogger logger) : ILanguageModelClient
+public class OpenAILanguageModelClient(LanguageModelConfiguration? configuration, ILogger logger) : ILanguageModelClient
{
    private readonly LanguageModelConfiguration? _configuration = configuration;
    private readonly ILogger _logger = logger;
    private bool? _lmAvailable;
-    private readonly Dictionary<string, OpenAICompletionResponse> _cacheCompletion = [];
    private readonly Dictionary<ILanguageModelChatCompletionMessage[], OpenAIChatCompletionResponse> _cacheChatCompletion = [];

    public async Task<bool> IsEnabledAsync()
@@ -29,6 +28,8 @@ public async Task<bool> IsEnabledAsync()

    private async Task<bool> IsEnabledInternalAsync()
    {
+        using var scope = _logger.BeginScope(nameof(OpenAILanguageModelClient));
+
        if (_configuration is null || !_configuration.Enabled)
        {
            return false;
@@ -50,20 +51,14 @@ private async Task<bool> IsEnabledInternalAsync()

        try
        {
-            // check if lm is on
-            using var client = new HttpClient();
-            var response = await client.GetAsync($"{_configuration.Url}/v1/models");
-            _logger.LogDebug("Response: {response}", response.StatusCode);
-
-            if (!response.IsSuccessStatusCode)
+            var testCompletion = await GenerateChatCompletionInternalAsync([new()
            {
-                return false;
-            }
-
-            var testCompletion = await GenerateCompletionInternalAsync("Are you there? Reply with a yes or no.");
-            if (testCompletion?.Error is not null)
+                Content = "Are you there? Reply with a yes or no.",
+                Role = "user"
+            }]);
+            if (testCompletion?.ErrorMessage is not null)
            {
-                _logger.LogError("Error: {error}. Param: {param}", testCompletion.Error.Message, testCompletion.Error.Param);
+                _logger.LogError("Error: {error}", testCompletion.ErrorMessage);
                return false;
            }
@@ -78,90 +73,41 @@ private async Task<bool> IsEnabledInternalAsync()

    public async Task<ILanguageModelCompletionResponse?> GenerateCompletionAsync(string prompt, CompletionOptions? options = null)
    {
-        using var scope = _logger.BeginScope(nameof(LMStudioLanguageModelClient));
-
-        if (_configuration is null)
-        {
-            return null;
-        }
-
-        if (!_lmAvailable.HasValue)
-        {
-            _logger.LogError("Language model availability is not checked. Call {isEnabled} first.", nameof(IsEnabledAsync));
-            return null;
-        }
-
-        if (!_lmAvailable.Value)
-        {
-            return null;
-        }
-
-        if (_configuration.CacheResponses && _cacheCompletion.TryGetValue(prompt, out var cachedResponse))
-        {
-            _logger.LogDebug("Returning cached response for prompt: {prompt}", prompt);
-            return cachedResponse;
-        }
-
-        var response = await GenerateCompletionInternalAsync(prompt, options);
+        var response = await GenerateChatCompletionAsync([new OpenAIChatCompletionMessage() { Content = prompt, Role = "user" }], options);
        if (response == null)
        {
            return null;
        }
-        if (response.Error is not null)
+        if (response.ErrorMessage is not null)
        {
-            _logger.LogError("Error: {error}. Param: {param}", response.Error.Message, response.Error.Param);
+            _logger.LogError("Error: {error}", response.ErrorMessage);
            return null;
        }
-        else
-        {
-            if (_configuration.CacheResponses && response.Response is not null)
-            {
-                _cacheCompletion[prompt] = response;
-            }
+        var openAIResponse = (OpenAIChatCompletionResponse)response;

-            return response;
-        }
-    }
-
-    private async Task<OpenAICompletionResponse?> GenerateCompletionInternalAsync(string prompt, CompletionOptions? options = null)
-    {
-        Debug.Assert(_configuration != null, "Configuration is null");
-
-        try
+        return new OpenAICompletionResponse
        {
-            using var client = new HttpClient();
-            var url = $"{_configuration.Url}/v1/completions";
-            _logger.LogDebug("Requesting completion. Prompt: {prompt}", prompt);
-
-            var response = await client.PostAsJsonAsync(url,
-                new
-                {
-                    prompt,
-                    model = _configuration.Model,
-                    stream = false,
-                    temperature = options?.Temperature ?? 0.8,
-                }
-            );
-            _logger.LogDebug("Response: {response}", response.StatusCode);
-
-            var res = await response.Content.ReadFromJsonAsync<OpenAICompletionResponse>();
-            if (res is null)
+            Choices = openAIResponse.Choices?.Select(c => new OpenAICompletionResponseChoice
            {
-                return res;
-            }
-            res.RequestUrl = url;
-            return res;
-        }
-        catch (Exception ex)
-        {
-            _logger.LogError(ex, "Failed to generate completion");
-            return null;
-        }
+                ContentFilterResults = c.ContentFilterResults,
+                FinishReason = c.FinishReason,
+                Index = c.Index,
+                LogProbabilities = c.LogProbabilities,
+                Text = c.Message.Content
+            }).ToArray(),
+            Created = openAIResponse.Created,
+            Error = openAIResponse.Error,
+            Id = openAIResponse.Id,
+            Model = openAIResponse.Model,
+            Object = openAIResponse.Object,
+            PromptFilterResults = openAIResponse.PromptFilterResults,
+            Usage = openAIResponse.Usage,
+        };
    }

-    public async Task<ILanguageModelCompletionResponse?> GenerateChatCompletionAsync(ILanguageModelChatCompletionMessage[] messages)
+    public async Task<ILanguageModelCompletionResponse?> GenerateChatCompletionAsync(ILanguageModelChatCompletionMessage[] messages, CompletionOptions? options = null)
    {
-        using var scope = _logger.BeginScope(nameof(LMStudioLanguageModelClient));
+        using var scope = _logger.BeginScope(nameof(OpenAILanguageModelClient));

        if (_configuration is null)
        {
@@ -185,14 +131,14 @@ private async Task<bool> IsEnabledInternalAsync()
            return cachedResponse;
        }

-        var response = await GenerateChatCompletionInternalAsync(messages);
+        var response = await GenerateChatCompletionInternalAsync([.. messages.Select(m => (OpenAIChatCompletionMessage)m)], options);
        if (response == null)
        {
            return null;
        }
        if (response.Error is not null)
        {
-            _logger.LogError("Error: {error}. Param: {param}", response.Error.Message, response.Error.Param);
+            _logger.LogError("Error: {error}. Code: {code}", response.Error.Message, response.Error.Code);
            return null;
        }
        else
@@ -206,24 +152,25 @@ private async Task<bool> IsEnabledInternalAsync()
        }
    }

-    private async Task<OpenAIChatCompletionResponse?> GenerateChatCompletionInternalAsync(ILanguageModelChatCompletionMessage[] messages)
+    private async Task<OpenAIChatCompletionResponse?> GenerateChatCompletionInternalAsync(OpenAIChatCompletionMessage[] messages, CompletionOptions? options = null)
    {
        Debug.Assert(_configuration != null, "Configuration is null");

        try
        {
            using var client = new HttpClient();
-            var url = $"{_configuration.Url}/v1/chat/completions";
+            var url = $"{_configuration.Url}/chat/completions";
            _logger.LogDebug("Requesting chat completion. Message: {lastMessage}", messages.Last().Content);

-            var response = await client.PostAsJsonAsync(url,
-                new
-                {
-                    messages,
-                    model = _configuration.Model,
-                    stream = false
-                }
-            );
+            var payload = new OpenAIChatCompletionRequest
+            {
+                Messages = messages,
+                Model = _configuration.Model,
+                Stream = false,
+                Temperature = options?.Temperature
+            };
+
+            var response = await client.PostAsJsonAsync(url, payload);
            _logger.LogDebug("Response: {response}", response.StatusCode);

            var res = await response.Content.ReadFromJsonAsync<OpenAIChatCompletionResponse>();
@@ -243,7 +190,7 @@ private async Task<bool> IsEnabledInternalAsync()
    }
}

-internal static class CacheChatCompletionExtensions
+internal static class OpenAICacheChatCompletionExtensions
{
    public static OpenAIChatCompletionMessage[]? GetKey(
        this Dictionary<OpenAIChatCompletionMessage[], OpenAIChatCompletionResponse> cache,
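For reference, a minimal usage sketch of the renamed client (not part of this diff): it only uses members that appear above, while the configuration values, the assumption that LanguageModelConfiguration's properties are settable, and the logger wiring are illustrative placeholders.

// Minimal usage sketch, assuming an OpenAI-compatible endpoint and settable configuration properties.
// OpenAILanguageModelClient, LanguageModelConfiguration and OpenAIChatCompletionMessage come from the diff above.
using Microsoft.Extensions.Logging;

var config = new LanguageModelConfiguration
{
    Enabled = true,
    Url = "http://localhost:1234/v1", // assumed base URL; the client appends /chat/completions
    Model = "local-model"             // assumed model identifier
};

using var loggerFactory = LoggerFactory.Create(builder => builder.AddConsole());
var client = new OpenAILanguageModelClient(config, loggerFactory.CreateLogger("LanguageModel"));

// IsEnabledAsync must run first; the completion methods return null until availability is checked.
if (await client.IsEnabledAsync())
{
    var response = await client.GenerateChatCompletionAsync(
    [
        new OpenAIChatCompletionMessage { Content = "Say hello.", Role = "user" }
    ]);
}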