@@ -213,7 +213,7 @@ class OpenAIMonitor {
213
213
public createChatCompletion = async (
214
214
body : ChatCompletionCreateParams ,
215
215
options ?: RequestOptions ,
216
- additionalLogs ?: Openlayer . RequestOptions < any > | undefined ,
216
+ additionalLogs ?: Openlayer . RequestOptions | undefined ,
217
217
) : Promise < ChatCompletion | Stream < ChatCompletionChunk > > => {
218
218
// Start a timer to measure latency
219
219
const startTime = Date . now ( ) ;
@@ -309,7 +309,7 @@ class OpenAIMonitor {
309
309
public createCompletion = async (
310
310
body : CompletionCreateParams ,
311
311
options ?: RequestOptions ,
312
- additionalLogs ?: Openlayer . RequestOptions < any > | undefined ,
312
+ additionalLogs ?: Openlayer . RequestOptions | undefined ,
313
313
) : Promise < Completion | Stream < Completion > > => {
314
314
if ( ! body . prompt ) {
315
315
console . error ( 'No prompt provided.' ) ;
@@ -407,7 +407,7 @@ class OpenAIMonitor {
407
407
* @param {Openlayer.RequestOptions | undefined} [additionalLogs] - Optional metadata logs to include with the request sent to Openlayer.
408
408
* @returns {Promise<void> } A promise that resolves when the run data has been successfully published to Openlayer.
409
409
*/
410
- public async monitorThreadRun ( run : Run , additionalLogs ?: Openlayer . RequestOptions < any > | undefined ) {
410
+ public async monitorThreadRun ( run : Run , additionalLogs ?: Openlayer . RequestOptions | undefined ) {
411
411
if ( run . status !== 'completed' || this . openlayerInferencePipelineId . length === 0 ) {
412
412
return ;
413
413
}
0 commit comments