diff --git a/packages/datadog-instrumentations/src/openai.js b/packages/datadog-instrumentations/src/openai.js
index e41db136854..401a231f5c7 100644
--- a/packages/datadog-instrumentations/src/openai.js
+++ b/packages/datadog-instrumentations/src/openai.js
@@ -306,44 +306,33 @@ for (const shim of V4_PACKAGE_SHIMS) {
       return ch.start.runStores(ctx, () => {
         const apiProm = methodFn.apply(this, arguments)
 
+        if (baseResource === 'chat.completions' && typeof apiProm._thenUnwrap === 'function') {
+          // this should only ever be invoked from a client.beta.chat.completions.parse call
+          shimmer.wrap(apiProm, '_thenUnwrap', origApiPromThenUnwrap => function () {
+            // TODO(sam.brenner): I wonder if we can patch the APIPromise prototype instead, although
+            // we might not have access to everything we need...
+
+            // this is a new APIPromise instance
+            const unwrappedPromise = origApiPromThenUnwrap.apply(this, arguments)
+
+            shimmer.wrap(unwrappedPromise, 'parse', origApiPromParse => function () {
+              const parsedPromise = origApiPromParse.apply(this, arguments)
+                .then(body => Promise.all([this.responsePromise, body]))
+
+              return handleUnwrappedAPIPromise(parsedPromise, ctx, stream, n)
+            })
+
+            return unwrappedPromise
+          })
+        }
+
         // wrapping `parse` avoids problematic wrapping of `then` when trying to call
         // `withResponse` in userland code after. This way, we can return the whole `APIPromise`
         shimmer.wrap(apiProm, 'parse', origApiPromParse => function () {
-          return origApiPromParse.apply(this, arguments)
-            // the original response is wrapped in a promise, so we need to unwrap it
+          const parsedPromise = origApiPromParse.apply(this, arguments)
             .then(body => Promise.all([this.responsePromise, body]))
-            .then(([{ response, options }, body]) => {
-              if (stream) {
-                if (body.iterator) {
-                  shimmer.wrap(body, 'iterator', wrapStreamIterator(response, options, n, ctx))
-                } else {
-                  shimmer.wrap(
-                    body.response.body, Symbol.asyncIterator, wrapStreamIterator(response, options, n, ctx)
-                  )
-                }
-              } else {
-                finish(ctx, {
-                  headers: response.headers,
-                  data: body,
-                  request: {
-                    path: response.url,
-                    method: options.method
-                  }
-                })
-              }
-              return body
-            })
-            .catch(error => {
-              finish(ctx, undefined, error)
-
-              throw error
-            })
-            .finally(() => {
-              // maybe we don't want to unwrap here in case the promise is re-used?
-              // other hand: we want to avoid resource leakage
-              shimmer.unwrap(apiProm, 'parse')
-            })
+          return handleUnwrappedAPIPromise(parsedPromise, ctx, stream, n)
         })
 
         ch.end.publish(ctx)
 
@@ -356,6 +345,37 @@ for (const shim of V4_PACKAGE_SHIMS) {
   })
 }
 
+function handleUnwrappedAPIPromise (apiProm, ctx, stream, n) {
+  return apiProm
+    .then(([{ response, options }, body]) => {
+      if (stream) {
+        if (body.iterator) {
+          shimmer.wrap(body, 'iterator', wrapStreamIterator(response, options, n, ctx))
+        } else {
+          shimmer.wrap(
+            body.response.body, Symbol.asyncIterator, wrapStreamIterator(response, options, n, ctx)
+          )
+        }
+      } else {
+        finish(ctx, {
+          headers: response.headers,
+          data: body,
+          request: {
+            path: response.url,
+            method: options.method
+          }
+        })
+      }
+
+      return body
+    })
+    .catch(error => {
+      finish(ctx, undefined, error)
+
+      throw error
+    })
+}
+
 function finish (ctx, response, error) {
   if (error) {
     ctx.error = error
diff --git a/packages/datadog-plugin-openai/test/index.spec.js b/packages/datadog-plugin-openai/test/index.spec.js
index 03ac66fb2e5..92a1f360cd4 100644
--- a/packages/datadog-plugin-openai/test/index.spec.js
+++ b/packages/datadog-plugin-openai/test/index.spec.js
@@ -2821,7 +2821,10 @@ describe('Plugin', () => {
           }
 
           if (semver.satisfies(realVersion, '>=4.0.0')) {
-            const result = await openai.chat.completions.create(params)
+            const prom = openai.chat.completions.create(params)
+            expect(prom).to.have.property('withResponse')
+
+            const result = await prom
 
             expect(result.id).to.eql('chatcmpl-7GaWqyMTD9BLmkmy8SxyjUGX3KSRN')
             expect(result.model).to.eql('gpt-3.5-turbo-0301')
@@ -3786,6 +3789,55 @@ describe('Plugin', () => {
             }
           })
         }
+
+        if (semver.intersects('>=4.59.0', version)) {
+          it('makes a successful call with the beta chat completions', async () => {
+            nock('https://api.openai.com:443')
+              .post('/v1/chat/completions')
+              .reply(200, {
+                id: 'chatcmpl-7GaWqyMTD9BLmkmy8SxyjUGX3KSRN',
+                object: 'chat.completion',
+                created: 1684188020,
+                model: 'gpt-4o',
+                usage: {
+                  prompt_tokens: 37,
+                  completion_tokens: 10,
+                  total_tokens: 47
+                },
+                choices: [
+                  {
+                    message: {
+                      role: 'assistant',
+                      content: 'I am doing well, how about you?'
+                    },
+                    finish_reason: 'stop',
+                    index: 0
+                  }
+                ]
+              })
+
+            const checkTraces = agent
+              .use(traces => {
+                const span = traces[0][0]
+                expect(span).to.have.property('name', 'openai.request')
+              })
+
+            const prom = openai.beta.chat.completions.parse({
+              model: 'gpt-4o',
+              messages: [{ role: 'user', content: 'Hello, OpenAI!', name: 'hunter2' }],
+              temperature: 0.5,
+              stream: false
+            })
+
+            expect(prom).to.have.property('withResponse')
+
+            const response = await prom
+
+            expect(response.choices[0].message.content).to.eql('I am doing well, how about you?')
+
+            await checkTraces
+          })
+        }
       })
     })
   })
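Reviewer note: the `_thenUnwrap` wrapping above assumes how openai >= 4.59.0 implements
`client.beta.chat.completions.parse()`: the beta method delegates to the already-instrumented
`chat.completions.create()` and derives a new APIPromise from it via `_thenUnwrap`. A rough
sketch of that call path (illustrative only, not the SDK's actual source; `betaParse` and
`parseChatCompletion` stand in for SDK internals):

  // roughly what client.beta.chat.completions.parse(body) does internally
  function betaParse (client, body) {
    const apiProm = client.chat.completions.create(body) // instrumented create()
    // `_thenUnwrap` returns a fresh APIPromise wrapping the transformed body,
    // so the `parse` wrapped on `apiProm` is never the one userland awaits;
    // hence the re-wrapping of `parse` on the unwrapped promise in the patch
    return apiProm._thenUnwrap(data => parseChatCompletion(data, body))
  }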
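The tests assert `withResponse` is still present because wrapping `parse` (rather than `then`)
keeps the returned value a full APIPromise. A minimal sketch of the userland pattern this
preserves (model and messages are arbitrary example values):

  const prom = openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [{ role: 'user', content: 'Hello, OpenAI!' }]
  })
  // `withResponse` resolves both the parsed body and the raw HTTP response;
  // it keeps working under instrumentation because `then` was never replaced
  const { data, response } = await prom.withResponse()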