Skip to content

Commit 51fd6a4

Browse files
authored
add on chunk events to genai spans (#200)
* add on chunk events to genai spans
* small update in doc comment
* wrap add event in try except
1 parent a779588 commit 51fd6a4

File tree

3 files changed

+15
-3
lines changed

3 files changed

+15
-3
lines changed

src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -384,6 +384,10 @@ def _build_from_streaming_response(
384384
aggregated_usage_metadata = defaultdict(int)
385385
model_version = None
386386
for chunk in response:
387+
try:
388+
span.add_event("llm.content.completion.chunk")
389+
except Exception:
390+
pass
387391
# Important: do all processing in a separate sync function, that is
388392
# wrapped in @dont_throw. If we did it here, the @dont_throw on top of
389393
# this function would not be able to catch the errors, as they are
@@ -434,6 +438,10 @@ async def _abuild_from_streaming_response(
434438
aggregated_usage_metadata = defaultdict(int)
435439
model_version = None
436440
async for chunk in response:
441+
try:
442+
span.add_event("llm.content.completion.chunk")
443+
except Exception:
444+
pass
437445
# Important: do all processing in a separate sync function, that is
438446
# wrapped in @dont_throw. If we did it here, the @dont_throw on top of
439447
# this function would not be able to catch the errors, as they are

src/lmnr/sdk/laminar.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -757,12 +757,12 @@ def set_span_attributes(
757757
instrumentation.
758758
Example:
759759
```python
760-
with L.start_as_current_span(
760+
with Laminar.start_as_current_span(
761761
name="my_span_name", input=input["messages"], span_type="LLM"
762762
):
763763
response = await my_custom_call_to_openai(input)
764-
L.set_span_output(response["choices"][0]["message"]["content"])
765-
L.set_span_attributes({
764+
Laminar.set_span_output(response["choices"][0]["message"]["content"])
765+
Laminar.set_span_attributes({
766766
Attributes.PROVIDER: 'openai',
767767
Attributes.REQUEST_MODEL: input["model"],
768768
Attributes.RESPONSE_MODEL: response["model"],

tests/test_google_genai.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -879,8 +879,10 @@ def test_google_genai_streaming(span_exporter: InMemorySpanExporter):
879879
],
880880
)
881881
final_response = ""
882+
chunk_count = 0
882883
for chunk in stream:
883884
final_response += chunk.text or ""
885+
chunk_count += 1
884886

885887
spans = span_exporter.get_finished_spans()
886888
assert len(spans) == 1
@@ -917,6 +919,8 @@ def test_google_genai_streaming(span_exporter: InMemorySpanExporter):
917919
assert span.attributes["gen_ai.usage.input_tokens"] == 7
918920
assert span.attributes["gen_ai.usage.output_tokens"] == 166
919921
assert span.attributes["llm.usage.total_tokens"] == 175 # 173 + 2 (thinking tokens)
922+
assert len(span.events) == chunk_count
923+
assert all(event.name == "llm.content.completion.chunk" for event in span.events)
920924

921925

922926
@pytest.mark.vcr

0 commit comments

Comments (0)