Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 2 additions & 3 deletions lib/chat_models/chat_open_ai.ex
Original file line number Diff line number Diff line change
Expand Up @@ -837,7 +837,7 @@ defmodule LangChain.ChatModels.ChatOpenAI do
| MessageDelta.t()
| [MessageDelta.t()]
| {:error, String.t()}
def do_process_response(model, %{"choices" => [], "usage" => %{} = _usage} = data) do
def do_process_response(model, %{"choices" => _choices, "usage" => %{} = _usage} = data) do
case get_token_usage(data) do
%TokenUsage{} = token_usage ->
Callbacks.fire(model.callbacks, :on_llm_token_usage, [token_usage])
Expand All @@ -847,8 +847,7 @@ defmodule LangChain.ChatModels.ChatOpenAI do
:ok
end

# this stand-alone TokenUsage message is skipped and not returned
:skip
do_process_response(model, %{data | "usage" => nil})
end

# An empty `choices` list (and no usage payload) carries nothing to surface,
# so this clause drops the chunk entirely.
def do_process_response(_model, %{"choices" => []}) do
  :skip
end
Expand Down
43 changes: 42 additions & 1 deletion test/chat_models/chat_open_ai_test.exs
Original file line number Diff line number Diff line change
Expand Up @@ -1130,7 +1130,7 @@ defmodule LangChain.ChatModels.ChatOpenAITest do

describe "do_process_response/2" do
setup do
model = ChatOpenAI.new(%{"model" => @test_model})
{:ok, model} = ChatOpenAI.new(%{"model" => @test_model})
%{model: model}
end

Expand Down Expand Up @@ -1465,6 +1465,47 @@ defmodule LangChain.ChatModels.ChatOpenAITest do
assert msg1.content == "Greetings!"
assert msg2.content == "Howdy!"
end

test "handles choices and usage data in the same chunk (DeepSeek's format)", %{model: model} do
  # Forward any token-usage callback into this test process's mailbox so we
  # can assert it fired.
  callback_handlers = %{
    on_llm_token_usage: fn usage -> send(self(), {:fired_token_usage, usage}) end
  }

  model = %{model | callbacks: [callback_handlers]}

  # A single streamed chunk carrying BOTH a delta choice and usage totals,
  # as DeepSeek emits them.
  chunk = %{
    "choices" => [
      %{
        "index" => 0,
        "delta" => %{"content" => ""},
        "logprobs" => nil,
        "finish_reason" => "stop"
      }
    ],
    "usage" => %{
      "prompt_tokens" => 11,
      "completion_tokens" => 12,
      "total_tokens" => 22,
      "prompt_tokens_details" => %{"cached_tokens" => 0},
      "prompt_cache_hit_tokens" => 0,
      "prompt_cache_miss_tokens" => 11
    }
  }

  # The choices are still processed into a delta list...
  assert [%MessageDelta{} = delta] = ChatOpenAI.do_process_response(model, chunk)
  assert delta.role == :unknown
  assert delta.index == 0

  # ...and the usage portion fired the callback with the parsed counts.
  assert_received {:fired_token_usage, usage}
  assert %TokenUsage{input: 11, output: 12} = usage
end
end

describe "streaming examples" do
Expand Down