1 change: 1 addition & 0 deletions engine/baml-lib/baml-types/src/tracing/events.rs
@@ -593,6 +593,7 @@ pub struct LLMUsage {
pub input_tokens: Option<u64>,
pub output_tokens: Option<u64>,
pub total_tokens: Option<u64>,
pub cached_input_tokens: Option<u64>,
}

#[cfg(test)]
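All four counters stay Option<u64>, so providers that report no cache usage simply leave the new field as None. As a hypothetical illustration (not part of this PR), a consumer could derive a cache hit rate from the struct; note the semantics vary by provider (OpenAI counts cached tokens inside input_tokens, while Anthropic reports cache reads separately), so the ratio below assumes the OpenAI convention.

    // Hypothetical helper, not in this PR: fraction of input tokens served
    // from the provider's prompt cache, assuming cached tokens are a subset
    // of input_tokens (the OpenAI convention).
    fn cache_hit_rate(usage: &LLMUsage) -> Option<f64> {
        let input = usage.input_tokens?;
        let cached = usage.cached_input_tokens?;
        (input > 0).then(|| cached as f64 / input as f64)
    }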
1 change: 1 addition & 0 deletions engine/baml-rpc/src/runtime_api/trace_event.rs
@@ -183,4 +183,5 @@ pub struct LLMUsage {
pub input_tokens: Option<u64>,
pub output_tokens: Option<u64>,
pub total_tokens: Option<u64>,
pub cached_input_tokens: Option<u64>,
}
1 change: 1 addition & 0 deletions engine/baml-runtime/src/internal/llm_client/mod.rs
@@ -294,6 +294,7 @@ pub struct LLMCompleteResponseMetadata {
pub prompt_tokens: Option<u64>,
pub output_tokens: Option<u64>,
pub total_tokens: Option<u64>,
pub cached_input_tokens: Option<u64>,
}

// This is how the response gets logged if you print the result to the console.
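LLMCompleteResponseMetadata mirrors the tracing struct, so the cached count survives from the client response into logging. A sketch of the kind of cost accounting this enables, with an assumed (not provider-verified) discount rate:

    // Illustrative only: estimate input cost when a provider bills cached
    // input tokens at a discount. The 0.5 multiplier is an assumption for
    // the sketch; real discounts vary by provider and model.
    fn estimated_input_cost(meta: &LLMCompleteResponseMetadata, usd_per_token: f64) -> Option<f64> {
        let prompt = meta.prompt_tokens? as f64;
        let cached = meta.cached_input_tokens.unwrap_or(0) as f64;
        Some((prompt - cached).max(0.0) * usd_per_token + cached * usd_per_token * 0.5)
    }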
@@ -90,6 +90,7 @@ pub fn parse_anthropic_response<C: WithClient + RequestBuilder>(
prompt_tokens: Some(response.usage.input_tokens),
output_tokens: Some(response.usage.output_tokens),
total_tokens: Some(response.usage.input_tokens + response.usage.output_tokens),
cached_input_tokens: response.usage.cache_read_input_tokens,
},
})
}
@@ -137,6 +138,7 @@ pub fn scan_anthropic_response_stream(
inner.prompt_tokens = Some(body.usage.input_tokens);
inner.output_tokens = Some(body.usage.output_tokens);
inner.total_tokens = Some(body.usage.input_tokens + body.usage.output_tokens);
inner.cached_input_tokens = body.usage.cache_read_input_tokens;
}
MessageChunk::ContentBlockDelta(event) => {
if let super::types::ContentBlockDelta::TextDelta { text } = event.delta {
@@ -153,6 +155,7 @@
inner.finish_reason = body.delta.stop_reason.clone();
inner.output_tokens = Some(body.usage.output_tokens);
inner.total_tokens = Some(inner.prompt_tokens.unwrap_or(0) + body.usage.output_tokens);
inner.cached_input_tokens = body.usage.cache_read_input_tokens;
}
MessageChunk::MessageStop => (),
MessageChunk::Error { error } => {
@@ -218,6 +221,7 @@ mod tests {
prompt_tokens: Some(321),
output_tokens: Some(158),
total_tokens: Some(479),
cached_input_tokens: Some(0),
},
};

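Both the one-shot parser and the two streaming branches now forward cache_read_input_tokens untouched. One hedged caveat: per Anthropic's prompt-caching documentation, input_tokens excludes cache reads and writes, so the total_tokens computed above (input + output) understates the true prompt size on cache hits. A consumer that cares could reconstruct it like this:

    // Sketch only: full prompt size for a cached Anthropic call, assuming
    // input_tokens excludes cache reads/writes as the docs describe.
    let full_prompt_tokens = response.usage.input_tokens
        + response.usage.cache_read_input_tokens.unwrap_or(0)
        + response.usage.cache_creation_input_tokens.unwrap_or(0);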
@@ -34,6 +34,8 @@ pub enum AnthropicMessageContent {
pub struct AnthropicUsage {
pub input_tokens: u64,
pub output_tokens: u64,
pub cache_creation_input_tokens: Option<u64>,
pub cache_read_input_tokens: Option<u64>,
}

#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
@@ -200,7 +202,7 @@ pub struct MessageDeltaChunk {
/// The result of this stream.
pub delta: StreamStop,
/// The billing and rate-limit usage of this stream.
pub usage: DeltaUsage,
pub usage: AnthropicUsage,
}

/// The text delta content block.
@@ -222,13 +224,6 @@ pub struct StreamStop {
pub stop_sequence: Option<StopSequence>,
}

/// The delta usage of the stream.
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct DeltaUsage {
/// The number of output tokens which were used.
pub output_tokens: u64,
}

#[cfg(test)]
mod tests {
use anyhow::Result;
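With DeltaUsage gone, message_delta chunks deserialize into AnthropicUsage as well; since input_tokens is a required u64, this assumes the delta payload carries it (or that a #[serde(default)] exists above this hunk). A minimal round-trip sketch for the new optional fields:

    // Hedged sketch, not a test from this PR: the optional cache fields
    // default to None when absent and populate when present.
    let usage: AnthropicUsage = serde_json::from_str(
        r#"{"input_tokens": 321, "output_tokens": 158, "cache_read_input_tokens": 40}"#,
    )?;
    assert_eq!(usage.cache_read_input_tokens, Some(40));
    assert_eq!(usage.cache_creation_input_tokens, None);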
@@ -899,6 +899,7 @@ impl WithStreamChat for AwsClient {
prompt_tokens: None,
output_tokens: None,
total_tokens: None,
cached_input_tokens: None,
},
}),
response,
@@ -962,6 +963,8 @@ impl WithStreamChat for AwsClient {
Some(usage.output_tokens() as u64);
new_state.metadata.total_tokens =
Some((usage.total_tokens()) as u64);
// AWS Bedrock does not currently support cached tokens
new_state.metadata.cached_input_tokens = None;
}
}
_ => {
@@ -1303,6 +1306,7 @@ impl WithChat for AwsClient {
.usage
.as_ref()
.and_then(|i| i.total_tokens.try_into().ok()),
cached_input_tokens: None, // AWS Bedrock does not currently support cached tokens
},
}),
Err(e) => LLMResponse::LLMFailure(LLMErrorResponse {
@@ -97,6 +97,7 @@ pub fn parse_google_response<C: WithClient + RequestBuilder>(
prompt_tokens: response.usage_metadata.prompt_token_count,
output_tokens: response.usage_metadata.candidates_token_count,
total_tokens: response.usage_metadata.total_token_count,
cached_input_tokens: response.usage_metadata.cached_content_token_count,
},
})
}
@@ -171,6 +172,7 @@ pub fn scan_google_response_stream(
inner.metadata.prompt_tokens = event.usage_metadata.prompt_token_count;
inner.metadata.output_tokens = event.usage_metadata.candidates_token_count;
inner.metadata.total_tokens = event.usage_metadata.total_token_count;
inner.metadata.cached_input_tokens = event.usage_metadata.cached_content_token_count;

inner.latency = instant_now.elapsed();
Ok(())
@@ -285,6 +287,7 @@ mod tests {
prompt_token_count: Some(166),
candidates_token_count: Some(39),
total_token_count: Some(205),
cached_content_token_count: None,
},
};

@@ -331,6 +334,7 @@ mod tests {
prompt_tokens: Some(166),
output_tokens: Some(39),
total_tokens: Some(205),
cached_input_tokens: None,
},
};

@@ -338,6 +338,7 @@ pub struct UsageMetaData {
pub prompt_token_count: Option<u64>,
pub candidates_token_count: Option<u64>,
pub total_token_count: Option<u64>,
pub cached_content_token_count: Option<u64>,
}

#[cfg(test)]
@@ -92,6 +92,13 @@ pub fn parse_openai_response<C: WithClient + RequestBuilder>(
prompt_tokens: usage.map(|u| u.prompt_tokens),
output_tokens: usage.map(|u| u.completion_tokens),
total_tokens: usage.map(|u| u.total_tokens),
cached_input_tokens: usage.and_then(|u| {
// Extract cached tokens from input_tokens_details if available
u.input_tokens_details
.as_ref()
.and_then(|details| details.get("cached_tokens"))
.and_then(|cached| cached.as_u64())
}),
},
})
}
@@ -143,6 +150,12 @@ pub fn scan_openai_chat_completion_stream(
inner.metadata.prompt_tokens = Some(usage.prompt_tokens);
inner.metadata.output_tokens = Some(usage.completion_tokens);
inner.metadata.total_tokens = Some(usage.total_tokens);
inner.metadata.cached_input_tokens =
usage.input_tokens_details.as_ref().and_then(|details| {
details
.get("cached_tokens")
.and_then(|cached| cached.as_u64())
})
}

Ok(())
@@ -226,6 +239,7 @@ mod tests {
prompt_tokens: Some(128),
output_tokens: Some(71),
total_tokens: Some(199),
cached_input_tokens: Some(0),
},
};

@@ -322,6 +336,13 @@ pub fn parse_openai_responses_response<C: WithClient + RequestBuilder>(
prompt_tokens: usage.map(|u| u.prompt_tokens),
output_tokens: usage.map(|u| u.completion_tokens),
total_tokens: usage.map(|u| u.total_tokens),
cached_input_tokens: usage.and_then(|u| {
// Extract cached tokens from input_tokens_details if available
u.input_tokens_details
.as_ref()
.and_then(|details| details.get("cached_tokens"))
.and_then(|cached| cached.as_u64())
}),
},
})
}
@@ -390,6 +411,12 @@ pub fn scan_openai_responses_stream(
inner.metadata.prompt_tokens = Some(usage.prompt_tokens);
inner.metadata.output_tokens = Some(usage.completion_tokens);
inner.metadata.total_tokens = Some(usage.total_tokens);
inner.metadata.cached_input_tokens =
usage.input_tokens_details.as_ref().and_then(|details| {
details
.get("cached_tokens")
.and_then(|cached| cached.as_u64())
})
}
}
ResponseFailed { response, .. } => {
@@ -441,6 +468,12 @@ pub fn scan_openai_responses_stream(
inner.metadata.prompt_tokens = Some(usage.prompt_tokens);
inner.metadata.output_tokens = Some(usage.completion_tokens);
inner.metadata.total_tokens = Some(usage.total_tokens);
inner.metadata.cached_input_tokens =
usage.input_tokens_details.as_ref().and_then(|details| {
details
.get("cached_tokens")
.and_then(|cached| cached.as_u64())
})
}
}
OutputTextDelta { delta, .. } => {
@@ -507,6 +540,7 @@ mod responses_tests {
prompt_tokens: Some(36),
output_tokens: Some(87),
total_tokens: Some(123),
cached_input_tokens: Some(0),
},
};

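The same three-step extraction (input_tokens_details, then "cached_tokens", then as_u64) now appears at four call sites in this file. A hypothetical helper (not in the PR) that would deduplicate them:

    // Hypothetical refactor: one place to dig cached_tokens out of the
    // loosely-typed details object.
    fn cached_tokens(details: Option<&serde_json::Value>) -> Option<u64> {
        details?.get("cached_tokens")?.as_u64()
    }

    // Each streaming site above would then collapse to:
    // inner.metadata.cached_input_tokens = cached_tokens(usage.input_tokens_details.as_ref());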
@@ -255,7 +255,9 @@ pub struct CompletionUsage {
/// Total number of tokens used in the request (prompt + completion).
pub total_tokens: u64,
/// Additional fields that may be present in responses API
#[serde(alias = "prompt_tokens_details")]
pub input_tokens_details: Option<serde_json::Value>,
#[serde(alias = "completion_tokens_details")]
pub output_tokens_details: Option<serde_json::Value>,
}

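The aliases exist because OpenAI spells the details field differently across its two APIs: chat completions returns prompt_tokens_details / completion_tokens_details, while the responses API returns input_tokens_details / output_tokens_details. A hedged sketch of what the alias buys, assuming the remaining CompletionUsage fields are the ones shown above:

    // Both spellings land in the same field thanks to #[serde(alias = ...)].
    let chat: CompletionUsage = serde_json::from_str(
        r#"{"prompt_tokens": 10, "completion_tokens": 2, "total_tokens": 12,
            "prompt_tokens_details": {"cached_tokens": 8}}"#,
    )?;
    let responses: CompletionUsage = serde_json::from_str(
        r#"{"prompt_tokens": 10, "completion_tokens": 2, "total_tokens": 12,
            "input_tokens_details": {"cached_tokens": 8}}"#,
    )?;
    assert_eq!(chat.input_tokens_details, responses.input_tokens_details);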
@@ -108,6 +108,7 @@ pub async fn make_stream_request(
prompt_tokens: None,
output_tokens: None,
total_tokens: None,
cached_input_tokens: None,
},
}),
move |accumulated: &mut Result<LLMCompleteResponse>, event| {
@@ -99,6 +99,7 @@ pub fn parse_vertex_response<C: WithClient + RequestBuilder>(
prompt_tokens: usage_metadata.prompt_token_count,
output_tokens: usage_metadata.candidates_token_count,
total_tokens: usage_metadata.total_token_count,
cached_input_tokens: usage_metadata.cached_content_token_count,
},
})
}
@@ -162,6 +163,22 @@ pub fn scan_vertex_response_stream(
if choice.finish_reason == Some("STOP".to_string()) {
inner.metadata.baml_is_complete = true;
}
inner.metadata.prompt_tokens = event
.usage_metadata
.as_ref()
.and_then(|u| u.prompt_token_count);
inner.metadata.output_tokens = event
.usage_metadata
.as_ref()
.and_then(|u| u.candidates_token_count);
inner.metadata.total_tokens = event
.usage_metadata
.as_ref()
.and_then(|u| u.total_token_count);
inner.metadata.cached_input_tokens = event
.usage_metadata
.as_ref()
.and_then(|u| u.cached_content_token_count);
}

inner.latency = instant_now.elapsed();
@@ -257,6 +274,7 @@ mod tests {
prompt_tokens: Some(79),
output_tokens: Some(35),
total_tokens: Some(114),
cached_input_tokens: None,
},
};

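The four chained as_ref().and_then(...) reads above are deliberate about one thing: when a chunk carries no usage_metadata, every field is reset to None. A hypothetical tighter form (not in this PR) would read once but, note, would instead leave stale values untouched on metadata-less chunks:

    // Sketch of a condensed alternative; subtly different from the PR in
    // that it skips (rather than clears) the fields when usage_metadata is
    // absent.
    if let Some(u) = event.usage_metadata.as_ref() {
        inner.metadata.prompt_tokens = u.prompt_token_count;
        inner.metadata.output_tokens = u.candidates_token_count;
        inner.metadata.total_tokens = u.total_token_count;
        inner.metadata.cached_input_tokens = u.cached_content_token_count;
    }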
@@ -347,6 +347,7 @@ pub struct UsageMetaData {
pub prompt_token_count: Option<u64>,
pub candidates_token_count: Option<u64>,
pub total_token_count: Option<u64>,
pub cached_content_token_count: Option<u64>,
}

#[cfg(test)]
@@ -469,6 +470,7 @@ mod tests {
prompt_token_count: Some(11),
candidates_token_count: Some(433),
total_token_count: Some(444),
cached_content_token_count: None,
}),
};

2 changes: 2 additions & 0 deletions engine/baml-runtime/src/lib.rs
@@ -542,6 +542,7 @@ impl BamlRuntime {
prompt_tokens: Some(50),
output_tokens: Some(50),
total_tokens: Some(100),
cached_input_tokens: None,
},
}),
// TODO: Run checks and asserts.
@@ -753,6 +754,7 @@ impl BamlRuntime {
prompt_tokens: Some(50),
output_tokens: Some(50),
total_tokens: Some(100),
cached_input_tokens: None,
},
})
}
1 change: 1 addition & 0 deletions engine/baml-runtime/src/test_constraints.rs
@@ -336,6 +336,7 @@ mod tests {
prompt_tokens: None,
output_tokens: None,
total_tokens: None,
cached_input_tokens: None,
},
}
}
@@ -188,6 +188,7 @@ impl<'a> IntoRpcEvent<'a, baml_rpc::runtime_api::LLMUsage>
input_tokens: self.input_tokens,
output_tokens: self.output_tokens,
total_tokens: self.total_tokens,
cached_input_tokens: self.cached_input_tokens,
}
}
}
@@ -25,6 +25,7 @@ pub fn make_trace_event_for_response(
input_tokens: llmcomplete_response.metadata.prompt_tokens,
output_tokens: llmcomplete_response.metadata.output_tokens,
total_tokens: llmcomplete_response.metadata.total_tokens,
cached_input_tokens: llmcomplete_response.metadata.cached_input_tokens,
},
llmcomplete_response.content.clone(),
client_stack,