feat(core+web): store raw usage data on streams (and visualize properly in ADE) (#6452)

* feat(core): store raw usage data on streams

* fix(web): various fixes to deal w/ hardcoding against openai
This commit is contained in:
Charles Packer
2025-11-29 13:45:15 -08:00
committed by Caren Thomas
parent 88a3743cc8
commit 4af6465226
4 changed files with 49 additions and 1 deletions

View File

@@ -542,6 +542,9 @@ class SimpleOpenAIStreamingInterface:
self.cached_tokens: int | None = None
self.reasoning_tokens: int | None = None
# Raw usage from provider (for transparent logging in provider trace)
self.raw_usage: dict | None = None
# Fallback token counters (using tiktoken o200k_base)
self.fallback_input_tokens = 0
self.fallback_output_tokens = 0
@@ -707,6 +710,12 @@ class SimpleOpenAIStreamingInterface:
if chunk.usage:
self.input_tokens += chunk.usage.prompt_tokens
self.output_tokens += chunk.usage.completion_tokens
# Store raw usage for transparent provider trace logging
try:
self.raw_usage = chunk.usage.model_dump(exclude_none=True)
except Exception as e:
logger.error(f"Failed to capture raw_usage from OpenAI chat completion chunk: {e}")
self.raw_usage = None
# Capture cache token details (OpenAI)
# Use `is not None` to capture 0 values (meaning "provider reported 0 cached tokens")
if hasattr(chunk.usage, "prompt_tokens_details") and chunk.usage.prompt_tokens_details:
@@ -876,6 +885,9 @@ class SimpleOpenAIResponsesStreamingInterface:
self.cached_tokens: int | None = None
self.reasoning_tokens: int | None = None
# Raw usage from provider (for transparent logging in provider trace)
self.raw_usage: dict | None = None
# -------- Mapping helpers (no broad try/except) --------
def _record_tool_mapping(self, event: object, item: object) -> tuple[str | None, str | None, int | None, str | None]:
"""Record call_id/name mapping for this tool-call using output_index and item.id if present.
@@ -1300,6 +1312,12 @@ class SimpleOpenAIResponsesStreamingInterface:
self.input_tokens = event.response.usage.input_tokens
self.output_tokens = event.response.usage.output_tokens
self.message_id = event.response.id
# Store raw usage for transparent provider trace logging
try:
self.raw_usage = event.response.usage.model_dump(exclude_none=True)
except Exception as e:
logger.error(f"Failed to capture raw_usage from OpenAI Responses API: {e}")
self.raw_usage = None
# Capture cache token details (Responses API uses input_tokens_details)
# Use `is not None` to capture 0 values (meaning "provider reported 0 cached tokens")
if hasattr(event.response.usage, "input_tokens_details") and event.response.usage.input_tokens_details: