diff --git a/src/cli/App.tsx b/src/cli/App.tsx index 69fa10b..65a9611 100644 --- a/src/cli/App.tsx +++ b/src/cli/App.tsx @@ -1405,8 +1405,9 @@ export default function App({ setErrorContext({ modelDisplayName: currentModelDisplay ?? undefined, billingTier: billingTier ?? undefined, + modelEndpointType: llmConfig?.model_endpoint_type ?? undefined, }); - }, [currentModelDisplay, billingTier]); + }, [currentModelDisplay, billingTier, llmConfig?.model_endpoint_type]); // Fetch billing tier once on mount useEffect(() => { diff --git a/src/cli/helpers/errorContext.ts b/src/cli/helpers/errorContext.ts index b770f82..6b08b66 100644 --- a/src/cli/helpers/errorContext.ts +++ b/src/cli/helpers/errorContext.ts @@ -6,6 +6,7 @@ interface ErrorContext { billingTier?: string; modelDisplayName?: string; + modelEndpointType?: string; } let currentContext: ErrorContext = {}; diff --git a/src/cli/helpers/errorFormatter.ts b/src/cli/helpers/errorFormatter.ts index df6b913..0da58ad 100644 --- a/src/cli/helpers/errorFormatter.ts +++ b/src/cli/helpers/errorFormatter.ts @@ -467,17 +467,18 @@ export function getRetryStatusMessage( return "Anthropic API is overloaded, retrying..."; if ( errorDetail.includes("ChatGPT API error") || - errorDetail.includes("ChatGPT server error") || - errorDetail.includes("upstream connect error") + errorDetail.includes("ChatGPT server error") ) { return "OpenAI ChatGPT backend connection failed, retrying..."; } if ( + errorDetail.includes("upstream connect error") || errorDetail.includes("Connection error during streaming") || errorDetail.includes("incomplete chunked read") || errorDetail.includes("connection termination") ) { - return "OpenAI ChatGPT streaming connection dropped, retrying..."; + const provider = getProviderDisplayName(); + return `${provider} streaming connection dropped, retrying...`; } if (errorDetail.includes("OpenAI API error")) return "OpenAI API error, retrying..."; @@ -485,6 +486,24 @@ export function getRetryStatusMessage( return 
DEFAULT_RETRY_MESSAGE; } +const ENDPOINT_TYPE_DISPLAY_NAMES: Record<string, string> = { + openai: "OpenAI", + anthropic: "Anthropic", + chatgpt_oauth: "ChatGPT", + google_ai: "Google AI", + google_vertex: "Google Vertex", + bedrock: "AWS Bedrock", + openrouter: "OpenRouter", + minimax: "MiniMax", + zai: "zAI", +}; + +function getProviderDisplayName(): string { + const { modelEndpointType } = getErrorContext(); + if (!modelEndpointType) return "LLM"; + return ENDPOINT_TYPE_DISPLAY_NAMES[modelEndpointType] ?? modelEndpointType; +} + +/** * Create a terminal hyperlink to the agent with run ID displayed */