fix: map provider for generic LLM streaming errors (#1062)

This commit is contained in:
jnjpng
2026-02-20 12:17:27 -08:00
committed by GitHub
parent d9b35895ff
commit 3fed452834
3 changed files with 25 additions and 4 deletions

View File

@@ -1405,8 +1405,9 @@ export default function App({
setErrorContext({
modelDisplayName: currentModelDisplay ?? undefined,
billingTier: billingTier ?? undefined,
modelEndpointType: llmConfig?.model_endpoint_type ?? undefined,
});
}, [currentModelDisplay, billingTier]);
}, [currentModelDisplay, billingTier, llmConfig?.model_endpoint_type]);
// Fetch billing tier once on mount
useEffect(() => {

View File

@@ -6,6 +6,7 @@
/**
 * Contextual metadata attached to error reporting. All fields are
 * optional because each is only populated once known at runtime.
 */
interface ErrorContext {
/** Billing tier of the current user/account, if known. */
billingTier?: string;
/** Human-readable display name of the active model, if known. */
modelDisplayName?: string;
/** Provider endpoint type key (e.g. a key of ENDPOINT_TYPE_DISPLAY_NAMES), if known. */
modelEndpointType?: string;
}
// Module-level holder for the most recently recorded error context.
let currentContext: ErrorContext = {};

View File

@@ -467,17 +467,18 @@ export function getRetryStatusMessage(
return "Anthropic API is overloaded, retrying...";
if (
errorDetail.includes("ChatGPT API error") ||
errorDetail.includes("ChatGPT server error") ||
errorDetail.includes("upstream connect error")
errorDetail.includes("ChatGPT server error")
) {
return "OpenAI ChatGPT backend connection failed, retrying...";
}
if (
errorDetail.includes("upstream connect error") ||
errorDetail.includes("Connection error during streaming") ||
errorDetail.includes("incomplete chunked read") ||
errorDetail.includes("connection termination")
) {
return "OpenAI ChatGPT streaming connection dropped, retrying...";
const provider = getProviderDisplayName();
return `${provider} streaming connection dropped, retrying...`;
}
if (errorDetail.includes("OpenAI API error"))
return "OpenAI API error, retrying...";
@@ -485,6 +486,24 @@ export function getRetryStatusMessage(
return DEFAULT_RETRY_MESSAGE;
}
// Maps model_endpoint_type keys to human-readable provider names used in
// user-facing retry/status messages. Keys not listed here fall back to the
// raw endpoint-type string (see getProviderDisplayName).
const ENDPOINT_TYPE_DISPLAY_NAMES: Record<string, string> = {
openai: "OpenAI",
anthropic: "Anthropic",
chatgpt_oauth: "ChatGPT",
google_ai: "Google AI",
google_vertex: "Google Vertex",
bedrock: "AWS Bedrock",
openrouter: "OpenRouter",
minimax: "MiniMax",
zai: "zAI",
};
/**
 * Resolves a human-readable provider name from the current error context.
 *
 * Looks up the context's modelEndpointType in ENDPOINT_TYPE_DISPLAY_NAMES;
 * unmapped types are returned verbatim, and a missing/empty type yields the
 * generic fallback "LLM".
 */
function getProviderDisplayName(): string {
  const endpointType = getErrorContext().modelEndpointType;
  if (!endpointType) {
    return "LLM";
  }
  const displayName = ENDPOINT_TYPE_DISPLAY_NAMES[endpointType];
  return displayName ?? endpointType;
}
/**
* Create a terminal hyperlink to the agent with run ID displayed
*/