fix: improve model/reasoning display in footer and slash preview (#1008)

Charles Packer
2026-02-17 22:23:47 -08:00
committed by GitHub
parent 245390adb0
commit b505ea6117
5 changed files with 103 additions and 4 deletions

View File

@@ -192,6 +192,22 @@ export function getModelUpdateArgs(
* @returns The model entry if found, null otherwise
*/
function findModelByHandle(handle: string): (typeof models)[number] | null {
const pickPreferred = (candidates: (typeof models)[number][]) =>
candidates.find((m) => m.isDefault) ??
candidates.find((m) => m.isFeatured) ??
candidates.find(
(m) =>
(m.updateArgs as { reasoning_effort?: unknown } | undefined)
?.reasoning_effort === "medium",
) ??
candidates.find(
(m) =>
(m.updateArgs as { reasoning_effort?: unknown } | undefined)
?.reasoning_effort === "high",
) ??
candidates[0] ??
null;
// Try exact match first
const exactMatch = models.find((m) => m.handle === handle);
if (exactMatch) return exactMatch;
@@ -204,7 +220,7 @@ function findModelByHandle(handle: string): (typeof models)[number] | null {
const modelPortion = rest.join("/");
// Find models with the same provider where the model portion is contained
// in the models.json handle (handles vendor prefixes and version suffixes)
const partialMatch = models.find((m) => {
const providerMatches = models.filter((m) => {
if (!m.handle.startsWith(`${provider}/`)) return false;
const mModelPortion = m.handle.slice(provider.length + 1);
// Check if either contains the other (handles both directions)
@@ -213,7 +229,17 @@ function findModelByHandle(handle: string): (typeof models)[number] | null {
modelPortion.includes(mModelPortion)
);
});
if (partialMatch) return partialMatch;
const providerMatch = pickPreferred(providerMatches);
if (providerMatch) return providerMatch;
// Cross-provider fallback by model suffix. This helps when llm_config reports
// provider_type=openai for BYOK models that are represented in models.json
// under a different provider prefix (e.g. chatgpt-plus-pro/*).
const suffixMatches = models.filter((m) =>
m.handle.endsWith(`/${modelPortion}`),
);
const suffixMatch = pickPreferred(suffixMatches);
if (suffixMatch) return suffixMatch;
}
return null;
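
A minimal standalone sketch of the resolution order this change implements. The entries, handles, and preference values below are illustrative stand-ins for models.json, not the real data:

// Hypothetical, trimmed-down entries standing in for models.json; the real
// schema carries more fields than shown here.
interface ModelEntry {
  handle: string;
  isDefault?: boolean;
  isFeatured?: boolean;
  updateArgs?: { reasoning_effort?: string };
}

const models: ModelEntry[] = [
  { handle: "openai/gpt-5", updateArgs: { reasoning_effort: "high" } },
  { handle: "openai/gpt-5", isDefault: true, updateArgs: { reasoning_effort: "medium" } },
  { handle: "chatgpt-plus-pro/codex-max", isFeatured: true },
];

// Preference order from pickPreferred: default, then featured, then medium
// effort, then high effort, then whatever comes first.
function pickPreferred(candidates: ModelEntry[]): ModelEntry | null {
  return (
    candidates.find((m) => m.isDefault) ??
    candidates.find((m) => m.isFeatured) ??
    candidates.find((m) => m.updateArgs?.reasoning_effort === "medium") ??
    candidates.find((m) => m.updateArgs?.reasoning_effort === "high") ??
    candidates[0] ??
    null
  );
}

function findModelByHandle(handle: string): ModelEntry | null {
  // 1. Exact handle match wins outright.
  const exact = models.find((m) => m.handle === handle);
  if (exact) return exact;

  const [provider, ...rest] = handle.split("/");
  const modelPortion = rest.join("/");

  // 2. Same provider, where either model portion contains the other
  //    (covers vendor prefixes and version suffixes).
  const providerMatches = models.filter((m) => {
    if (!m.handle.startsWith(`${provider}/`)) return false;
    const mModelPortion = m.handle.slice(provider.length + 1);
    return mModelPortion.includes(modelPortion) || modelPortion.includes(mModelPortion);
  });
  const providerMatch = pickPreferred(providerMatches);
  if (providerMatch) return providerMatch;

  // 3. Cross-provider fallback: any handle ending with the same model portion.
  return pickPreferred(models.filter((m) => m.handle.endsWith(`/${modelPortion}`)));
}

// A version-suffixed handle resolves within the provider, and the default entry
// beats the plain "high" entry:
console.log(findModelByHandle("openai/gpt-5-2025-08-07")?.updateArgs?.reasoning_effort); // "medium"
// No openai/* entry matches "codex-max", so the suffix fallback finds it under
// the chatgpt-plus-pro prefix:
console.log(findModelByHandle("openai/codex-max")?.handle); // "chatgpt-plus-pro/codex-max"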

View File

@@ -1295,6 +1295,20 @@ export default function App({
currentModelLabel.split("/").pop())
: null;
const currentModelProvider = llmConfig?.provider_name ?? null;
const llmReasoningEffort = llmConfig?.reasoning_effort;
const llmEnableReasoner = (llmConfig as { enable_reasoner?: boolean | null })
?.enable_reasoner;
const currentReasoningEffort: ModelReasoningEffort | null =
llmReasoningEffort === "none" ||
llmReasoningEffort === "minimal" ||
llmReasoningEffort === "low" ||
llmReasoningEffort === "medium" ||
llmReasoningEffort === "high" ||
llmReasoningEffort === "xhigh"
? llmReasoningEffort
: llmEnableReasoner === false
? "none"
: null;
// Billing tier for conditional UI and error context (fetched once on mount)
const [billingTier, setBillingTier] = useState<string | null>(null);
@@ -11177,6 +11191,7 @@ Plan file path: ${planFilePath}`;
agentName={agentName}
currentModel={currentModelDisplay}
currentModelProvider={currentModelProvider}
currentReasoningEffort={currentReasoningEffort}
messageQueue={messageQueue}
onEnterQueueEditMode={handleEnterQueueEditMode}
onEscapeCancel={
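
A hedged sketch of the derivation above, with simplified stand-ins for the llm_config shape and the ModelReasoningEffort union (the real type is imported from ../../agent/model):

// Stand-in for the imported ModelReasoningEffort union; the six values match
// the checks in the hunk above.
type ModelReasoningEffort = "none" | "minimal" | "low" | "medium" | "high" | "xhigh";

// Only the two llm_config fields this derivation reads.
interface LlmConfigLike {
  reasoning_effort?: string | null;
  enable_reasoner?: boolean | null;
}

const KNOWN_EFFORTS: readonly string[] = ["none", "minimal", "low", "medium", "high", "xhigh"];

// Pass through a recognized effort string, treat enable_reasoner === false as
// "none", and otherwise report nothing so the footer omits the tag.
function deriveReasoningEffort(config: LlmConfigLike | null): ModelReasoningEffort | null {
  const effort = config?.reasoning_effort;
  if (effort != null && KNOWN_EFFORTS.includes(effort)) {
    return effort as ModelReasoningEffort;
  }
  return config?.enable_reasoner === false ? "none" : null;
}

console.log(deriveReasoningEffort({ reasoning_effort: "xhigh" }));        // "xhigh"
console.log(deriveReasoningEffort({ enable_reasoner: false }));           // "none"
console.log(deriveReasoningEffort({ reasoning_effort: "experimental" })); // null
console.log(deriveReasoningEffort(null));                                 // null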

View File

@@ -1,6 +1,7 @@
import { Box } from "ink";
import Link from "ink-link";
import { memo, useMemo } from "react";
import type { ModelReasoningEffort } from "../../agent/model";
import { DEFAULT_AGENT_NAME } from "../../constants";
import { settingsManager } from "../../settings-manager";
import { getVersion } from "../../version";
@@ -10,16 +11,32 @@ import { Text } from "./Text";
interface AgentInfoBarProps {
agentId?: string;
agentName?: string | null;
currentModel?: string | null;
currentReasoningEffort?: ModelReasoningEffort | null;
serverUrl?: string;
conversationId?: string;
}
function formatReasoningLabel(
effort: ModelReasoningEffort | null | undefined,
): string | null {
if (effort === "none") return "no";
if (effort === "xhigh") return "max";
if (effort === "minimal") return "minimal";
if (effort === "low") return "low";
if (effort === "medium") return "medium";
if (effort === "high") return "high";
return null;
}
/**
* Shows agent info bar with current agent details and useful links.
*/
export const AgentInfoBar = memo(function AgentInfoBar({
agentId,
agentName,
currentModel,
currentReasoningEffort,
serverUrl,
conversationId,
}: AgentInfoBarProps) {
@@ -38,6 +55,10 @@ export const AgentInfoBar = memo(function AgentInfoBar({
? `https://app.letta.com/agents/${agentId}${conversationId && conversationId !== "default" ? `?conversation=${conversationId}` : ""}`
: "";
const showBottomBar = agentId && agentId !== "loading";
const reasoningLabel = formatReasoningLabel(currentReasoningEffort);
const modelLine = currentModel
? `${currentModel}${reasoningLabel ? ` (${reasoningLabel})` : ""}`
: null;
if (!showBottomBar) {
return null;
@@ -101,9 +122,15 @@ export const AgentInfoBar = memo(function AgentInfoBar({
{!isCloudUser && <Text dimColor>{serverUrl}</Text>}
</Box>
{/* Alien + Agent ID */}
{/* Model summary */}
<Box>
<Text color={colors.footer.agentName}>{alienLines[2]}</Text>
<Text dimColor>{modelLine ?? "model unknown"}</Text>
</Box>
{/* Agent ID */}
<Box>
<Text>{alienLines[3]}</Text>
<Text dimColor>{agentId}</Text>
</Box>
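
A quick usage sketch of the label mapping and the rendered model line; the handles are illustrative and the ModelReasoningEffort type is again a stand-in for the real import:

type ModelReasoningEffort = "none" | "minimal" | "low" | "medium" | "high" | "xhigh";

// Same mapping as formatReasoningLabel: "none" and "xhigh" get friendlier
// labels, the middle efforts pass through, anything else hides the tag.
function formatReasoningLabel(effort: ModelReasoningEffort | null | undefined): string | null {
  if (effort === "none") return "no";
  if (effort === "xhigh") return "max";
  if (effort === "minimal" || effort === "low" || effort === "medium" || effort === "high") {
    return effort;
  }
  return null;
}

function modelLine(currentModel: string | null, effort: ModelReasoningEffort | null): string {
  const label = formatReasoningLabel(effort);
  return currentModel ? `${currentModel}${label ? ` (${label})` : ""}` : "model unknown";
}

console.log(modelLine("openai/gpt-5", "xhigh")); // "openai/gpt-5 (max)"
console.log(modelLine("openai/gpt-5", "none"));  // "openai/gpt-5 (no)"
console.log(modelLine("openai/gpt-5", null));    // "openai/gpt-5"
console.log(modelLine(null, "high"));            // "model unknown"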

View File

@@ -1,5 +1,6 @@
import { Box } from "ink";
import { useEffect } from "react";
import type { ModelReasoningEffort } from "../../agent/model";
import { AgentInfoBar } from "./AgentInfoBar";
import { FileAutocomplete } from "./FileAutocomplete";
import { SlashCommandAutocomplete } from "./SlashCommandAutocomplete";
@@ -13,6 +14,8 @@ interface InputAssistProps {
onAutocompleteActiveChange: (isActive: boolean) => void;
agentId?: string;
agentName?: string | null;
currentModel?: string | null;
currentReasoningEffort?: ModelReasoningEffort | null;
serverUrl?: string;
workingDirectory?: string;
conversationId?: string;
@@ -33,6 +36,8 @@ export function InputAssist({
onAutocompleteActiveChange,
agentId,
agentName,
currentModel,
currentReasoningEffort,
serverUrl,
workingDirectory,
conversationId,
@@ -80,6 +85,8 @@ export function InputAssist({
<AgentInfoBar
agentId={agentId}
agentName={agentName}
currentModel={currentModel}
currentReasoningEffort={currentReasoningEffort}
serverUrl={serverUrl}
conversationId={conversationId}
/>

View File

@@ -17,6 +17,7 @@ import {
useState,
} from "react";
import stringWidth from "string-width";
import type { ModelReasoningEffort } from "../../agent/model";
import { LETTA_CLOUD_API_URL } from "../../auth/oauth";
import {
ELAPSED_DISPLAY_THRESHOLD_MS,
@@ -50,6 +51,18 @@ function truncateEnd(value: string, maxChars: number): string {
return `${value.slice(0, maxChars - 3)}...`;
}
function getReasoningEffortTag(
effort: ModelReasoningEffort | null | undefined,
): string | null {
if (effort === "none") return "no";
if (effort === "xhigh") return "max";
if (effort === "minimal") return "minimal";
if (effort === "low") return "low";
if (effort === "medium") return "medium";
if (effort === "high") return "high";
return null;
}
/**
* Represents a visual line segment in the text.
* A visual line ends at either a newline character or when it reaches lineWidth.
@@ -203,6 +216,7 @@ const InputFooter = memo(function InputFooter({
showExitHint,
agentName,
currentModel,
currentReasoningEffort,
isOpenAICodexProvider,
isByokProvider,
hideFooter,
@@ -219,6 +233,7 @@ const InputFooter = memo(function InputFooter({
showExitHint: boolean;
agentName: string | null | undefined;
currentModel: string | null | undefined;
currentReasoningEffort?: ModelReasoningEffort | null;
isOpenAICodexProvider: boolean;
isByokProvider: boolean;
hideFooter: boolean;
@@ -230,10 +245,13 @@ const InputFooter = memo(function InputFooter({
const hideFooterContent = hideFooter;
const maxAgentChars = Math.max(10, Math.floor(rightColumnWidth * 0.45));
const displayAgentName = truncateEnd(agentName || "Unnamed", maxAgentChars);
const reasoningTag = getReasoningEffortTag(currentReasoningEffort);
const byokExtraChars = isByokProvider ? 2 : 0; // " ▲"
const reservedChars = displayAgentName.length + byokExtraChars + 4;
const maxModelChars = Math.max(8, rightColumnWidth - reservedChars);
const displayModel = truncateEnd(currentModel ?? "unknown", maxModelChars);
const modelWithReasoning =
(currentModel ?? "unknown") + (reasoningTag ? ` (${reasoningTag})` : "");
const displayModel = truncateEnd(modelWithReasoning, maxModelChars);
const rightTextLength =
displayAgentName.length + displayModel.length + byokExtraChars + 3;
const rightPrefixSpaces = Math.max(0, rightColumnWidth - rightTextLength);
@@ -522,6 +540,7 @@ export function Input({
agentName,
currentModel,
currentModelProvider,
currentReasoningEffort,
messageQueue,
onEnterQueueEditMode,
onEscapeCancel,
@@ -561,6 +580,7 @@ export function Input({
agentName?: string | null;
currentModel?: string | null;
currentModelProvider?: string | null;
currentReasoningEffort?: ModelReasoningEffort | null;
messageQueue?: QueuedMessage[];
onEnterQueueEditMode?: () => void;
onEscapeCancel?: () => void;
@@ -1293,6 +1313,8 @@ export function Input({
onAutocompleteActiveChange={setIsAutocompleteActive}
agentId={agentId}
agentName={agentName}
currentModel={currentModel}
currentReasoningEffort={currentReasoningEffort}
serverUrl={serverUrl}
workingDirectory={process.cwd()}
conversationId={conversationId}
@@ -1307,6 +1329,7 @@ export function Input({
showExitHint={ralphActive || ralphPending}
agentName={agentName}
currentModel={currentModel}
currentReasoningEffort={currentReasoningEffort}
isOpenAICodexProvider={
currentModelProvider === OPENAI_CODEX_PROVIDER_NAME
}
@@ -1354,6 +1377,7 @@ export function Input({
ralphActive,
ralphPending,
currentModel,
currentReasoningEffort,
currentModelProvider,
hideFooter,
footerRightColumnWidth,
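
A self-contained sketch of the right-column budget from the InputFooter hunk above. The 45% agent-name cap, the " ▲" BYOK marker, and appending the reasoning tag before truncation mirror the diff; the separator string, the final join, and the example inputs are placeholders:

// Simplified stand-in for truncateEnd; the real helper may measure display
// width with string-width rather than string length.
function truncateEnd(value: string, maxChars: number): string {
  if (value.length <= maxChars) return value;
  return `${value.slice(0, maxChars - 3)}...`;
}

function footerRightText(opts: {
  rightColumnWidth: number;
  agentName?: string | null;
  currentModel?: string | null;
  reasoningTag?: string | null; // e.g. "max" for xhigh, "no" for none
  isByokProvider?: boolean;
}): string {
  const { rightColumnWidth, agentName, currentModel, reasoningTag, isByokProvider } = opts;
  // The agent name may take up to ~45% of the right column.
  const maxAgentChars = Math.max(10, Math.floor(rightColumnWidth * 0.45));
  const displayAgentName = truncateEnd(agentName || "Unnamed", maxAgentChars);
  // Reserve room for the name, the optional " ▲" BYOK marker, and separators.
  const byokExtraChars = isByokProvider ? 2 : 0;
  const reservedChars = displayAgentName.length + byokExtraChars + 4;
  const maxModelChars = Math.max(8, rightColumnWidth - reservedChars);
  // The reasoning tag is appended before truncation, so it shares the model's budget.
  const modelWithReasoning =
    (currentModel ?? "unknown") + (reasoningTag ? ` (${reasoningTag})` : "");
  const displayModel = truncateEnd(modelWithReasoning, maxModelChars);
  // Placeholder separator; the real component lays these out with Ink boxes.
  return `${displayAgentName} | ${displayModel}${isByokProvider ? " ▲" : ""}`;
}

console.log(
  footerRightText({
    rightColumnWidth: 40,
    agentName: "scratch-agent",
    currentModel: "gpt-5-codex",
    reasoningTag: "max",
    isByokProvider: true,
  }),
); // "scratch-agent | gpt-5-codex (max) ▲"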